mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
Bump wharfie to v0.5.1 and use shared decompression code
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in:
parent
05d43278e2
commit
7d3447ceff
16
go.mod
16
go.mod
@ -21,6 +21,7 @@ replace (
|
||||
github.com/docker/docker => github.com/docker/docker v20.10.2+incompatible
|
||||
github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34
|
||||
github.com/golang/protobuf => github.com/golang/protobuf v1.5.2
|
||||
github.com/googleapis/gax-go/v2 => github.com/googleapis/gax-go/v2 v2.0.5
|
||||
github.com/juju/errors => github.com/k3s-io/nocode v0.0.0-20200630202308-cb097102c09f
|
||||
github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
|
||||
github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
|
||||
@ -81,7 +82,7 @@ require (
|
||||
github.com/containerd/stargz-snapshotter v0.8.0
|
||||
github.com/coreos/go-iptables v0.5.0
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
|
||||
github.com/docker/docker v20.10.7+incompatible
|
||||
github.com/docker/docker v20.10.10+incompatible
|
||||
github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83
|
||||
github.com/flannel-io/flannel v0.15.1
|
||||
github.com/go-bindata/go-bindata v3.1.2+incompatible
|
||||
@ -93,7 +94,7 @@ require (
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/k3s-io/helm-controller v0.11.7
|
||||
github.com/k3s-io/kine v0.8.1
|
||||
github.com/klauspost/compress v1.13.5
|
||||
github.com/klauspost/compress v1.13.6
|
||||
github.com/kubernetes-sigs/cri-tools v0.0.0-00010101000000-000000000000
|
||||
github.com/lib/pq v1.10.2
|
||||
github.com/mattn/go-sqlite3 v1.14.8
|
||||
@ -105,12 +106,11 @@ require (
|
||||
github.com/opencontainers/runc v1.0.2
|
||||
github.com/opencontainers/selinux v1.8.2
|
||||
github.com/otiai10/copy v1.6.0
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rancher/dynamiclistener v0.3.1
|
||||
github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08
|
||||
github.com/rancher/remotedialer v0.2.0
|
||||
github.com/rancher/wharfie v0.3.4
|
||||
github.com/rancher/wharfie v0.5.1
|
||||
github.com/rancher/wrangler v0.8.9
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/rootless-containers/rootlesskit v0.14.5
|
||||
@ -118,15 +118,15 @@ require (
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
|
||||
github.com/urfave/cli v1.22.2
|
||||
github.com/urfave/cli v1.22.4
|
||||
go.etcd.io/etcd/api/v3 v3.5.0
|
||||
go.etcd.io/etcd/client/v3 v3.5.0
|
||||
go.etcd.io/etcd/etcdutl/v3 v3.5.0
|
||||
go.etcd.io/etcd/server/v3 v3.5.0
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||
google.golang.org/grpc v1.40.0
|
||||
golang.org/x/net v0.0.0-20211005215030-d2e5035098b3
|
||||
golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef
|
||||
google.golang.org/grpc v1.41.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252
|
||||
k8s.io/api v0.22.4
|
||||
|
185
go.sum
185
go.sum
@ -11,20 +11,41 @@ cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gc
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE=
|
||||
@ -62,11 +83,11 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
|
||||
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
|
||||
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/Microsoft/hcsshim v0.8.20 h1:ZTwcx3NS8n07kPf/JZ1qwU6vnjhVPMUWlXBF8r9UxrE=
|
||||
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
@ -115,6 +136,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
@ -175,9 +197,9 @@ github.com/containerd/nri v0.1.0 h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/stargz-snapshotter v0.8.0 h1:dpcv7te4ef8dNQMgN6zzbd4IP1puSFIXqFbqZlKFvd4=
|
||||
github.com/containerd/stargz-snapshotter v0.8.0/go.mod h1:1BffdxelEfp8ct4oI7uy2mfmvMDE2o3SPe32AtFyYXk=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.8.0 h1:oA1wx8kTFfImfsT5bScbrZd8gK+WtQnn15q82Djvm0Y=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.9.0 h1:PkB6BSTfOKX23erT2GkoUKkJEcXfNcyKskIViK770v8=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
|
||||
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
|
||||
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
|
||||
@ -214,8 +236,9 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
@ -237,14 +260,13 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v20.10.8+incompatible h1:/zO/6y9IOpcehE49yMRTV9ea0nBpb8OeqSskXLNfH1E=
|
||||
github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v20.10.9+incompatible h1:OJ7YkwQA+k2Oi51lmCojpjiygKpi76P7bg91b2eJxYU=
|
||||
github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw=
|
||||
github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
|
||||
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
@ -406,8 +428,10 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@ -426,24 +450,38 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-containerregistry v0.5.0 h1:eb9sinv4PKm0AUwQGov0mvIdA4pyBGjRofxN4tWnMwM=
|
||||
github.com/google/go-containerregistry v0.5.0/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
|
||||
github.com/google/go-containerregistry v0.6.1-0.20211111182346-7a6ee45528a9 h1:m5Yu3eJF1gVvzA51M9Ll2TZH+fi8P38bBVDL2jdQzoU=
|
||||
github.com/google/go-containerregistry v0.6.1-0.20211111182346-7a6ee45528a9/go.mod h1:VGc0QpDDhK6zwYGlQb3s0LDGbTCGMFiifm0UgG9v1oQ=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
@ -453,7 +491,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
@ -518,6 +555,7 @@ github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7U
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
@ -532,7 +570,6 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
|
||||
github.com/joho/godotenv v0.0.0-20161216230537-726cc8b906e3/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
@ -636,15 +673,16 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
|
||||
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
@ -667,6 +705,7 @@ github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z
|
||||
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@ -690,7 +729,6 @@ github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
|
||||
github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y=
|
||||
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
|
||||
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
|
||||
@ -722,8 +760,9 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0=
|
||||
github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
@ -762,7 +801,6 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
@ -778,7 +816,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
|
||||
@ -790,7 +827,6 @@ github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug=
|
||||
@ -798,6 +834,7 @@ github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQ
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20210730191737-8e42a01fb1b7/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
|
||||
@ -831,6 +868,7 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
@ -876,8 +914,8 @@ github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08 h1:NxR8Fh0eE7/5/5Zvl
|
||||
github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08/go.mod h1:9qZd/S8DqWzfKtjKGgSoHqGEByYmUE3qRaBaaAHwfEM=
|
||||
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
|
||||
github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
|
||||
github.com/rancher/wharfie v0.3.4 h1:wVqFW1cyRA20zY90Z56VrjG0lWg3oFmf/4ryr5Dkkis=
|
||||
github.com/rancher/wharfie v0.3.4/go.mod h1:cb8mSczpmw7ItbPF3K1W7crWuJLVdyV49sZZuaY4BS8=
|
||||
github.com/rancher/wharfie v0.5.1 h1:TUqZyNj6BaGe2+tqhwAGwZouuwx02mvAMMjNuyejc5I=
|
||||
github.com/rancher/wharfie v0.5.1/go.mod h1:5AHZRFBAOWYPDNCwj/y5Dpj+MMwXLoitPwxjYAIbcxQ=
|
||||
github.com/rancher/wrangler v0.8.10 h1:GfM3dZyw3TconwqknRm6YO/wiKEbUIGl0/HWVqBy658=
|
||||
github.com/rancher/wrangler v0.8.10/go.mod h1:Lte9WjPtGYxYacIWeiS9qawvu2R4NujFU9xuXWJvc/0=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
@ -901,7 +939,6 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
@ -929,15 +966,19 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
|
||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
|
||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
@ -946,6 +987,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
@ -980,11 +1022,14 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA=
|
||||
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
|
||||
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
@ -1002,7 +1047,9 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
@ -1027,8 +1074,11 @@ go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvS
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
|
||||
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
|
||||
@ -1098,8 +1148,10 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
|
||||
@ -1111,6 +1163,8 @@ golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hM
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
|
||||
@ -1119,8 +1173,20 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -1146,13 +1212,13 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
||||
@ -1176,7 +1242,6 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@ -1198,15 +1263,32 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -1231,31 +1313,53 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
|
||||
google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
@ -1265,8 +1369,9 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
|
||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
@ -1304,11 +1409,11 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252 h1:gmJCKidOfjKDUHF1jjke+I+2iQIyE3HNNxu2OKO/FUI=
|
||||
inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252/go.mod h1:zq+R+tLcdHugi7Jt+FtIQY6m6wtX34lr2CdQVH2fhW0=
|
||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
|
@ -57,7 +57,7 @@ func setupContainerdConfig(ctx context.Context, cfg *config.Node) error {
|
||||
NodeConfig: cfg,
|
||||
DisableCgroup: disableCgroup,
|
||||
IsRunningInUserNS: isRunningInUserNS,
|
||||
PrivateRegistryConfig: privRegistries.Registry(),
|
||||
PrivateRegistryConfig: privRegistries.Registry,
|
||||
ExtraRuntimes: findNvidiaContainerRuntimes(os.DirFS(string(os.PathSeparator))),
|
||||
}
|
||||
|
||||
|
@ -2,8 +2,6 @@ package containerd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -21,14 +19,12 @@ import (
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/pkg/cri/constants"
|
||||
"github.com/containerd/containerd/reference/docker"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/natefinch/lumberjack"
|
||||
"github.com/pierrec/lz4"
|
||||
"github.com/pkg/errors"
|
||||
util2 "github.com/rancher/k3s/pkg/agent/util"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/rancher/k3s/pkg/untar"
|
||||
"github.com/rancher/k3s/pkg/version"
|
||||
"github.com/rancher/wharfie/pkg/tarfile"
|
||||
"github.com/rancher/wrangler/pkg/merr"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
@ -194,7 +190,7 @@ func preloadImages(ctx context.Context, cfg *config.Node) error {
|
||||
logrus.Errorf("Error encountered while importing %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Imported images from %s in %s", filePath, time.Since(start))
|
||||
logrus.Infof("Imported images from %s in %s", filePath, time.Since(start))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -203,39 +199,26 @@ func preloadImages(ctx context.Context, cfg *config.Node) error {
|
||||
// This is in its own function so that we can ensure that the various readers are properly closed, as some
|
||||
// decompressing readers need to be explicitly closed and others do not.
|
||||
func preloadFile(ctx context.Context, cfg *config.Node, client *containerd.Client, criConn *grpc.ClientConn, filePath string) error {
|
||||
file, err := os.Open(filePath)
|
||||
if util2.HasSuffixI(filePath, ".txt") {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
logrus.Infof("Pulling images from %s", filePath)
|
||||
return prePullImages(ctx, criConn, file)
|
||||
}
|
||||
|
||||
opener, err := tarfile.GetOpener(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var imageReader io.Reader
|
||||
switch {
|
||||
case util2.HasSuffixI(filePath, ".txt"):
|
||||
return prePullImages(ctx, criConn, file)
|
||||
case util2.HasSuffixI(filePath, ".tar"):
|
||||
imageReader = file
|
||||
case util2.HasSuffixI(filePath, ".tar.lz4"):
|
||||
imageReader = lz4.NewReader(file)
|
||||
case util2.HasSuffixI(filePath, ".tar.bz2", ".tbz"):
|
||||
imageReader = bzip2.NewReader(file)
|
||||
case util2.HasSuffixI(filePath, ".tar.gz", ".tgz"):
|
||||
zr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer zr.Close()
|
||||
imageReader = zr
|
||||
case util2.HasSuffixI(filePath, "tar.zst", ".tzst"):
|
||||
zr, err := zstd.NewReader(file, zstd.WithDecoderMaxMemory(untar.MaxDecoderMemory))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer zr.Close()
|
||||
imageReader = zr
|
||||
default:
|
||||
return errors.New("unhandled file type")
|
||||
imageReader, err := opener()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer imageReader.Close()
|
||||
|
||||
logrus.Infof("Importing images from %s", filePath)
|
||||
|
||||
|
@ -15,20 +15,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rancher/wharfie/pkg/tarfile"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// The zstd decoder will attempt to use up to 1GB memory for streaming operations by default,
|
||||
// which is excessive and will OOM low-memory devices.
|
||||
// NOTE: This must be at least as large as the window size used when compressing tarballs, or you
|
||||
// will see a "window size exceeded" error when decompressing. The zstd CLI tool uses 4MB by
|
||||
// default; the --long option defaults to 27 or 128M, which is still too much for a Pi3. 32MB
|
||||
// (--long=25) has been tested to work acceptably while still compressing by an additional 3-6% on
|
||||
// our datasets.
|
||||
MaxDecoderMemory = 1 << 25
|
||||
)
|
||||
|
||||
// TODO(bradfitz): this was copied from x/build/cmd/buildlet/buildlet.go
|
||||
// but there were some buildlet-specific bits in there, so the code is
|
||||
// forked for now. Unfork and add some opts arguments here, so the
|
||||
@ -49,7 +39,7 @@ func untar(r io.Reader, dir string) (err error) {
|
||||
logrus.Printf("error extracting tarball into %s after %d files, %d dirs, %v: %v", dir, nFiles, len(madeDir), td, err)
|
||||
}
|
||||
}()
|
||||
zr, err := zstd.NewReader(r, zstd.WithDecoderMaxMemory(MaxDecoderMemory))
|
||||
zr, err := zstd.NewReader(r, zstd.WithDecoderMaxMemory(tarfile.MaxDecoderMemory))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error extracting zstd-compressed body: %v", err)
|
||||
}
|
||||
|
26
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
26
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@ -140,7 +140,7 @@ func testOnGCE() bool {
|
||||
}()
|
||||
|
||||
go func() {
|
||||
addrs, err := net.LookupHost("metadata.google.internal")
|
||||
addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
|
||||
if err != nil || len(addrs) == 0 {
|
||||
resc <- false
|
||||
return
|
||||
@ -282,6 +282,7 @@ func NewClient(c *http.Client) *Client {
|
||||
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||
// This func is otherwise equivalent to Get.
|
||||
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
ctx := context.TODO()
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
@ -296,6 +297,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
suffix = strings.TrimLeft(suffix, "/")
|
||||
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, err := http.NewRequest("GET", u, nil)
|
||||
if err != nil {
|
||||
@ -303,9 +305,25 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
}
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
var res *http.Response
|
||||
var reqErr error
|
||||
retryer := newRetryer()
|
||||
for {
|
||||
res, reqErr = c.hc.Do(req)
|
||||
var code int
|
||||
if res != nil {
|
||||
code = res.StatusCode
|
||||
}
|
||||
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
|
||||
if err := sleep(ctx, delay); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if reqErr != nil {
|
||||
return "", "", nil
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
|
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
Normal file
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
maxRetryAttempts = 5
|
||||
)
|
||||
|
||||
var (
|
||||
syscallRetryable = func(err error) bool { return false }
|
||||
)
|
||||
|
||||
// defaultBackoff is basically equivalent to gax.Backoff without the need for
|
||||
// the dependency.
|
||||
type defaultBackoff struct {
|
||||
max time.Duration
|
||||
mul float64
|
||||
cur time.Duration
|
||||
}
|
||||
|
||||
func (b *defaultBackoff) Pause() time.Duration {
|
||||
d := time.Duration(1 + rand.Int63n(int64(b.cur)))
|
||||
b.cur = time.Duration(float64(b.cur) * b.mul)
|
||||
if b.cur > b.max {
|
||||
b.cur = b.max
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// sleep is the equivalent of gax.Sleep without the need for the dependency.
|
||||
func sleep(ctx context.Context, d time.Duration) error {
|
||||
t := time.NewTimer(d)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Stop()
|
||||
return ctx.Err()
|
||||
case <-t.C:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func newRetryer() *metadataRetryer {
|
||||
return &metadataRetryer{bo: &defaultBackoff{
|
||||
cur: 100 * time.Millisecond,
|
||||
max: 30 * time.Second,
|
||||
mul: 2,
|
||||
}}
|
||||
}
|
||||
|
||||
type backoff interface {
|
||||
Pause() time.Duration
|
||||
}
|
||||
|
||||
type metadataRetryer struct {
|
||||
bo backoff
|
||||
attempts int
|
||||
}
|
||||
|
||||
func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
|
||||
if status == http.StatusOK {
|
||||
return 0, false
|
||||
}
|
||||
retryOk := shouldRetry(status, err)
|
||||
if !retryOk {
|
||||
return 0, false
|
||||
}
|
||||
if r.attempts == maxRetryAttempts {
|
||||
return 0, false
|
||||
}
|
||||
r.attempts++
|
||||
return r.bo.Pause(), true
|
||||
}
|
||||
|
||||
func shouldRetry(status int, err error) bool {
|
||||
if 500 <= status && status <= 599 {
|
||||
return true
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return true
|
||||
}
|
||||
// Transient network errors should be retried.
|
||||
if syscallRetryable(err) {
|
||||
return true
|
||||
}
|
||||
if err, ok := err.(interface{ Temporary() bool }); ok {
|
||||
if err.Temporary() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if err, ok := err.(interface{ Unwrap() error }); ok {
|
||||
return shouldRetry(status, err.Unwrap())
|
||||
}
|
||||
return false
|
||||
}
|
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
Normal file
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package metadata
|
||||
|
||||
import "syscall"
|
||||
|
||||
func init() {
|
||||
// Initialize syscallRetryable to return true on transient socket-level
|
||||
// errors. These errors are specific to Linux.
|
||||
syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
|
||||
}
|
2
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
2
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
@ -1,4 +1,4 @@
|
||||
# go-winio
|
||||
# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
|
5
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
5
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
@ -28,8 +28,9 @@ const (
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
SeSecurityPrivilege = "SeSecurityPrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
|
131
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
generated
vendored
131
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
generated
vendored
@ -23,7 +23,6 @@
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
@ -42,6 +41,7 @@ import (
|
||||
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
)
|
||||
|
||||
// A Reader permits random access reads from a stargz file.
|
||||
@ -95,10 +95,10 @@ func WithTelemetry(telemetry *Telemetry) OpenOption {
|
||||
}
|
||||
}
|
||||
|
||||
// A func which takes start time and records the diff
|
||||
// MeasureLatencyHook is a func which takes start time and records the diff
|
||||
type MeasureLatencyHook func(time.Time)
|
||||
|
||||
// A struct which defines telemetry hooks. By implementing these hooks you should be able to record
|
||||
// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
|
||||
// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
|
||||
type Telemetry struct {
|
||||
GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
|
||||
@ -146,7 +146,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
|
||||
fSize := d.FooterSize()
|
||||
fOffset := positive(int64(len(footer)) - fSize)
|
||||
maybeTocBytes := footer[:fOffset]
|
||||
tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
|
||||
_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
|
||||
if err != nil {
|
||||
allErr = append(allErr, err)
|
||||
continue
|
||||
@ -187,7 +187,7 @@ func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr e
|
||||
for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} {
|
||||
fSize := d.FooterSize()
|
||||
fOffset := positive(int64(len(footer)) - fSize)
|
||||
tocOffset, _, err := d.ParseFooter(footer[fOffset:])
|
||||
_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
|
||||
if err == nil {
|
||||
return tocOffset, fSize, err
|
||||
}
|
||||
@ -326,6 +326,10 @@ func (r *Reader) getOrCreateDir(d string) *TOCEntry {
|
||||
return e
|
||||
}
|
||||
|
||||
func (r *Reader) TOCDigest() digest.Digest {
|
||||
return r.tocDigest
|
||||
}
|
||||
|
||||
// VerifyTOC checks that the TOC JSON in the passed blob matches the
|
||||
// passed digests and that the TOC JSON contains digests for all chunks
|
||||
// contained in the blob. If the verification succceeds, this function
|
||||
@ -335,7 +339,12 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
|
||||
if r.tocDigest != tocDigest {
|
||||
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
|
||||
}
|
||||
return r.Verifiers()
|
||||
}
|
||||
|
||||
// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases
|
||||
// because this doesn't verify TOC.
|
||||
func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
|
||||
chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
|
||||
regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
|
||||
var chunkDigestMapIncomplete bool
|
||||
@ -591,6 +600,11 @@ type currentCompressionWriter struct{ w *Writer }
|
||||
|
||||
func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
|
||||
ccw.w.diffHash.Write(p)
|
||||
if ccw.w.gz == nil {
|
||||
if err := ccw.w.condOpenGz(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return ccw.w.gz.Write(p)
|
||||
}
|
||||
|
||||
@ -601,6 +615,25 @@ func (w *Writer) chunkSize() int {
|
||||
return w.ChunkSize
|
||||
}
|
||||
|
||||
// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
|
||||
// TOC JSON and footer are removed.
|
||||
func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
|
||||
footerSize := c.FooterSize()
|
||||
if sr.Size() < footerSize {
|
||||
return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
|
||||
}
|
||||
footerOffset := sr.Size() - footerSize
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := sr.ReadAt(footer, footerOffset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobPayloadSize, _, _, err := c.ParseFooter(footer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse footer")
|
||||
}
|
||||
return c.Reader(io.LimitReader(sr, blobPayloadSize))
|
||||
}
|
||||
|
||||
// NewWriter returns a new stargz writer (gzip-based) writing to w.
|
||||
//
|
||||
// The writer must be closed to write its trailing table of contents.
|
||||
@ -616,7 +649,7 @@ func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
|
||||
return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
|
||||
}
|
||||
|
||||
// NewWriterLevel returns a new stargz writer writing to w.
|
||||
// NewWriterWithCompressor returns a new stargz writer writing to w.
|
||||
// The compression method is configurable.
|
||||
//
|
||||
// The writer must be closed to write its trailing table of contents.
|
||||
@ -696,29 +729,71 @@ func (w *Writer) condOpenGz() (err error) {
|
||||
// each of its contents to w.
|
||||
//
|
||||
// The input r can optionally be gzip compressed but the output will
|
||||
// always be gzip compressed.
|
||||
// always be compressed by the specified compressor.
|
||||
func (w *Writer) AppendTar(r io.Reader) error {
|
||||
return w.appendTar(r, false)
|
||||
}
|
||||
|
||||
// AppendTarLossLess reads the tar or tar.gz file from r and appends
|
||||
// each of its contents to w.
|
||||
//
|
||||
// The input r can optionally be gzip compressed but the output will
|
||||
// always be compressed by the specified compressor.
|
||||
//
|
||||
// The difference of this func with AppendTar is that this writes
|
||||
// the input tar stream into w without any modification (e.g. to header bytes).
|
||||
//
|
||||
// Note that if the input tar stream already contains TOC JSON, this returns
|
||||
// error because w cannot overwrite the TOC JSON to the one generated by w without
|
||||
// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
|
||||
// you shoud decompress it and remove TOC JSON in advance.
|
||||
func (w *Writer) AppendTarLossLess(r io.Reader) error {
|
||||
return w.appendTar(r, true)
|
||||
}
|
||||
|
||||
func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
||||
var src io.Reader
|
||||
br := bufio.NewReader(r)
|
||||
var tr *tar.Reader
|
||||
if isGzip(br) {
|
||||
// NewReader can't fail if isGzip returned true.
|
||||
zr, _ := gzip.NewReader(br)
|
||||
tr = tar.NewReader(zr)
|
||||
src = zr
|
||||
} else {
|
||||
tr = tar.NewReader(br)
|
||||
src = io.Reader(br)
|
||||
}
|
||||
dst := currentCompressionWriter{w}
|
||||
var tw *tar.Writer
|
||||
if !lossless {
|
||||
tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
|
||||
}
|
||||
tr := tar.NewReader(src)
|
||||
if lossless {
|
||||
tr.RawAccounting = true
|
||||
}
|
||||
for {
|
||||
h, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
if lossless {
|
||||
if remain := tr.RawBytes(); len(remain) > 0 {
|
||||
// Collect the remaining null bytes.
|
||||
// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
|
||||
if _, err := dst.Write(remain); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
|
||||
}
|
||||
if h.Name == TOCTarName {
|
||||
if cleanEntryName(h.Name) == TOCTarName {
|
||||
// It is possible for a layer to be "stargzified" twice during the
|
||||
// distribution lifecycle. So we reserve "TOCTarName" here to avoid
|
||||
// duplicated entries in the resulting layer.
|
||||
if lossless {
|
||||
// We cannot handle this in lossless way.
|
||||
return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@ -744,9 +819,14 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
if err := w.condOpenGz(); err != nil {
|
||||
return err
|
||||
}
|
||||
tw := tar.NewWriter(currentCompressionWriter{w})
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
if tw != nil {
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if _, err := dst.Write(tr.RawBytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch h.Typeflag {
|
||||
case tar.TypeLink:
|
||||
@ -808,7 +888,13 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
}
|
||||
|
||||
teeChunk := io.TeeReader(tee, chunkDigest.Hash())
|
||||
if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
|
||||
var out io.Writer
|
||||
if tw != nil {
|
||||
out = tw
|
||||
} else {
|
||||
out = dst
|
||||
}
|
||||
if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
|
||||
return fmt.Errorf("error copying %q: %v", h.Name, err)
|
||||
}
|
||||
ent.ChunkDigest = chunkDigest.Digest().String()
|
||||
@ -825,11 +911,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
if payloadDigest != nil {
|
||||
regFileEntry.Digest = payloadDigest.Digest().String()
|
||||
}
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
if tw != nil {
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
remainDest := ioutil.Discard
|
||||
if lossless {
|
||||
remainDest = dst // Preserve the remaining bytes in lossless mode
|
||||
}
|
||||
_, err := io.Copy(remainDest, src)
|
||||
return err
|
||||
}
|
||||
|
||||
// DiffID returns the SHA-256 of the uncompressed tar bytes.
|
||||
|
3
vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
generated
vendored
3
vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
generated
vendored
@ -3,8 +3,9 @@ module github.com/containerd/stargz-snapshotter/estargz
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.13.5
|
||||
github.com/klauspost/compress v1.13.6
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/vbatts/tar-split v0.11.2
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
)
|
||||
|
18
vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
generated
vendored
18
vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
generated
vendored
@ -1,8 +1,22 @@
|
||||
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
|
||||
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
30
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
generated
vendored
30
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
generated
vendored
@ -124,31 +124,31 @@ func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Dig
|
||||
return parseTOCEStargz(r)
|
||||
}
|
||||
|
||||
func (gz *GzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
|
||||
func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
|
||||
if len(p) != FooterSize {
|
||||
return 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
|
||||
return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
defer zr.Close()
|
||||
extra := zr.Header.Extra
|
||||
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
|
||||
if si1 != 'S' || si2 != 'G' {
|
||||
return 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
|
||||
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
|
||||
}
|
||||
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
|
||||
return 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
|
||||
return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
|
||||
}
|
||||
if string(subfield[16:]) != "STARGZ" {
|
||||
return 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
|
||||
return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
|
||||
}
|
||||
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
}
|
||||
return tocOffset, 0, nil
|
||||
return tocOffset, tocOffset, 0, nil
|
||||
}
|
||||
|
||||
func (gz *GzipDecompressor) FooterSize() int64 {
|
||||
@ -165,27 +165,27 @@ func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst dige
|
||||
return parseTOCEStargz(r)
|
||||
}
|
||||
|
||||
func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
|
||||
func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
|
||||
if len(p) != legacyFooterSize {
|
||||
return 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
|
||||
return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
|
||||
}
|
||||
defer zr.Close()
|
||||
extra := zr.Header.Extra
|
||||
if len(extra) != 16+len("STARGZ") {
|
||||
return 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
|
||||
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
|
||||
}
|
||||
if string(extra[16:]) != "STARGZ" {
|
||||
return 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
|
||||
return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
|
||||
}
|
||||
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
}
|
||||
return tocOffset, 0, nil
|
||||
return tocOffset, tocOffset, 0, nil
|
||||
}
|
||||
|
||||
func (gz *legacyGzipDecompressor) FooterSize() int64 {
|
||||
|
494
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
generated
vendored
494
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
generated
vendored
@ -148,93 +148,96 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
||||
srcCompression := srcCompression
|
||||
for _, cl := range controllers {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q-src=%d", cl, prefix, srcCompression), func(t *testing.T) {
|
||||
tarBlob := buildTarStatic(t, tt.in, prefix)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("faield to parse tar: %v", err)
|
||||
}
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
|
||||
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse tar: %v", err)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare sample data
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl)
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("faield to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("faield to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
// Prepare sample data
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl)
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("failed to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("failed to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
|
||||
// Prepare testing data
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(tt.chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("faield to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
// Prepare testing data
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(tt.chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -526,7 +529,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
dir("test2/"), // modified
|
||||
), allowedPrefix[0])),
|
||||
},
|
||||
@ -544,7 +547,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", ""),
|
||||
file("foo.txt", "M"), // modified
|
||||
dir("test/"),
|
||||
@ -567,7 +570,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
|
||||
file("foo.txt", "a"),
|
||||
dir("test/"),
|
||||
@ -593,7 +596,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", "bazbazbazbazbazbazbaz"),
|
||||
file("foo.txt", "a"),
|
||||
symlink("barlink", "test/bar.txt"),
|
||||
@ -615,30 +618,33 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
|
||||
// Get original tar file and chunk digests
|
||||
dgstMap := make(map[string]digest.Digest)
|
||||
tarBlob := buildTarStatic(t, tt.tarInit(t, dgstMap), prefix)
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
|
||||
// Get original tar file and chunk digests
|
||||
dgstMap := make(map[string]digest.Digest)
|
||||
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
|
||||
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to convert stargz: %v", err)
|
||||
}
|
||||
tocDigest := rc.TOCDigest()
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
newStargz := buf.Bytes()
|
||||
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
|
||||
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to convert stargz: %v", err)
|
||||
}
|
||||
tocDigest := rc.TOCDigest()
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
newStargz := buf.Bytes()
|
||||
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
|
||||
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
|
||||
|
||||
for _, check := range tt.checks {
|
||||
check(t, newStargz, tocDigest, dgstMap, cl)
|
||||
}
|
||||
})
|
||||
for _, check := range tt.checks {
|
||||
check(t, newStargz, tocDigest, dgstMap, cl)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1058,7 +1064,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
|
||||
if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
|
||||
return nil, 0, errors.Wrap(err, "error reading footer")
|
||||
}
|
||||
tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
|
||||
_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to parse footer")
|
||||
}
|
||||
@ -1085,11 +1091,15 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
in []tarEntry
|
||||
want []stargzCheck
|
||||
wantNumGz int // expected number of streams
|
||||
|
||||
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
|
||||
wantFailOnLossLess bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
in: tarOf(),
|
||||
wantNumGz: 2, // TOC + footer
|
||||
name: "empty",
|
||||
in: tarOf(),
|
||||
wantNumGz: 2, // empty tar + TOC + footer
|
||||
wantNumGzLossLess: 3, // empty tar + TOC + footer
|
||||
want: checks(
|
||||
numTOCEntries(0),
|
||||
),
|
||||
@ -1224,26 +1234,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
{
|
||||
name: "block_char_fifo",
|
||||
in: tarOf(
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "b",
|
||||
Typeflag: tar.TypeBlock,
|
||||
Devmajor: 123,
|
||||
Devminor: 456,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "c",
|
||||
Typeflag: tar.TypeChar,
|
||||
Devmajor: 111,
|
||||
Devminor: 222,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "f",
|
||||
Typeflag: tar.TypeFifo,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
),
|
||||
@ -1278,6 +1291,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
hasMode("foo3/bar5", os.FileMode(0755)),
|
||||
),
|
||||
},
|
||||
{
|
||||
name: "lossy",
|
||||
in: tarOf(
|
||||
dir("bar/", sampleOwner),
|
||||
dir("foo/", sampleOwner),
|
||||
file("foo/bar.txt", content, sampleOwner),
|
||||
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
|
||||
),
|
||||
wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer
|
||||
want: checks(
|
||||
numTOCEntries(3),
|
||||
hasDir("bar/"),
|
||||
hasDir("foo/"),
|
||||
hasFileLen("foo/bar.txt", len(content)),
|
||||
entryHasChildren("", "bar", "foo"),
|
||||
entryHasChildren("foo", "bar.txt"),
|
||||
hasChunkEntries("foo/bar.txt", 1),
|
||||
hasEntryOwner("bar/", sampleOwner),
|
||||
hasEntryOwner("foo/", sampleOwner),
|
||||
hasEntryOwner("foo/bar.txt", sampleOwner),
|
||||
),
|
||||
wantFailOnLossLess: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@ -1285,47 +1321,90 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
|
||||
tr, cancel := buildTar(t, tt.in, prefix)
|
||||
defer cancel()
|
||||
var stargzBuf bytes.Buffer
|
||||
w := NewWriterWithCompressor(&stargzBuf, cl)
|
||||
w.ChunkSize = tt.chunkSize
|
||||
if err := w.AppendTar(tr); err != nil {
|
||||
t.Fatalf("Append: %v", err)
|
||||
}
|
||||
if _, err := w.Close(); err != nil {
|
||||
t.Fatalf("Writer.Close: %v", err)
|
||||
}
|
||||
b := stargzBuf.Bytes()
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
for _, lossless := range []bool{true, false} {
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
|
||||
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
origTarDgstr := digest.Canonical.Digester()
|
||||
tr = io.TeeReader(tr, origTarDgstr.Hash())
|
||||
var stargzBuf bytes.Buffer
|
||||
w := NewWriterWithCompressor(&stargzBuf, cl)
|
||||
w.ChunkSize = tt.chunkSize
|
||||
if lossless {
|
||||
err := w.AppendTarLossLess(tr)
|
||||
if tt.wantFailOnLossLess {
|
||||
if err != nil {
|
||||
return // expected to fail
|
||||
}
|
||||
t.Fatalf("Append wanted to fail on lossless")
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Append(lossless): %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := w.AppendTar(tr); err != nil {
|
||||
t.Fatalf("Append: %v", err)
|
||||
}
|
||||
}
|
||||
if _, err := w.Close(); err != nil {
|
||||
t.Fatalf("Writer.Close: %v", err)
|
||||
}
|
||||
b := stargzBuf.Bytes()
|
||||
|
||||
diffID := w.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, b)
|
||||
if diffID != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
if lossless {
|
||||
// Check if the result blob reserves original tar metadata
|
||||
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
|
||||
if err != nil {
|
||||
t.Errorf("failed to decompress blob: %v", err)
|
||||
return
|
||||
}
|
||||
defer rc.Close()
|
||||
resultDgstr := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
|
||||
t.Errorf("failed to read result decompressed blob: %v", err)
|
||||
return
|
||||
}
|
||||
if resultDgstr.Digest() != origTarDgstr.Digest() {
|
||||
t.Errorf("lossy compression occurred: digest=%v; want %v",
|
||||
resultDgstr.Digest(), origTarDgstr.Digest())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
got := cl.CountStreams(t, b)
|
||||
if got != tt.wantNumGz {
|
||||
t.Errorf("number of streams = %d; want %d", got, tt.wantNumGz)
|
||||
}
|
||||
diffID := w.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, b)
|
||||
if diffID != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
|
||||
telemetry, checkCalled := newCalledTelemetry()
|
||||
r, err := Open(
|
||||
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
|
||||
WithDecompressors(cl),
|
||||
WithTelemetry(telemetry),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("stargz.Open: %v", err)
|
||||
got := cl.CountStreams(t, b)
|
||||
wantNumGz := tt.wantNumGz
|
||||
if lossless && tt.wantNumGzLossLess > 0 {
|
||||
wantNumGz = tt.wantNumGzLossLess
|
||||
}
|
||||
if got != wantNumGz {
|
||||
t.Errorf("number of streams = %d; want %d", got, wantNumGz)
|
||||
}
|
||||
|
||||
telemetry, checkCalled := newCalledTelemetry()
|
||||
r, err := Open(
|
||||
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
|
||||
WithDecompressors(cl),
|
||||
WithTelemetry(telemetry),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("stargz.Open: %v", err)
|
||||
}
|
||||
if err := checkCalled(); err != nil {
|
||||
t.Errorf("telemetry failure: %v", err)
|
||||
}
|
||||
for _, want := range tt.want {
|
||||
want.check(t, r)
|
||||
}
|
||||
})
|
||||
}
|
||||
if err := checkCalled(); err != nil {
|
||||
t.Errorf("telemetry failure: %v", err)
|
||||
}
|
||||
for _, want := range tt.want {
|
||||
want.check(t, r)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1655,49 +1734,41 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
|
||||
func tarOf(s ...tarEntry) []tarEntry { return s }
|
||||
|
||||
type tarEntry interface {
|
||||
appendTar(tw *tar.Writer, prefix string) error
|
||||
appendTar(tw *tar.Writer, prefix string, format tar.Format) error
|
||||
}
|
||||
|
||||
type tarEntryFunc func(*tar.Writer, string) error
|
||||
type tarEntryFunc func(*tar.Writer, string, tar.Format) error
|
||||
|
||||
func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string) error { return f(tw, prefix) }
|
||||
|
||||
func buildTar(t *testing.T, ents []tarEntry, prefix string) (r io.Reader, cancel func()) {
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
tw := tar.NewWriter(pw)
|
||||
for _, ent := range ents {
|
||||
if err := ent.appendTar(tw, prefix); err != nil {
|
||||
t.Errorf("building input tar: %v", err)
|
||||
pw.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Errorf("closing write of input tar: %v", err)
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr, func() { go pr.Close(); go pw.Close() }
|
||||
func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
return f(tw, prefix, format)
|
||||
}
|
||||
|
||||
func buildTarStatic(t *testing.T, ents []tarEntry, prefix string) *io.SectionReader {
|
||||
func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
|
||||
format := tar.FormatUnknown
|
||||
for _, opt := range opts {
|
||||
switch v := opt.(type) {
|
||||
case tar.Format:
|
||||
format = v
|
||||
default:
|
||||
panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
|
||||
}
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
for _, ent := range ents {
|
||||
if err := ent.appendTar(tw, prefix); err != nil {
|
||||
if err := ent.appendTar(tw, prefix, format); err != nil {
|
||||
t.Fatalf("building input tar: %v", err)
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Errorf("closing write of input tar: %v", err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works
|
||||
return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
|
||||
}
|
||||
|
||||
func dir(name string, opts ...interface{}) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
var o owner
|
||||
mode := os.FileMode(0755)
|
||||
for _, opt := range opts {
|
||||
@ -1723,6 +1794,7 @@ func dir(name string, opts ...interface{}) tarEntry {
|
||||
Mode: tm,
|
||||
Uid: o.uid,
|
||||
Gid: o.gid,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
@ -1737,7 +1809,7 @@ type owner struct {
|
||||
}
|
||||
|
||||
func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
var xattrs xAttr
|
||||
var o owner
|
||||
mode := os.FileMode(0644)
|
||||
@ -1760,6 +1832,9 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(xattrs) > 0 {
|
||||
format = tar.FormatPAX // only PAX supports xattrs
|
||||
}
|
||||
if err := tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: prefix + name,
|
||||
@ -1768,6 +1843,7 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
Size: int64(len(contents)),
|
||||
Uid: o.uid,
|
||||
Gid: o.gid,
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1777,78 +1853,76 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
}
|
||||
|
||||
func symlink(name, target string) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
return tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Name: prefix + name,
|
||||
Linkname: target,
|
||||
Mode: 0644,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func link(name string, linkname string) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeLink,
|
||||
Name: prefix + name,
|
||||
Linkname: linkname,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeLink,
|
||||
Name: prefix + name,
|
||||
Linkname: linkname,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func chardev(name string, major, minor int64) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeChar,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeChar,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func blockdev(name string, major, minor int64) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeBlock,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeBlock,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
func fifo(name string) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeFifo,
|
||||
Name: prefix + name,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeFifo,
|
||||
Name: prefix + name,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func prefetchLandmark() tarEntry {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Name: PrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1861,11 +1935,12 @@ func prefetchLandmark() tarEntry {
|
||||
}
|
||||
|
||||
func noPrefetchLandmark() tarEntry {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Name: NoPrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1899,11 +1974,12 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
|
||||
n += size
|
||||
}
|
||||
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: prefix + name,
|
||||
Size: int64(len(content)),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
6
vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
generated
vendored
6
vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
generated
vendored
@ -290,7 +290,7 @@ type Compressor interface {
|
||||
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
|
||||
}
|
||||
|
||||
// Deompressor represents the helper mothods to be used for parsing eStargz.
|
||||
// Decompressor represents the helper mothods to be used for parsing eStargz.
|
||||
type Decompressor interface {
|
||||
// Reader returns ReadCloser to be used for decompressing file payload.
|
||||
Reader(r io.Reader) (io.ReadCloser, error)
|
||||
@ -299,10 +299,12 @@ type Decompressor interface {
|
||||
FooterSize() int64
|
||||
|
||||
// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
|
||||
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
|
||||
// the top until the TOC JSON).
|
||||
//
|
||||
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
|
||||
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
|
||||
ParseFooter(p []byte) (tocOffset, tocSize int64, err error)
|
||||
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
|
||||
|
||||
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
|
||||
// of the underlying blob that has the range specified by ParseFooter method.
|
||||
|
25
vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go
generated
vendored
25
vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go
generated
vendored
@ -32,10 +32,16 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
ZstdChunkedManifestChecksumAnnotation = "io.containers.zstd-chunked.manifest-checksum"
|
||||
ZstdChunkedManifestPositionAnnotation = "io.containers.zstd-chunked.manifest-position"
|
||||
FooterSize = 40
|
||||
manifestTypeCRFS = 1
|
||||
// ManifestChecksumAnnotation is an annotation that contains the compressed TOC Digset
|
||||
ManifestChecksumAnnotation = "io.containers.zstd-chunked.manifest-checksum"
|
||||
|
||||
// ManifestPositionAnnotation is an annotation that contains the offset to the TOC.
|
||||
ManifestPositionAnnotation = "io.containers.zstd-chunked.manifest-position"
|
||||
|
||||
// FooterSize is the size of the footer
|
||||
FooterSize = 40
|
||||
|
||||
manifestTypeCRFS = 1
|
||||
)
|
||||
|
||||
var (
|
||||
@ -68,13 +74,14 @@ func (zz *Decompressor) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest
|
||||
return toc, dgstr.Digest(), nil
|
||||
}
|
||||
|
||||
func (zz *Decompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
|
||||
func (zz *Decompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
|
||||
offset := binary.LittleEndian.Uint64(p[0:8])
|
||||
compressedLength := binary.LittleEndian.Uint64(p[8:16])
|
||||
if !bytes.Equal(zstdChunkedFrameMagic, p[32:40]) {
|
||||
return 0, 0, fmt.Errorf("invalid magic number")
|
||||
return 0, 0, 0, fmt.Errorf("invalid magic number")
|
||||
}
|
||||
return int64(offset), int64(compressedLength), nil
|
||||
// 8 is the size of the zstd skippable frame header + the frame size (see WriteTOCAndFooter)
|
||||
return int64(offset - 8), int64(offset), int64(compressedLength), nil
|
||||
}
|
||||
|
||||
func (zz *Decompressor) FooterSize() int64 {
|
||||
@ -149,8 +156,8 @@ func (zc *Compressor) WriteTOCAndFooter(w io.Writer, off int64, toc *estargz.JTO
|
||||
}
|
||||
|
||||
if zc.Metadata != nil {
|
||||
zc.Metadata[ZstdChunkedManifestChecksumAnnotation] = digest.FromBytes(compressedTOC).String()
|
||||
zc.Metadata[ZstdChunkedManifestPositionAnnotation] = fmt.Sprintf("%d:%d:%d:%d",
|
||||
zc.Metadata[ManifestChecksumAnnotation] = digest.FromBytes(compressedTOC).String()
|
||||
zc.Metadata[ManifestPositionAnnotation] = fmt.Sprintf("%d:%d:%d:%d",
|
||||
tocOff, len(compressedTOC), len(tocJSON), manifestTypeCRFS)
|
||||
}
|
||||
|
||||
|
105
vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
105
vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
@ -15,7 +15,7 @@ type roffRenderer struct {
|
||||
extensions blackfriday.Extensions
|
||||
listCounters []int
|
||||
firstHeader bool
|
||||
defineTerm bool
|
||||
firstDD bool
|
||||
listDepth int
|
||||
}
|
||||
|
||||
@ -42,7 +42,8 @@ const (
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
listCloseTag = "\n.RE\n"
|
||||
arglistTag = "\n.TP\n"
|
||||
dtTag = "\n.TP\n"
|
||||
dd2Tag = "\n"
|
||||
tableStart = "\n.TS\nallbox;\n"
|
||||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
r.handleText(w, node, entering)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
case blackfriday.Softbreak:
|
||||
out(w, crTag)
|
||||
case blackfriday.Hardbreak:
|
||||
@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
out(w, codeCloseTag)
|
||||
case blackfriday.Table:
|
||||
r.handleTable(w, node, entering)
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.TableHead:
|
||||
case blackfriday.TableBody:
|
||||
case blackfriday.TableRow:
|
||||
// no action as cell entries do all the nroff formatting
|
||||
return blackfriday.GoToNext
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.HTMLSpan:
|
||||
// ignore other HTML tags
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
}
|
||||
return walkAction
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
var (
|
||||
start, end string
|
||||
)
|
||||
// handle special roff table cell text encapsulation
|
||||
if node.Parent.Type == blackfriday.TableCell {
|
||||
if len(node.Literal) > 30 {
|
||||
start = tableCellStart
|
||||
end = tableCellEnd
|
||||
} else {
|
||||
// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
|
||||
if node.Parent.Next == nil && !node.Parent.IsHeader {
|
||||
end = crTag
|
||||
}
|
||||
}
|
||||
}
|
||||
out(w, start)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, end)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
switch node.Level {
|
||||
@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||
r.listCounters[len(r.listCounters)-1]++
|
||||
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||
// DT (definition term): line just before DD (see below).
|
||||
out(w, dtTag)
|
||||
r.firstDD = true
|
||||
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// state machine for handling terms and following definitions
|
||||
// since blackfriday does not distinguish them properly, nor
|
||||
// does it seperate them into separate lists as it should
|
||||
if !r.defineTerm {
|
||||
out(w, arglistTag)
|
||||
r.defineTerm = true
|
||||
// DD (definition description): line that starts with ": ".
|
||||
//
|
||||
// We have to distinguish between the first DD and the
|
||||
// subsequent ones, as there should be no vertical
|
||||
// whitespace between the DT and the first DD.
|
||||
if r.firstDD {
|
||||
r.firstDD = false
|
||||
} else {
|
||||
r.defineTerm = false
|
||||
out(w, dd2Tag)
|
||||
}
|
||||
} else {
|
||||
out(w, ".IP \\(bu 2\n")
|
||||
@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
|
||||
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
out(w, tableStart)
|
||||
//call walker to count cells (and rows?) so format section can be produced
|
||||
// call walker to count cells (and rows?) so format section can be produced
|
||||
columns := countColumns(node)
|
||||
out(w, strings.Repeat("l ", columns)+"\n")
|
||||
out(w, strings.Repeat("l ", columns)+".\n")
|
||||
@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
var (
|
||||
start, end string
|
||||
)
|
||||
if node.IsHeader {
|
||||
start = codespanTag
|
||||
end = codespanCloseTag
|
||||
}
|
||||
if entering {
|
||||
var start string
|
||||
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||
out(w, "\t"+start)
|
||||
} else {
|
||||
out(w, start)
|
||||
start = "\t"
|
||||
}
|
||||
if node.IsHeader {
|
||||
start += codespanTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
start += tableCellStart
|
||||
}
|
||||
out(w, start)
|
||||
} else {
|
||||
// need to carriage return if we are at the end of the header row
|
||||
if node.IsHeader && node.Next == nil {
|
||||
end = end + crTag
|
||||
var end string
|
||||
if node.IsHeader {
|
||||
end = codespanCloseTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
if node.Next == nil && end != tableCellEnd {
|
||||
// Last cell: need to carriage return if we are at the end of the
|
||||
// header row and content isn't wrapped in a "tablecell"
|
||||
end += crTag
|
||||
}
|
||||
out(w, end)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeLiteralSize(node *blackfriday.Node) int {
|
||||
total := 0
|
||||
for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||||
total += len(n.Literal)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// because roff format requires knowing the column count before outputting any table
|
||||
// data we need to walk a table tree and count the columns
|
||||
func countColumns(node *blackfriday.Node) int {
|
||||
@ -309,15 +309,6 @@ func out(w io.Writer, output string) {
|
||||
io.WriteString(w, output) // nolint: errcheck
|
||||
}
|
||||
|
||||
func needsBackslash(c byte) bool {
|
||||
for _, r := range []byte("-_&\\~") {
|
||||
if c == r {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// escape initial apostrophe or period
|
||||
@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
// directly copy normal characters
|
||||
org := i
|
||||
|
||||
for i < len(text) && !needsBackslash(text[i]) {
|
||||
for i < len(text) && text[i] != '\\' {
|
||||
i++
|
||||
}
|
||||
if i > org {
|
||||
|
27
vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
generated
vendored
Normal file
27
vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
package challenge
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FROM: https://golang.org/src/net/http/http.go
|
||||
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
|
||||
// return true if the string includes a port.
|
||||
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
|
||||
|
||||
// FROM: http://golang.org/src/net/http/transport.go
|
||||
var portMap = map[string]string{
|
||||
"http": "80",
|
||||
"https": "443",
|
||||
}
|
||||
|
||||
// canonicalAddr returns url.Host but always with a ":port" suffix
|
||||
// FROM: http://golang.org/src/net/http/transport.go
|
||||
func canonicalAddr(url *url.URL) string {
|
||||
addr := url.Host
|
||||
if !hasPort(addr) {
|
||||
return addr + ":" + portMap[url.Scheme]
|
||||
}
|
||||
return addr
|
||||
}
|
237
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
generated
vendored
Normal file
237
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
|
||||
package challenge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Challenge carries information from a WWW-Authenticate response header.
|
||||
// See RFC 2617.
|
||||
type Challenge struct {
|
||||
// Scheme is the auth-scheme according to RFC 2617
|
||||
Scheme string
|
||||
|
||||
// Parameters are the auth-params according to RFC 2617
|
||||
Parameters map[string]string
|
||||
}
|
||||
|
||||
// Manager manages the challenges for endpoints.
|
||||
// The challenges are pulled out of HTTP responses. Only
|
||||
// responses which expect challenges should be added to
|
||||
// the manager, since a non-unauthorized request will be
|
||||
// viewed as not requiring challenges.
|
||||
type Manager interface {
|
||||
// GetChallenges returns the challenges for the given
|
||||
// endpoint URL.
|
||||
GetChallenges(endpoint url.URL) ([]Challenge, error)
|
||||
|
||||
// AddResponse adds the response to the challenge
|
||||
// manager. The challenges will be parsed out of
|
||||
// the WWW-Authenicate headers and added to the
|
||||
// URL which was produced the response. If the
|
||||
// response was authorized, any challenges for the
|
||||
// endpoint will be cleared.
|
||||
AddResponse(resp *http.Response) error
|
||||
}
|
||||
|
||||
// NewSimpleManager returns an instance of
|
||||
// Manger which only maps endpoints to challenges
|
||||
// based on the responses which have been added the
|
||||
// manager. The simple manager will make no attempt to
|
||||
// perform requests on the endpoints or cache the responses
|
||||
// to a backend.
|
||||
func NewSimpleManager() Manager {
|
||||
return &simpleManager{
|
||||
Challenges: make(map[string][]Challenge),
|
||||
}
|
||||
}
|
||||
|
||||
type simpleManager struct {
|
||||
sync.RWMutex
|
||||
Challenges map[string][]Challenge
|
||||
}
|
||||
|
||||
func normalizeURL(endpoint *url.URL) {
|
||||
endpoint.Host = strings.ToLower(endpoint.Host)
|
||||
endpoint.Host = canonicalAddr(endpoint)
|
||||
}
|
||||
|
||||
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
|
||||
normalizeURL(&endpoint)
|
||||
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
challenges := m.Challenges[endpoint.String()]
|
||||
return challenges, nil
|
||||
}
|
||||
|
||||
func (m *simpleManager) AddResponse(resp *http.Response) error {
|
||||
challenges := ResponseChallenges(resp)
|
||||
if resp.Request == nil {
|
||||
return fmt.Errorf("missing request reference")
|
||||
}
|
||||
urlCopy := url.URL{
|
||||
Path: resp.Request.URL.Path,
|
||||
Host: resp.Request.URL.Host,
|
||||
Scheme: resp.Request.URL.Scheme,
|
||||
}
|
||||
normalizeURL(&urlCopy)
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.Challenges[urlCopy.String()] = challenges
|
||||
return nil
|
||||
}
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
type octetType byte
|
||||
|
||||
var octetTypes [256]octetType
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
// ResponseChallenges returns a list of authorization challenges
|
||||
// for the given http Response. Challenges are only checked if
|
||||
// the response status code was a 401.
|
||||
func ResponseChallenges(resp *http.Response) []Challenge {
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
// Parse the WWW-Authenticate Header and store the challenges
|
||||
// on this endpoint object.
|
||||
return parseAuthHeader(resp.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseAuthHeader(header http.Header) []Challenge {
|
||||
challenges := []Challenge{}
|
||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||
v, p := parseValueAndParams(h)
|
||||
if v != "" {
|
||||
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
|
||||
}
|
||||
}
|
||||
return challenges
|
||||
}
|
||||
|
||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s := expectToken(header)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
s = "," + skipSpace(s)
|
||||
for strings.HasPrefix(s, ",") {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s[1:]))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
if pvalue == "" {
|
||||
return
|
||||
}
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return expectToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
76
vendor/github.com/golang/mock/gomock/call.go
generated
vendored
76
vendor/github.com/golang/mock/gomock/call.go
generated
vendored
@ -50,19 +50,22 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle
|
||||
t.Helper()
|
||||
|
||||
// TODO: check arity, types.
|
||||
margs := make([]Matcher, len(args))
|
||||
mArgs := make([]Matcher, len(args))
|
||||
for i, arg := range args {
|
||||
if m, ok := arg.(Matcher); ok {
|
||||
margs[i] = m
|
||||
mArgs[i] = m
|
||||
} else if arg == nil {
|
||||
// Handle nil specially so that passing a nil interface value
|
||||
// will match the typed nils of concrete args.
|
||||
margs[i] = Nil()
|
||||
mArgs[i] = Nil()
|
||||
} else {
|
||||
margs[i] = Eq(arg)
|
||||
mArgs[i] = Eq(arg)
|
||||
}
|
||||
}
|
||||
|
||||
// callerInfo's skip should be updated if the number of calls between the user's test
|
||||
// and this line changes, i.e. this code is wrapped in another anonymous function.
|
||||
// 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test.
|
||||
origin := callerInfo(3)
|
||||
actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
|
||||
// Synthesize the zero value for each of the return args' types.
|
||||
@ -73,7 +76,7 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle
|
||||
return rets
|
||||
}}
|
||||
return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
|
||||
args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
|
||||
args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
|
||||
}
|
||||
|
||||
// AnyTimes allows the expectation to be called 0 or more times
|
||||
@ -110,19 +113,25 @@ func (c *Call) DoAndReturn(f interface{}) *Call {
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []interface{}) []interface{} {
|
||||
vargs := make([]reflect.Value, len(args))
|
||||
c.t.Helper()
|
||||
vArgs := make([]reflect.Value, len(args))
|
||||
ft := v.Type()
|
||||
if c.methodType.NumIn() != ft.NumIn() {
|
||||
c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
|
||||
return nil
|
||||
}
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vargs[i] = reflect.ValueOf(args[i])
|
||||
vArgs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vargs[i] = reflect.Zero(ft.In(i))
|
||||
vArgs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
vrets := v.Call(vargs)
|
||||
rets := make([]interface{}, len(vrets))
|
||||
for i, ret := range vrets {
|
||||
vRets := v.Call(vArgs)
|
||||
rets := make([]interface{}, len(vRets))
|
||||
for i, ret := range vRets {
|
||||
rets[i] = ret.Interface()
|
||||
}
|
||||
return rets
|
||||
@ -139,17 +148,23 @@ func (c *Call) Do(f interface{}) *Call {
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []interface{}) []interface{} {
|
||||
vargs := make([]reflect.Value, len(args))
|
||||
c.t.Helper()
|
||||
if c.methodType.NumIn() != v.Type().NumIn() {
|
||||
c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin)
|
||||
return nil
|
||||
}
|
||||
vArgs := make([]reflect.Value, len(args))
|
||||
ft := v.Type()
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vargs[i] = reflect.ValueOf(args[i])
|
||||
vArgs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vargs[i] = reflect.Zero(ft.In(i))
|
||||
vArgs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
v.Call(vargs)
|
||||
v.Call(vArgs)
|
||||
return nil
|
||||
})
|
||||
return c
|
||||
@ -301,14 +316,9 @@ func (c *Call) matches(args []interface{}) error {
|
||||
|
||||
for i, m := range c.args {
|
||||
if !m.Matches(args[i]) {
|
||||
got := fmt.Sprintf("%v", args[i])
|
||||
if gs, ok := m.(GotFormatter); ok {
|
||||
got = gs.Got(args[i])
|
||||
}
|
||||
|
||||
return fmt.Errorf(
|
||||
"expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v",
|
||||
c.origin, i, got, m,
|
||||
c.origin, i, formatGottenArg(m, args[i]), m,
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -331,7 +341,7 @@ func (c *Call) matches(args []interface{}) error {
|
||||
// Non-variadic args
|
||||
if !m.Matches(args[i]) {
|
||||
return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), args[i], m)
|
||||
c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@ -355,12 +365,12 @@ func (c *Call) matches(args []interface{}) error {
|
||||
// matches all the remaining arguments or the lack of any.
|
||||
// Convert the remaining arguments, if any, into a slice of the
|
||||
// expected type.
|
||||
vargsType := c.methodType.In(c.methodType.NumIn() - 1)
|
||||
vargs := reflect.MakeSlice(vargsType, 0, len(args)-i)
|
||||
vArgsType := c.methodType.In(c.methodType.NumIn() - 1)
|
||||
vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i)
|
||||
for _, arg := range args[i:] {
|
||||
vargs = reflect.Append(vargs, reflect.ValueOf(arg))
|
||||
vArgs = reflect.Append(vArgs, reflect.ValueOf(arg))
|
||||
}
|
||||
if m.Matches(vargs.Interface()) {
|
||||
if m.Matches(vArgs.Interface()) {
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
|
||||
@ -373,16 +383,16 @@ func (c *Call) matches(args []interface{}) error {
|
||||
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
|
||||
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), args[i:], c.args[i])
|
||||
|
||||
return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Check that all prerequisite calls have been satisfied.
|
||||
for _, preReqCall := range c.preReqs {
|
||||
if !preReqCall.satisfied() {
|
||||
return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
|
||||
return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
|
||||
c.origin, preReqCall, c)
|
||||
}
|
||||
}
|
||||
@ -425,3 +435,11 @@ func setSlice(arg interface{}, v reflect.Value) {
|
||||
func (c *Call) addAction(action func([]interface{}) []interface{}) {
|
||||
c.actions = append(c.actions, action)
|
||||
}
|
||||
|
||||
func formatGottenArg(m Matcher, arg interface{}) string {
|
||||
got := fmt.Sprintf("%v (%T)", arg, arg)
|
||||
if gs, ok := m.(GotFormatter); ok {
|
||||
got = gs.Got(arg)
|
||||
}
|
||||
return got
|
||||
}
|
||||
|
7
vendor/github.com/golang/mock/gomock/callset.go
generated
vendored
7
vendor/github.com/golang/mock/gomock/callset.go
generated
vendored
@ -16,6 +16,7 @@ package gomock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
@ -84,14 +85,18 @@ func (cs callSet) FindMatch(receiver interface{}, method string, args []interfac
|
||||
for _, call := range exhausted {
|
||||
if err := call.matches(args); err != nil {
|
||||
_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||
continue
|
||||
}
|
||||
_, _ = fmt.Fprintf(
|
||||
&callsErrors, "all expected calls for method %q have been exhausted", method,
|
||||
)
|
||||
}
|
||||
|
||||
if len(expected)+len(exhausted) == 0 {
|
||||
_, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(callsErrors.String())
|
||||
return nil, errors.New(callsErrors.String())
|
||||
}
|
||||
|
||||
// Failures returns the calls that are not satisfied.
|
||||
|
110
vendor/github.com/golang/mock/gomock/controller.go
generated
vendored
110
vendor/github.com/golang/mock/gomock/controller.go
generated
vendored
@ -50,9 +50,6 @@
|
||||
// mockObj.EXPECT().SomeMethod(2, "second"),
|
||||
// mockObj.EXPECT().SomeMethod(3, "third"),
|
||||
// )
|
||||
//
|
||||
// TODO:
|
||||
// - Handle different argument/return types (e.g. ..., chan, map, interface).
|
||||
package gomock
|
||||
|
||||
import (
|
||||
@ -77,6 +74,15 @@ type TestHelper interface {
|
||||
Helper()
|
||||
}
|
||||
|
||||
// cleanuper is used to check if TestHelper also has the `Cleanup` method. A
|
||||
// common pattern is to pass in a `*testing.T` to
|
||||
// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup
|
||||
// method. This can be utilized to call `Finish()` so the caller of this library
|
||||
// does not have to.
|
||||
type cleanuper interface {
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// A Controller represents the top-level control of a mock ecosystem. It
|
||||
// defines the scope and lifetime of mock objects, as well as their
|
||||
// expectations. It is safe to call Controller's methods from multiple
|
||||
@ -115,29 +121,43 @@ type Controller struct {
|
||||
|
||||
// NewController returns a new Controller. It is the preferred way to create a
|
||||
// Controller.
|
||||
//
|
||||
// New in go1.14+, if you are passing a *testing.T into this function you no
|
||||
// longer need to call ctrl.Finish() in your test methods.
|
||||
func NewController(t TestReporter) *Controller {
|
||||
h, ok := t.(TestHelper)
|
||||
if !ok {
|
||||
h = nopTestHelper{t}
|
||||
h = &nopTestHelper{t}
|
||||
}
|
||||
|
||||
return &Controller{
|
||||
ctrl := &Controller{
|
||||
T: h,
|
||||
expectedCalls: newCallSet(),
|
||||
}
|
||||
if c, ok := isCleanuper(ctrl.T); ok {
|
||||
c.Cleanup(func() {
|
||||
ctrl.T.Helper()
|
||||
ctrl.finish(true, nil)
|
||||
})
|
||||
}
|
||||
|
||||
return ctrl
|
||||
}
|
||||
|
||||
type cancelReporter struct {
|
||||
TestHelper
|
||||
t TestHelper
|
||||
cancel func()
|
||||
}
|
||||
|
||||
func (r *cancelReporter) Errorf(format string, args ...interface{}) {
|
||||
r.TestHelper.Errorf(format, args...)
|
||||
r.t.Errorf(format, args...)
|
||||
}
|
||||
func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
|
||||
defer r.cancel()
|
||||
r.TestHelper.Fatalf(format, args...)
|
||||
r.t.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (r *cancelReporter) Helper() {
|
||||
r.t.Helper()
|
||||
}
|
||||
|
||||
// WithContext returns a new Controller and a Context, which is cancelled on any
|
||||
@ -145,15 +165,22 @@ func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
|
||||
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
|
||||
h, ok := t.(TestHelper)
|
||||
if !ok {
|
||||
h = nopTestHelper{t}
|
||||
h = &nopTestHelper{t: t}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return NewController(&cancelReporter{h, cancel}), ctx
|
||||
return NewController(&cancelReporter{t: h, cancel: cancel}), ctx
|
||||
}
|
||||
|
||||
type nopTestHelper struct {
|
||||
TestReporter
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func (h *nopTestHelper) Errorf(format string, args ...interface{}) {
|
||||
h.t.Errorf(format, args...)
|
||||
}
|
||||
func (h *nopTestHelper) Fatalf(format string, args ...interface{}) {
|
||||
h.t.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (h nopTestHelper) Helper() {}
|
||||
@ -197,7 +224,10 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf
|
||||
|
||||
expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
|
||||
if err != nil {
|
||||
origin := callerInfo(2)
|
||||
// callerInfo's skip should be updated if the number of calls between the user's test
|
||||
// and this line changes, i.e. this code is wrapped in another anonymous function.
|
||||
// 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test.
|
||||
origin := callerInfo(3)
|
||||
ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
|
||||
}
|
||||
|
||||
@ -229,21 +259,33 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf
|
||||
// Finish checks to see if all the methods that were expected to be called
|
||||
// were called. It should be invoked for each Controller. It is not idempotent
|
||||
// and therefore can only be invoked once.
|
||||
//
|
||||
// New in go1.14+, if you are passing a *testing.T into NewController function you no
|
||||
// longer need to call ctrl.Finish() in your test methods.
|
||||
func (ctrl *Controller) Finish() {
|
||||
// If we're currently panicking, probably because this is a deferred call.
|
||||
// This must be recovered in the deferred function.
|
||||
err := recover()
|
||||
ctrl.finish(false, err)
|
||||
}
|
||||
|
||||
func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) {
|
||||
ctrl.T.Helper()
|
||||
|
||||
ctrl.mu.Lock()
|
||||
defer ctrl.mu.Unlock()
|
||||
|
||||
if ctrl.finished {
|
||||
ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
|
||||
if _, ok := isCleanuper(ctrl.T); !ok {
|
||||
ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
|
||||
}
|
||||
return
|
||||
}
|
||||
ctrl.finished = true
|
||||
|
||||
// If we're currently panicking, probably because this is a deferred call,
|
||||
// pass through the panic.
|
||||
if err := recover(); err != nil {
|
||||
panic(err)
|
||||
// Short-circuit, pass through the panic.
|
||||
if panicErr != nil {
|
||||
panic(panicErr)
|
||||
}
|
||||
|
||||
// Check that all remaining expected calls are satisfied.
|
||||
@ -252,13 +294,43 @@ func (ctrl *Controller) Finish() {
|
||||
ctrl.T.Errorf("missing call(s) to %v", call)
|
||||
}
|
||||
if len(failures) != 0 {
|
||||
ctrl.T.Fatalf("aborting test due to missing call(s)")
|
||||
if !cleanup {
|
||||
ctrl.T.Fatalf("aborting test due to missing call(s)")
|
||||
return
|
||||
}
|
||||
ctrl.T.Errorf("aborting test due to missing call(s)")
|
||||
}
|
||||
}
|
||||
|
||||
// callerInfo returns the file:line of the call site. skip is the number
|
||||
// of stack frames to skip when reporting. 0 is callerInfo's call site.
|
||||
func callerInfo(skip int) string {
|
||||
if _, file, line, ok := runtime.Caller(skip + 1); ok {
|
||||
return fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
return "unknown file"
|
||||
}
|
||||
|
||||
// isCleanuper checks it if t's base TestReporter has a Cleanup method.
|
||||
func isCleanuper(t TestReporter) (cleanuper, bool) {
|
||||
tr := unwrapTestReporter(t)
|
||||
c, ok := tr.(cleanuper)
|
||||
return c, ok
|
||||
}
|
||||
|
||||
// unwrapTestReporter unwraps TestReporter to the base implementation.
|
||||
func unwrapTestReporter(t TestReporter) TestReporter {
|
||||
tr := t
|
||||
switch nt := t.(type) {
|
||||
case *cancelReporter:
|
||||
tr = nt.t
|
||||
if h, check := tr.(*nopTestHelper); check {
|
||||
tr = h.t
|
||||
}
|
||||
case *nopTestHelper:
|
||||
tr = nt.t
|
||||
default:
|
||||
// not wrapped
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
94
vendor/github.com/golang/mock/gomock/matchers.go
generated
vendored
94
vendor/github.com/golang/mock/gomock/matchers.go
generated
vendored
@ -102,11 +102,25 @@ type eqMatcher struct {
|
||||
}
|
||||
|
||||
func (e eqMatcher) Matches(x interface{}) bool {
|
||||
return reflect.DeepEqual(e.x, x)
|
||||
// In case, some value is nil
|
||||
if e.x == nil || x == nil {
|
||||
return reflect.DeepEqual(e.x, x)
|
||||
}
|
||||
|
||||
// Check if types assignable and convert them to common type
|
||||
x1Val := reflect.ValueOf(e.x)
|
||||
x2Val := reflect.ValueOf(x)
|
||||
|
||||
if x1Val.Type().AssignableTo(x2Val.Type()) {
|
||||
x1ValConverted := x1Val.Convert(x2Val.Type())
|
||||
return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface())
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (e eqMatcher) String() string {
|
||||
return fmt.Sprintf("is equal to %v", e.x)
|
||||
return fmt.Sprintf("is equal to %v (%T)", e.x, e.x)
|
||||
}
|
||||
|
||||
type nilMatcher struct{}
|
||||
@ -139,7 +153,6 @@ func (n notMatcher) Matches(x interface{}) bool {
|
||||
}
|
||||
|
||||
func (n notMatcher) String() string {
|
||||
// TODO: Improve this if we add a NotString method to the Matcher interface.
|
||||
return "not(" + n.m.String() + ")"
|
||||
}
|
||||
|
||||
@ -194,6 +207,70 @@ func (m lenMatcher) String() string {
|
||||
return fmt.Sprintf("has length %d", m.i)
|
||||
}
|
||||
|
||||
type inAnyOrderMatcher struct {
|
||||
x interface{}
|
||||
}
|
||||
|
||||
func (m inAnyOrderMatcher) Matches(x interface{}) bool {
|
||||
given, ok := m.prepareValue(x)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
wanted, ok := m.prepareValue(m.x)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if given.Len() != wanted.Len() {
|
||||
return false
|
||||
}
|
||||
|
||||
usedFromGiven := make([]bool, given.Len())
|
||||
foundFromWanted := make([]bool, wanted.Len())
|
||||
for i := 0; i < wanted.Len(); i++ {
|
||||
wantedMatcher := Eq(wanted.Index(i).Interface())
|
||||
for j := 0; j < given.Len(); j++ {
|
||||
if usedFromGiven[j] {
|
||||
continue
|
||||
}
|
||||
if wantedMatcher.Matches(given.Index(j).Interface()) {
|
||||
foundFromWanted[i] = true
|
||||
usedFromGiven[j] = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missingFromWanted := 0
|
||||
for _, found := range foundFromWanted {
|
||||
if !found {
|
||||
missingFromWanted++
|
||||
}
|
||||
}
|
||||
extraInGiven := 0
|
||||
for _, used := range usedFromGiven {
|
||||
if !used {
|
||||
extraInGiven++
|
||||
}
|
||||
}
|
||||
|
||||
return extraInGiven == 0 && missingFromWanted == 0
|
||||
}
|
||||
|
||||
func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) {
|
||||
xValue := reflect.ValueOf(x)
|
||||
switch xValue.Kind() {
|
||||
case reflect.Slice, reflect.Array:
|
||||
return xValue, true
|
||||
default:
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
}
|
||||
|
||||
func (m inAnyOrderMatcher) String() string {
|
||||
return fmt.Sprintf("has the same elements as %v", m.x)
|
||||
}
|
||||
|
||||
// Constructors
|
||||
|
||||
// All returns a composite Matcher that returns true if and only all of the
|
||||
@ -245,7 +322,7 @@ func Not(x interface{}) Matcher {
|
||||
// AssignableToTypeOf(s).Matches(time.Second) // returns true
|
||||
// AssignableToTypeOf(s).Matches(99) // returns false
|
||||
//
|
||||
// var ctx = reflect.TypeOf((*context.Context)).Elem()
|
||||
// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem()
|
||||
// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true
|
||||
func AssignableToTypeOf(x interface{}) Matcher {
|
||||
if xt, ok := x.(reflect.Type); ok {
|
||||
@ -253,3 +330,12 @@ func AssignableToTypeOf(x interface{}) Matcher {
|
||||
}
|
||||
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
|
||||
}
|
||||
|
||||
// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order.
|
||||
//
|
||||
// Example usage:
|
||||
// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true
|
||||
// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false
|
||||
func InAnyOrder(x interface{}) Matcher {
|
||||
return inAnyOrderMatcher{x}
|
||||
}
|
||||
|
48
vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
generated
vendored
Normal file
48
vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package and provides helpers for adding Close to io.{Reader|Writer}.
|
||||
package and
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
|
||||
// and then calling the provided "Close()" method.
|
||||
type ReadCloser struct {
|
||||
io.Reader
|
||||
CloseFunc func() error
|
||||
}
|
||||
|
||||
var _ io.ReadCloser = (*ReadCloser)(nil)
|
||||
|
||||
// Close implements io.ReadCloser
|
||||
func (rac *ReadCloser) Close() error {
|
||||
return rac.CloseFunc()
|
||||
}
|
||||
|
||||
// WriteCloser implements io.WriteCloser by reading from a particular io.Writer
|
||||
// and then calling the provided "Close()" method.
|
||||
type WriteCloser struct {
|
||||
io.Writer
|
||||
CloseFunc func() error
|
||||
}
|
||||
|
||||
var _ io.WriteCloser = (*WriteCloser)(nil)
|
||||
|
||||
// Close implements io.WriteCloser
|
||||
func (wac *WriteCloser) Close() error {
|
||||
return wac.CloseFunc()
|
||||
}
|
55
vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go
generated
vendored
Normal file
55
vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package estargz adapts the containerd estargz package to our abstractions.
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
)
|
||||
|
||||
// Assert that what we're returning is an io.ReadCloser
|
||||
var _ io.ReadCloser = (*estargz.Blob)(nil)
|
||||
|
||||
// ReadCloser reads uncompressed tarball input from the io.ReadCloser and
|
||||
// returns:
|
||||
// * An io.ReadCloser from which compressed data may be read, and
|
||||
// * A v1.Hash with the hash of the estargz table of contents, or
|
||||
// * An error if the estargz processing encountered a problem.
|
||||
//
|
||||
// Refer to estargz for the options:
|
||||
// https://pkg.go.dev/github.com/containerd/stargz-snapshotter/estargz@v0.4.1#Option
|
||||
func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) {
|
||||
defer r.Close()
|
||||
|
||||
// TODO(#876): Avoid buffering into memory.
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, v1.Hash{}, err
|
||||
}
|
||||
br := bytes.NewReader(bs)
|
||||
|
||||
rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...)
|
||||
if err != nil {
|
||||
return nil, v1.Hash{}, err
|
||||
}
|
||||
|
||||
h, err := v1.NewHash(rc.TOCDigest().String())
|
||||
return rc, h, err
|
||||
}
|
117
vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
generated
vendored
Normal file
117
vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package gzip provides helper functions for interacting with gzipped streams.
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/and"
|
||||
)
|
||||
|
||||
var gzipMagicHeader = []byte{'\x1f', '\x8b'}
|
||||
|
||||
// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses gzip.BestSpeed for the compression level; callers that want a
// different trade-off between CPU and ratio should use ReadCloserLevel.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
	return ReadCloserLevel(r, gzip.BestSpeed)
}
|
||||
|
||||
// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// Compression happens in a background goroutine connected via io.Pipe, so
// data is compressed lazily as the returned reader is consumed.
// Refer to compress/gzip for the level:
// https://golang.org/pkg/compress/gzip/#pkg-constants
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
	pr, pw := io.Pipe()

	// For highly compressible layers, gzip.Writer will output a very small
	// number of bytes per Write(). This is normally fine, but when pushing
	// to a registry, we want to ensure that we're taking full advantage of
	// the available bandwidth instead of sending tons of tiny writes over
	// the wire.
	// NOTE(review): 2<<16 is 128KiB, not the "64K" the original comment
	// claimed; either is a reasonable batching buffer size.
	bw := bufio.NewWriterSize(pw, 2<<16)

	// Returns err so we can pw.CloseWithError(err).
	go func() error {
		// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
		// Context: https://golang.org/issue/24283
		gw, err := gzip.NewWriterLevel(bw, level)
		if err != nil {
			return pw.CloseWithError(err)
		}

		if _, err := io.Copy(gw, r); err != nil {
			defer r.Close()
			defer gw.Close()
			// Propagating the error through the pipe makes the reader side
			// observe it on its next Read.
			return pw.CloseWithError(err)
		}

		// Close gzip writer to Flush it and write gzip trailers.
		if err := gw.Close(); err != nil {
			return pw.CloseWithError(err)
		}

		// Flush bufio writer to ensure we write out everything.
		if err := bw.Flush(); err != nil {
			return pw.CloseWithError(err)
		}

		// We don't really care if these fail.
		defer pw.Close()
		defer r.Close()

		return nil
	}()

	return pr
}
|
||||
|
||||
// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
// It returns an error immediately if r does not start with a valid gzip
// header.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	gr, err := gzip.NewReader(r)
	if err != nil {
		return nil, err
	}
	return &and.ReadCloser{
		Reader: gr,
		CloseFunc: func() error {
			// If the unzip fails, then this seems to return the same
			// error as the read. We don't want this to interfere with
			// us closing the main ReadCloser, since this could leave
			// an open file descriptor (fails on Windows).
			gr.Close()
			return r.Close()
		},
	}, nil
}
|
||||
|
||||
// Is detects whether the input stream is compressed.
|
||||
func Is(r io.Reader) (bool, error) {
|
||||
magicHeader := make([]byte, 2)
|
||||
n, err := r.Read(magicHeader)
|
||||
if n == 0 && err == io.EOF {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return bytes.Equal(magicHeader, gzipMagicHeader), nil
|
||||
}
|
35
vendor/github.com/google/go-containerregistry/internal/redact/redact.go
generated
vendored
Normal file
35
vendor/github.com/google/go-containerregistry/internal/redact/redact.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package redact contains a simple context signal for redacting requests.
|
||||
package redact
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// contextKey is an unexported key type so that values stored by this
// package cannot collide with context keys from other packages.
type contextKey string

// redactKey is the context key under which the redaction reason is stored.
var redactKey = contextKey("redact")

// NewContext creates a new ctx with the reason for redaction.
func NewContext(ctx context.Context, reason string) context.Context {
	return context.WithValue(ctx, redactKey, reason)
}

// FromContext returns the redaction reason, if any.
func FromContext(ctx context.Context) (bool, string) {
	v := ctx.Value(redactKey)
	if reason, ok := v.(string); ok {
		return true, reason
	}
	return false, ""
}
|
77
vendor/github.com/google/go-containerregistry/internal/retry/retry.go
generated
vendored
Normal file
77
vendor/github.com/google/go-containerregistry/internal/retry/retry.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package retry provides methods for retrying operations. It is a thin wrapper
|
||||
// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
|
||||
package retry
|
||||
|
||||
import (
	"context"
	"errors"
	"fmt"

	"github.com/google/go-containerregistry/internal/retry/wait"
)
|
||||
|
||||
// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
// the kubernetes wait package. Typing retry.Backoff is easier than fixing
// the wrong import every time you use wait.Backoff.
type Backoff = wait.Backoff
|
||||
|
||||
// temporary matches errors that report whether they are transient.
// This is implemented by several errors in the net package as well as our
// transport.Error.
type temporary interface {
	Temporary() bool
}

// IsTemporary returns true if err implements Temporary() and it returns true.
//
// context.DeadlineExceeded is never considered temporary — even when wrapped
// (fix: the previous `==` comparison missed wrapped errors) — because once a
// deadline has passed, retrying cannot succeed.
func IsTemporary(err error) bool {
	if errors.Is(err, context.DeadlineExceeded) {
		return false
	}
	if te, ok := err.(temporary); ok && te.Temporary() {
		return true
	}
	return false
}
|
||||
|
||||
// IsNotNil returns true if err is not nil.
// Matches the Predicate signature, for callers that want to retry on any
// error at all.
func IsNotNil(err error) bool {
	return err != nil
}
|
||||
|
||||
// Predicate determines whether an error should be retried.
type Predicate func(error) (retry bool)

// Retry retries a given function, f, until a predicate is satisfied, using
// exponential backoff. If the predicate is never satisfied, it will return the
// last error returned by f.
func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
	if f == nil {
		return fmt.Errorf("nil f passed to retry")
	}
	if p == nil {
		return fmt.Errorf("nil p passed to retry")
	}

	// condition reports done=true when the predicate no longer asks for a
	// retry. The latest error from f is captured into the named return
	// value so the caller sees it even if the backoff gives up.
	condition := func() (bool, error) {
		err = f()
		if p(err) {
			return false, nil
		}
		return true, err
	}

	// ExponentialBackoff's own ErrWaitTimeout is deliberately discarded;
	// the named return already holds the last error produced by f.
	wait.ExponentialBackoff(backoff, condition)
	return
}
|
123
vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
generated
vendored
Normal file
123
vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
|
||||
// in dependencies (specifically, logging).
|
||||
package wait
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
	factor := maxFactor
	if factor <= 0.0 {
		factor = 1.0
	}
	return duration + time.Duration(rand.Float64()*factor*float64(duration))
}

// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")

// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)

// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
	// The initial duration.
	Duration time.Duration
	// Duration is multiplied by Factor each iteration, if Factor is not zero
	// and the limits imposed by Steps and Cap have not been reached.
	// Should not be negative.
	// The jitter does not contribute to the updates to the duration parameter.
	Factor float64
	// The sleep at each iteration is the duration plus an additional
	// amount chosen uniformly at random from the interval between
	// zero and `jitter*duration`.
	Jitter float64
	// The remaining number of iterations in which the duration
	// parameter may change (but progress can be stopped earlier by
	// hitting the cap). If not positive, the duration is not
	// changed. Used for exponential backoff in combination with
	// Factor and Cap.
	Steps int
	// A limit on revised values of the duration parameter. If a
	// multiplication by the factor parameter would make the duration
	// exceed the cap then the duration is set to the cap and the
	// steps parameter is set to zero.
	Cap time.Duration
}

// Step (1) returns an amount of time to sleep determined by the
// original Duration and Jitter and (2) mutates the provided Backoff
// to update its Steps and Duration.
func (b *Backoff) Step() time.Duration {
	// Once the step budget is exhausted the duration no longer grows;
	// only jitter (if configured) still varies the sleep.
	if b.Steps < 1 {
		d := b.Duration
		if b.Jitter > 0 {
			d = Jitter(d, b.Jitter)
		}
		return d
	}
	b.Steps--

	// The sleep returned is based on the duration *before* growth.
	d := b.Duration

	// Grow the stored duration for the next call, clamping at Cap.
	if b.Factor != 0 {
		next := time.Duration(float64(b.Duration) * b.Factor)
		if b.Cap > 0 && next > b.Cap {
			next = b.Cap
			b.Steps = 0
		}
		b.Duration = next
	}

	if b.Jitter > 0 {
		d = Jitter(d, b.Jitter)
	}
	return d
}

// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
	for ; backoff.Steps > 0; time.Sleep(backoff.Step()) {
		done, err := condition()
		if err != nil || done {
			return err
		}
		// Don't sleep after the final check.
		if backoff.Steps == 1 {
			break
		}
	}
	return ErrWaitTimeout
}
|
107
vendor/github.com/google/go-containerregistry/internal/verify/verify.go
generated
vendored
Normal file
107
vendor/github.com/google/go-containerregistry/internal/verify/verify.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package verify provides a ReadCloser that verifies content matches the
|
||||
// expected hash values.
|
||||
package verify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/and"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
)
|
||||
|
||||
// SizeUnknown is a sentinel value to indicate that the expected size is not known.
const SizeUnknown = -1

// verifyReader wraps an underlying reader and, when io.EOF is observed,
// checks that the number of bytes read and their digest match the expected
// values. Verification only happens at end of stream.
type verifyReader struct {
	inner             io.Reader // underlying (possibly size-limited) stream
	hasher            hash.Hash // accumulates the digest of bytes fed to it
	expected          v1.Hash   // digest the content must match
	gotSize, wantSize int64     // bytes seen so far / expected total (or SizeUnknown)
}
|
||||
|
||||
// Read implements io.Reader. It delegates to the inner reader and, once the
// stream reports io.EOF, verifies first the byte count (when known) and then
// the digest, turning a mismatch into an error.
func (vc *verifyReader) Read(b []byte) (int, error) {
	n, err := vc.inner.Read(b)
	vc.gotSize += int64(n)
	if err == io.EOF {
		if vc.wantSize != SizeUnknown && vc.gotSize != vc.wantSize {
			return n, fmt.Errorf("error verifying size; got %d, want %d", vc.gotSize, vc.wantSize)
		}
		// Sum into a pre-sized scratch buffer to avoid an extra allocation.
		got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size())))
		if want := vc.expected.Hex; got != want {
			return n, fmt.Errorf("error verifying %s checksum after reading %d bytes; got %q, want %q",
				vc.expected.Algorithm, vc.gotSize, got, want)
		}
	}
	return n, err
}
||||
|
||||
// ReadCloser wraps the given io.ReadCloser to verify that its contents match
|
||||
// the provided v1.Hash before io.EOF is returned.
|
||||
//
|
||||
// The reader will only be read up to size bytes, to prevent resource
|
||||
// exhaustion. If EOF is returned before size bytes are read, an error is
|
||||
// returned.
|
||||
//
|
||||
// A size of SizeUnknown (-1) indicates disables size verification when the size
|
||||
// is unknown ahead of time.
|
||||
func ReadCloser(r io.ReadCloser, size int64, h v1.Hash) (io.ReadCloser, error) {
|
||||
w, err := v1.Hasher(h.Algorithm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r2 io.Reader = r
|
||||
if size != SizeUnknown {
|
||||
r2 = io.LimitReader(io.TeeReader(r, w), size)
|
||||
}
|
||||
return &and.ReadCloser{
|
||||
Reader: &verifyReader{
|
||||
inner: r2,
|
||||
hasher: w,
|
||||
expected: h,
|
||||
wantSize: size,
|
||||
},
|
||||
CloseFunc: r.Close,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Descriptor verifies that the embedded Data field matches the Size and Digest
|
||||
// fields of the given v1.Descriptor, returning an error if the Data field is
|
||||
// missing or if it contains incorrect data.
|
||||
func Descriptor(d v1.Descriptor) error {
|
||||
if d.Data == nil {
|
||||
return errors.New("error verifying descriptor; Data == nil")
|
||||
}
|
||||
|
||||
h, sz, err := v1.SHA256(bytes.NewReader(d.Data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if h != d.Digest {
|
||||
return fmt.Errorf("error verifying Digest; got %q, want %q", h, d.Digest)
|
||||
}
|
||||
if sz != d.Size {
|
||||
return fmt.Errorf("error verifying Size; got %d, want %d", sz, d.Size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
7
vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
generated
vendored
7
vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
generated
vendored
@ -16,6 +16,7 @@ package authn
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
@ -42,7 +43,9 @@ type Keychain interface {
|
||||
|
||||
// defaultKeychain implements Keychain with the semantics of the standard Docker
|
||||
// credential keychain.
|
||||
type defaultKeychain struct{}
|
||||
type defaultKeychain struct {
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultKeychain implements Keychain by interpreting the docker config file.
|
||||
@ -57,6 +60,8 @@ const (
|
||||
|
||||
// Resolve implements Keychain.
|
||||
func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
|
||||
dk.mu.Lock()
|
||||
defer dk.mu.Unlock()
|
||||
cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
39
vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
generated
vendored
Normal file
39
vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logs exposes the loggers used by this library.
|
||||
package logs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
// All three loggers discard their output by default; callers opt in by
// calling SetOutput on the logger they care about.
var (
	// Warn is used to log non-fatal errors.
	Warn = log.New(ioutil.Discard, "", log.LstdFlags)

	// Progress is used to log notable, successful events.
	Progress = log.New(ioutil.Discard, "", log.LstdFlags)

	// Debug is used to log information that is useful for debugging.
	Debug = log.New(ioutil.Discard, "", log.LstdFlags)
)

// Enabled checks to see if the logger's writer is set to something other
// than ioutil.Discard. This allows callers to avoid expensive operations
// that will end up in /dev/null anyway.
func Enabled(l *log.Logger) bool {
	return l.Writer() != ioutil.Discard
}
|
4
vendor/github.com/google/go-containerregistry/pkg/name/check.go
generated
vendored
4
vendor/github.com/google/go-containerregistry/pkg/name/check.go
generated
vendored
@ -35,9 +35,9 @@ func stripRunesFn(runes string) func(rune) rune {
|
||||
func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
|
||||
numRunes := utf8.RuneCountInString(element)
|
||||
if (numRunes < minRunes) || (maxRunes < numRunes) {
|
||||
return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
|
||||
return newErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
|
||||
} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
|
||||
return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
|
||||
return newErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
2
vendor/github.com/google/go-containerregistry/pkg/name/digest.go
generated
vendored
2
vendor/github.com/google/go-containerregistry/pkg/name/digest.go
generated
vendored
@ -69,7 +69,7 @@ func NewDigest(name string, opts ...Option) (Digest, error) {
|
||||
// Split on "@"
|
||||
parts := strings.Split(name, digestDelim)
|
||||
if len(parts) != 2 {
|
||||
return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
|
||||
return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
|
||||
}
|
||||
base := parts[0]
|
||||
digest := parts[1]
|
||||
|
15
vendor/github.com/google/go-containerregistry/pkg/name/errors.go
generated
vendored
15
vendor/github.com/google/go-containerregistry/pkg/name/errors.go
generated
vendored
@ -14,7 +14,10 @@
|
||||
|
||||
package name
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrBadName is an error for when a bad docker name is supplied.
|
||||
type ErrBadName struct {
|
||||
@ -25,13 +28,15 @@ func (e *ErrBadName) Error() string {
|
||||
return e.info
|
||||
}
|
||||
|
||||
// NewErrBadName returns a ErrBadName which returns the given formatted string from Error().
|
||||
func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
|
||||
// newErrBadName returns a ErrBadName which returns the given formatted string from Error().
|
||||
func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
|
||||
return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
|
||||
}
|
||||
|
||||
// IsErrBadName returns true if the given error is an ErrBadName.
|
||||
//
|
||||
// Deprecated: Use errors.Is.
|
||||
func IsErrBadName(err error) bool {
|
||||
_, ok := err.(*ErrBadName)
|
||||
return ok
|
||||
var berr *ErrBadName
|
||||
return errors.As(err, &berr)
|
||||
}
|
||||
|
3
vendor/github.com/google/go-containerregistry/pkg/name/ref.go
generated
vendored
3
vendor/github.com/google/go-containerregistry/pkg/name/ref.go
generated
vendored
@ -44,8 +44,7 @@ func ParseReference(s string, opts ...Option) (Reference, error) {
|
||||
if d, err := NewDigest(s, opts...); err == nil {
|
||||
return d, nil
|
||||
}
|
||||
return nil, NewErrBadName("could not parse reference: " + s)
|
||||
|
||||
return nil, newErrBadName("could not parse reference: " + s)
|
||||
}
|
||||
|
||||
type stringConst string
|
||||
|
4
vendor/github.com/google/go-containerregistry/pkg/name/registry.go
generated
vendored
4
vendor/github.com/google/go-containerregistry/pkg/name/registry.go
generated
vendored
@ -98,7 +98,7 @@ func checkRegistry(name string) error {
|
||||
// Per RFC 3986, registries (authorities) are required to be prefixed with "//"
|
||||
// url.Host == hostname[:port] == authority
|
||||
if url, err := url.Parse("//" + name); err != nil || url.Host != name {
|
||||
return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
|
||||
return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -108,7 +108,7 @@ func checkRegistry(name string) error {
|
||||
func NewRegistry(name string, opts ...Option) (Registry, error) {
|
||||
opt := makeOptions(opts...)
|
||||
if opt.strict && len(name) == 0 {
|
||||
return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined")
|
||||
return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined")
|
||||
}
|
||||
|
||||
if err := checkRegistry(name); err != nil {
|
||||
|
4
vendor/github.com/google/go-containerregistry/pkg/name/repository.go
generated
vendored
4
vendor/github.com/google/go-containerregistry/pkg/name/repository.go
generated
vendored
@ -72,7 +72,7 @@ func checkRepository(repository string) error {
|
||||
func NewRepository(name string, opts ...Option) (Repository, error) {
|
||||
opt := makeOptions(opts...)
|
||||
if len(name) == 0 {
|
||||
return Repository{}, NewErrBadName("a repository name must be specified")
|
||||
return Repository{}, newErrBadName("a repository name must be specified")
|
||||
}
|
||||
|
||||
var registry string
|
||||
@ -95,7 +95,7 @@ func NewRepository(name string, opts ...Option) (Repository, error) {
|
||||
return Repository{}, err
|
||||
}
|
||||
if hasImplicitNamespace(repo, reg) && opt.strict {
|
||||
return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')")
|
||||
return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')")
|
||||
}
|
||||
return Repository{reg, repo}, nil
|
||||
}
|
||||
|
133
vendor/github.com/google/go-containerregistry/pkg/v1/config.go
generated
vendored
Normal file
133
vendor/github.com/google/go-containerregistry/pkg/v1/config.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ConfigFile is the configuration file that holds the metadata describing
// how to launch a container. See:
// https://github.com/opencontainers/image-spec/blob/master/config.md
//
// docker_version and os.version are not part of the spec but included
// for backwards compatibility.
type ConfigFile struct {
	Architecture  string    `json:"architecture"`
	Author        string    `json:"author,omitempty"`
	Container     string    `json:"container,omitempty"`
	Created       Time      `json:"created,omitempty"`
	DockerVersion string    `json:"docker_version,omitempty"`
	History       []History `json:"history,omitempty"`
	OS            string    `json:"os"`
	RootFS        RootFS    `json:"rootfs"`
	Config        Config    `json:"config"`
	OSVersion     string    `json:"os.version,omitempty"`
}

// History is one entry of a list recording how this container image was built.
type History struct {
	Author     string `json:"author,omitempty"`
	Created    Time   `json:"created,omitempty"`
	CreatedBy  string `json:"created_by,omitempty"`
	Comment    string `json:"comment,omitempty"`
	EmptyLayer bool   `json:"empty_layer,omitempty"`
}

// Time is a wrapper around time.Time to help with deep copying
type Time struct {
	time.Time
}

// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
// type is effectively immutable in the time API, so it is safe to
// copy-by-assign, despite the presence of (unexported) Pointer fields.
func (t *Time) DeepCopyInto(out *Time) {
	*out = *t
}

// RootFS holds the ordered list of file system deltas that comprise the
// container image's root filesystem.
type RootFS struct {
	Type    string `json:"type"`
	DiffIDs []Hash `json:"diff_ids"`
}

// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
	// Test is the test to perform to check that the container is healthy.
	// An empty slice means to inherit the default.
	// The options are:
	// {} : inherit healthcheck
	// {"NONE"} : disable healthcheck
	// {"CMD", args...} : exec arguments directly
	// {"CMD-SHELL", command} : run command with system's default shell
	Test []string `json:",omitempty"`

	// Zero means to inherit. Durations are expressed as integer nanoseconds.
	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.

	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
	// Zero means inherit.
	Retries int `json:",omitempty"`
}

// Config is a submessage of the config file described as:
// The execution parameters which SHOULD be used as a base when running
// a container using the image.
// The names of the fields in this message are chosen to reflect the JSON
// payload of the Config as defined here:
// https://git.io/vrAET
// and
// https://github.com/opencontainers/image-spec/blob/master/config.md
type Config struct {
	AttachStderr    bool                `json:"AttachStderr,omitempty"`
	AttachStdin     bool                `json:"AttachStdin,omitempty"`
	AttachStdout    bool                `json:"AttachStdout,omitempty"`
	Cmd             []string            `json:"Cmd,omitempty"`
	Healthcheck     *HealthConfig       `json:"Healthcheck,omitempty"`
	Domainname      string              `json:"Domainname,omitempty"`
	Entrypoint      []string            `json:"Entrypoint,omitempty"`
	Env             []string            `json:"Env,omitempty"`
	Hostname        string              `json:"Hostname,omitempty"`
	Image           string              `json:"Image,omitempty"`
	Labels          map[string]string   `json:"Labels,omitempty"`
	OnBuild         []string            `json:"OnBuild,omitempty"`
	OpenStdin       bool                `json:"OpenStdin,omitempty"`
	StdinOnce       bool                `json:"StdinOnce,omitempty"`
	Tty             bool                `json:"Tty,omitempty"`
	User            string              `json:"User,omitempty"`
	Volumes         map[string]struct{} `json:"Volumes,omitempty"`
	WorkingDir      string              `json:"WorkingDir,omitempty"`
	ExposedPorts    map[string]struct{} `json:"ExposedPorts,omitempty"`
	ArgsEscaped     bool                `json:"ArgsEscaped,omitempty"`
	NetworkDisabled bool                `json:"NetworkDisabled,omitempty"`
	MacAddress      string              `json:"MacAddress,omitempty"`
	StopSignal      string              `json:"StopSignal,omitempty"`
	Shell           []string            `json:"Shell,omitempty"`
}
|
||||
|
||||
// ParseConfigFile parses the io.Reader's contents into a ConfigFile.
|
||||
func ParseConfigFile(r io.Reader) (*ConfigFile, error) {
|
||||
cf := ConfigFile{}
|
||||
if err := json.NewDecoder(r).Decode(&cf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cf, nil
|
||||
}
|
18
vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
generated
vendored
Normal file
18
vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
// Package v1 defines structured types for OCI v1 images
|
||||
package v1
|
123
vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
generated
vendored
Normal file
123
vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Hash is an unqualified digest of some content, e.g. sha256:deadbeef.
// Construct one with NewHash (or by unmarshalling); the zero value does
// not represent a valid digest.
type Hash struct {
	// Algorithm holds the algorithm used to compute the hash.
	Algorithm string

	// Hex holds the hex portion of the content hash.
	Hex string
}
|
||||
|
||||
// String reverses NewHash returning the string-form of the hash.
|
||||
func (h Hash) String() string {
|
||||
return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
|
||||
}
|
||||
|
||||
// NewHash validates the input string is a hash and returns a strongly type Hash object.
|
||||
func NewHash(s string) (Hash, error) {
|
||||
h := Hash{}
|
||||
if err := h.parse(s); err != nil {
|
||||
return Hash{}, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler, encoding the hash as a quoted
// "algorithm:hex" string.
func (h Hash) MarshalJSON() ([]byte, error) {
	return json.Marshal(h.String())
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (h *Hash) UnmarshalJSON(data []byte) error {
|
||||
s, err := strconv.Unquote(string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return h.parse(s)
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler. This is required to use
// v1.Hash as a key in a map when marshalling JSON.
// Unlike MarshalJSON, the result is the bare (unquoted) string form.
func (h Hash) MarshalText() (text []byte, err error) {
	return []byte(h.String()), nil
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler. This is required to use
// v1.Hash as a key in a map when unmarshalling JSON.
// The input is the bare (unquoted) "algorithm:hex" form.
func (h *Hash) UnmarshalText(text []byte) error {
	return h.parse(string(text))
}
|
||||
|
||||
// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256").
// Only sha256 is currently supported; any other name yields an error.
func Hasher(name string) (hash.Hash, error) {
	if name == "sha256" {
		return sha256.New(), nil
	}
	return nil, fmt.Errorf("unsupported hash: %q", name)
}
|
||||
|
||||
func (h *Hash) parse(unquoted string) error {
|
||||
parts := strings.Split(unquoted, ":")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("cannot parse hash: %q", unquoted)
|
||||
}
|
||||
|
||||
rest := strings.TrimLeft(parts[1], "0123456789abcdef")
|
||||
if len(rest) != 0 {
|
||||
return fmt.Errorf("found non-hex character in hash: %c", rest[0])
|
||||
}
|
||||
|
||||
hasher, err := Hasher(parts[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Compare the hex to the expected size (2 hex characters per byte)
|
||||
if len(parts[1]) != hasher.Size()*2 {
|
||||
return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1])
|
||||
}
|
||||
|
||||
h.Algorithm = parts[0]
|
||||
h.Hex = parts[1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// SHA256 computes the Hash of the provided io.Reader's content.
|
||||
func SHA256(r io.Reader) (Hash, int64, error) {
|
||||
hasher := sha256.New()
|
||||
n, err := io.Copy(hasher, r)
|
||||
if err != nil {
|
||||
return Hash{}, 0, err
|
||||
}
|
||||
return Hash{
|
||||
Algorithm: "sha256",
|
||||
Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))),
|
||||
}, n, nil
|
||||
}
|
59
vendor/github.com/google/go-containerregistry/pkg/v1/image.go
generated
vendored
Normal file
59
vendor/github.com/google/go-containerregistry/pkg/v1/image.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// Image defines the interface for interacting with an OCI v1 image.
type Image interface {
	// Layers returns the ordered collection of filesystem layers that comprise this image.
	// The order of the list is oldest/base layer first, and most-recent/top layer last.
	Layers() ([]Layer, error)

	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)

	// Size returns the size of the manifest.
	Size() (int64, error)

	// ConfigName returns the hash of the image's config file, also known as
	// the Image ID.
	ConfigName() (Hash, error)

	// ConfigFile returns this image's config file.
	ConfigFile() (*ConfigFile, error)

	// RawConfigFile returns the serialized bytes of ConfigFile().
	RawConfigFile() ([]byte, error)

	// Digest returns the sha256 of this image's manifest.
	Digest() (Hash, error)

	// Manifest returns this image's Manifest object.
	Manifest() (*Manifest, error)

	// RawManifest returns the serialized bytes of Manifest().
	RawManifest() ([]byte, error)

	// LayerByDigest returns a Layer for interacting with a particular layer of
	// the image, looking it up by "digest" (the compressed hash).
	LayerByDigest(Hash) (Layer, error)

	// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"
	// (the uncompressed hash).
	LayerByDiffID(Hash) (Layer, error)
}
|
43
vendor/github.com/google/go-containerregistry/pkg/v1/index.go
generated
vendored
Normal file
43
vendor/github.com/google/go-containerregistry/pkg/v1/index.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// ImageIndex defines the interface for interacting with an OCI image index.
type ImageIndex interface {
	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)

	// Digest returns the sha256 of this index's manifest.
	Digest() (Hash, error)

	// Size returns the size of the manifest.
	Size() (int64, error)

	// IndexManifest returns this image index's manifest object.
	IndexManifest() (*IndexManifest, error)

	// RawManifest returns the serialized bytes of IndexManifest().
	RawManifest() ([]byte, error)

	// Image returns a v1.Image that this ImageIndex references,
	// looked up by the given manifest digest.
	Image(Hash) (Image, error)

	// ImageIndex returns a v1.ImageIndex that this ImageIndex references,
	// looked up by the given manifest digest.
	ImageIndex(Hash) (ImageIndex, error)
}
|
42
vendor/github.com/google/go-containerregistry/pkg/v1/layer.go
generated
vendored
Normal file
42
vendor/github.com/google/go-containerregistry/pkg/v1/layer.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// Layer is an interface for accessing the properties of a particular layer of a v1.Image.
type Layer interface {
	// Digest returns the Hash of the compressed layer.
	Digest() (Hash, error)

	// DiffID returns the Hash of the uncompressed layer.
	DiffID() (Hash, error)

	// Compressed returns an io.ReadCloser for the compressed layer contents.
	Compressed() (io.ReadCloser, error)

	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
	Uncompressed() (io.ReadCloser, error)

	// Size returns the compressed size of the Layer.
	Size() (int64, error)

	// MediaType returns the media type of the Layer.
	MediaType() (types.MediaType, error)
}
|
68
vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go
generated
vendored
Normal file
68
vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// Manifest represents the OCI image manifest in a structured way.
type Manifest struct {
	SchemaVersion int64             `json:"schemaVersion"`
	MediaType     types.MediaType   `json:"mediaType,omitempty"`
	// Config describes the image's config blob; Layers describe its
	// filesystem layer blobs.
	Config      Descriptor        `json:"config"`
	Layers      []Descriptor      `json:"layers"`
	Annotations map[string]string `json:"annotations,omitempty"`
}
|
||||
|
||||
// IndexManifest represents an OCI image index in a structured way.
type IndexManifest struct {
	SchemaVersion int64           `json:"schemaVersion"`
	MediaType     types.MediaType `json:"mediaType,omitempty"`
	// Manifests lists the descriptors of the images/indexes this index references.
	Manifests   []Descriptor      `json:"manifests"`
	Annotations map[string]string `json:"annotations,omitempty"`
}
|
||||
|
||||
// Descriptor holds a reference from the manifest to one of its constituent elements.
type Descriptor struct {
	MediaType types.MediaType `json:"mediaType"`
	Size      int64           `json:"size"`
	Digest    Hash            `json:"digest"`
	// Data optionally embeds the referenced content inline.
	Data        []byte            `json:"data,omitempty"`
	URLs        []string          `json:"urls,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
	// Platform is only set for descriptors inside an image index.
	Platform *Platform `json:"platform,omitempty"`
}
|
||||
|
||||
// ParseManifest parses the io.Reader's contents into a Manifest.
|
||||
func ParseManifest(r io.Reader) (*Manifest, error) {
|
||||
m := Manifest{}
|
||||
if err := json.NewDecoder(r).Decode(&m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// ParseIndexManifest parses the io.Reader's contents into an IndexManifest.
|
||||
func ParseIndexManifest(r io.Reader) (*IndexManifest, error) {
|
||||
im := IndexManifest{}
|
||||
if err := json.NewDecoder(r).Decode(&im); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &im, nil
|
||||
}
|
90
vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go
generated
vendored
Normal file
90
vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package match provides functionality for conveniently matching a v1.Descriptor.
|
||||
package match
|
||||
|
||||
import (
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Matcher function that is given a v1.Descriptor, and returns whether or
// not it matches a given rule. Can match on anything it wants in the Descriptor.
// Matchers compose naturally: helpers in this package build them from
// annotations, platforms, media types, and digests.
type Matcher func(desc v1.Descriptor) bool
|
||||
|
||||
// Name returns a match.Matcher that matches based on the value of the
// "org.opencontainers.image.ref.name" annotation:
// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys
// It is shorthand for Annotation with the pre-defined ref.name key.
func Name(name string) Matcher {
	return Annotation(imagespec.AnnotationRefName, name)
}
|
||||
|
||||
// Annotation returns a match.Matcher that matches based on the provided annotation.
|
||||
func Annotation(key, value string) Matcher {
|
||||
return func(desc v1.Descriptor) bool {
|
||||
if desc.Annotations == nil {
|
||||
return false
|
||||
}
|
||||
if aValue, ok := desc.Annotations[key]; ok && aValue == value {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Platforms returns a match.Matcher that matches on any one of the provided platforms.
|
||||
// Ignores any descriptors that do not have a platform.
|
||||
func Platforms(platforms ...v1.Platform) Matcher {
|
||||
return func(desc v1.Descriptor) bool {
|
||||
if desc.Platform == nil {
|
||||
return false
|
||||
}
|
||||
for _, platform := range platforms {
|
||||
if desc.Platform.Equals(platform) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// MediaTypes returns a match.Matcher that matches at least one of the provided media types.
|
||||
func MediaTypes(mediaTypes ...string) Matcher {
|
||||
mts := map[string]bool{}
|
||||
for _, media := range mediaTypes {
|
||||
mts[media] = true
|
||||
}
|
||||
return func(desc v1.Descriptor) bool {
|
||||
if desc.MediaType == "" {
|
||||
return false
|
||||
}
|
||||
if _, ok := mts[string(desc.MediaType)]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Digests returns a match.Matcher that matches at least one of the provided Digests
|
||||
func Digests(digests ...v1.Hash) Matcher {
|
||||
digs := map[v1.Hash]bool{}
|
||||
for _, digest := range digests {
|
||||
digs[digest] = true
|
||||
}
|
||||
return func(desc v1.Descriptor) bool {
|
||||
_, ok := digs[desc.Digest]
|
||||
return ok
|
||||
}
|
||||
}
|
82
vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
generated
vendored
Normal file
82
vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
# `partial`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial)
|
||||
|
||||
## Partial Implementations
|
||||
|
||||
There are roughly two kinds of image representations: compressed and uncompressed.
|
||||
|
||||
The implementations for these kinds of images are almost identical, with the only
|
||||
major difference being how blobs (config and layers) are fetched. This common
|
||||
code lives in this package, where you provide a _partial_ implementation of a
|
||||
compressed or uncompressed image, and you get back a full `v1.Image` implementation.
|
||||
|
||||
### Examples
|
||||
|
||||
In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms
|
||||
of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`:
|
||||
|
||||
```go
|
||||
type CompressedImageCore interface {
|
||||
RawConfigFile() ([]byte, error)
|
||||
MediaType() (types.MediaType, error)
|
||||
RawManifest() ([]byte, error)
|
||||
LayerByDigest(v1.Hash) (CompressedLayer, error)
|
||||
}
|
||||
```
|
||||
|
||||
In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms
|
||||
of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`:
|
||||
|
||||
```go
|
||||
type UncompressedImageCore interface {
|
||||
RawConfigFile() ([]byte, error)
|
||||
MediaType() (types.MediaType, error)
|
||||
LayerByDiffID(v1.Hash) (UncompressedLayer, error)
|
||||
}
|
||||
```
|
||||
|
||||
## Optional Methods
|
||||
|
||||
Where possible, we access some information via optional methods as an optimization.
|
||||
|
||||
### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor)
|
||||
|
||||
There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data:
|
||||
|
||||
* `MediaType`
|
||||
* `Platform`
|
||||
* `URLs`
|
||||
* `Annotations`
|
||||
|
||||
For example, in a `tarball.Image`, there is a `LayerSources` field that contains
|
||||
an entire layer descriptor with `URLs` information for foreign layers. This
|
||||
information can be passed through to callers by implementing this optional
|
||||
`Descriptor` method.
|
||||
|
||||
See [`#654`](https://github.com/google/go-containerregistry/pull/654).
|
||||
|
||||
### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize)
|
||||
|
||||
Usually, you don't need to know the uncompressed size of a layer, since that
|
||||
information isn't stored in a config file (just the sha256 is needed); however,
|
||||
there are cases where it is very helpful to know the layer size, e.g. when
|
||||
writing the uncompressed layer into a tarball.
|
||||
|
||||
See [`#655`](https://github.com/google/go-containerregistry/pull/655).
|
||||
|
||||
### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists)
|
||||
|
||||
We generally don't care about the existence of something as granular as a
|
||||
layer, and would rather ensure all the invariants of an image are upheld via
|
||||
the `validate` package. However, there are situations where we want to do a
|
||||
quick smoke test to ensure that the underlying storage engine hasn't been
|
||||
corrupted by something e.g. deleting files or blobs. Thus, we've exposed an
|
||||
optional `Exists` method that does an existence check without actually reading
|
||||
any bytes.
|
||||
|
||||
The `remote` package implements this via `HEAD` requests.
|
||||
|
||||
The `layout` package implements this via `os.Stat`.
|
||||
|
||||
See [`#838`](https://github.com/google/go-containerregistry/pull/838).
|
163
vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
generated
vendored
Normal file
163
vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/gzip"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// CompressedLayer represents the bare minimum interface a natively
// compressed layer must implement for us to produce a v1.Layer
type CompressedLayer interface {
	// Digest returns the Hash of the compressed layer.
	Digest() (v1.Hash, error)

	// Compressed returns an io.ReadCloser for the compressed layer contents.
	Compressed() (io.ReadCloser, error)

	// Size returns the compressed size of the Layer.
	Size() (int64, error)

	// MediaType returns the mediaType for the compressed Layer.
	MediaType() (types.MediaType, error)
}
|
||||
|
||||
// compressedLayerExtender implements v1.Layer using the compressed base properties,
// deriving the uncompressed view (Uncompressed, DiffID) from the embedded layer.
type compressedLayerExtender struct {
	CompressedLayer
}
|
||||
|
||||
// Uncompressed implements v1.Layer
|
||||
func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
|
||||
r, err := cle.Compressed()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gzip.UnzipReadCloser(r)
|
||||
}
|
||||
|
||||
// DiffID implements v1.Layer
|
||||
func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) {
|
||||
// If our nested CompressedLayer implements DiffID,
|
||||
// then delegate to it instead.
|
||||
if wdi, ok := cle.CompressedLayer.(WithDiffID); ok {
|
||||
return wdi.DiffID()
|
||||
}
|
||||
r, err := cle.Uncompressed()
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
defer r.Close()
|
||||
h, _, err := v1.SHA256(r)
|
||||
return h, err
|
||||
}
|
||||
|
||||
// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer.
// The error return is always nil; it exists for interface symmetry with other constructors.
func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) {
	return &compressedLayerExtender{ul}, nil
}
|
||||
|
||||
// CompressedImageCore represents the bare minimum interface a natively
// compressed image must implement for us to produce a v1.Image.
type CompressedImageCore interface {
	ImageCore

	// RawManifest returns the serialized bytes of the manifest.
	RawManifest() ([]byte, error)

	// LayerByDigest is a variation on the v1.Image method, which returns
	// a CompressedLayer instead.
	LayerByDigest(v1.Hash) (CompressedLayer, error)
}
|
||||
|
||||
// compressedImageExtender implements v1.Image by extending CompressedImageCore with the
// appropriate methods computed from the minimal core.
type compressedImageExtender struct {
	CompressedImageCore
}

// Assert that our extender type completes the v1.Image interface
var _ v1.Image = (*compressedImageExtender)(nil)
|
||||
|
||||
// Digest implements v1.Image by delegating to the package-level helper.
func (i *compressedImageExtender) Digest() (v1.Hash, error) {
	return Digest(i)
}
|
||||
|
||||
// ConfigName implements v1.Image by delegating to the package-level helper.
func (i *compressedImageExtender) ConfigName() (v1.Hash, error) {
	return ConfigName(i)
}
|
||||
|
||||
// Layers implements v1.Image
|
||||
func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
|
||||
hs, err := FSLayers(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls := make([]v1.Layer, 0, len(hs))
|
||||
for _, h := range hs {
|
||||
l, err := i.LayerByDigest(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls = append(ls, l)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// LayerByDigest implements v1.Image
|
||||
func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
|
||||
cl, err := i.CompressedImageCore.LayerByDigest(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return CompressedToLayer(cl)
|
||||
}
|
||||
|
||||
// LayerByDiffID implements v1.Image
|
||||
func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
|
||||
h, err := DiffIDToBlob(i, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return i.LayerByDigest(h)
|
||||
}
|
||||
|
||||
// ConfigFile implements v1.Image by delegating to the package-level helper.
func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
	return ConfigFile(i)
}
|
||||
|
||||
// Manifest implements v1.Image by delegating to the package-level helper.
func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) {
	return Manifest(i)
}
|
||||
|
||||
// Size implements v1.Image by delegating to the package-level helper.
func (i *compressedImageExtender) Size() (int64, error) {
	return Size(i)
}
|
||||
|
||||
// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image
|
||||
func CompressedToImage(cic CompressedImageCore) (v1.Image, error) {
|
||||
return &compressedImageExtender{
|
||||
CompressedImageCore: cic,
|
||||
}, nil
|
||||
}
|
17
vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
generated
vendored
Normal file
17
vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package partial defines methods for building up a v1.Image from
|
||||
// minimal subsets that are sufficient for defining a v1.Image.
|
||||
package partial
|
28
vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
generated
vendored
Normal file
28
vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// ImageCore is the core set of properties without which we cannot build a v1.Image.
type ImageCore interface {
	// RawConfigFile returns the serialized bytes of this image's config file.
	RawConfigFile() ([]byte, error)

	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)
}
|
85
vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
generated
vendored
Normal file
85
vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/match"
|
||||
)
|
||||
|
||||
// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher.
|
||||
func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
|
||||
// get the actual manifest list
|
||||
indexManifest, err := index.IndexManifest()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get raw index: %w", err)
|
||||
}
|
||||
manifests := []v1.Descriptor{}
|
||||
// try to get the root of our image
|
||||
for _, manifest := range indexManifest.Manifests {
|
||||
if matcher(manifest) {
|
||||
manifests = append(manifests, manifest)
|
||||
}
|
||||
}
|
||||
return manifests, nil
|
||||
}
|
||||
|
||||
// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor
|
||||
// matches the provider Matcher, but the referenced item is not an Image, ignores it.
|
||||
// Only returns those that match the Matcher and are images.
|
||||
func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
|
||||
matches := []v1.Image{}
|
||||
manifests, err := FindManifests(index, matcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, desc := range manifests {
|
||||
// if it is not an image, ignore it
|
||||
if !desc.MediaType.IsImage() {
|
||||
continue
|
||||
}
|
||||
img, err := index.Image(desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches = append(matches, img)
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor
|
||||
// matches the provider Matcher, but the referenced item is not an Index, ignores it.
|
||||
// Only returns those that match the Matcher and are indexes.
|
||||
func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
|
||||
matches := []v1.ImageIndex{}
|
||||
manifests, err := FindManifests(index, matcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, desc := range manifests {
|
||||
if !desc.MediaType.IsIndex() {
|
||||
continue
|
||||
}
|
||||
// if it is not an index, ignore it
|
||||
idx, err := index.ImageIndex(desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches = append(matches, idx)
|
||||
}
|
||||
return matches, nil
|
||||
}
|
223
vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
generated
vendored
Normal file
223
vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/gzip"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// UncompressedLayer represents the bare minimum interface a natively
|
||||
// uncompressed layer must implement for us to produce a v1.Layer
|
||||
type UncompressedLayer interface {
|
||||
// DiffID returns the Hash of the uncompressed layer.
|
||||
DiffID() (v1.Hash, error)
|
||||
|
||||
// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
|
||||
Uncompressed() (io.ReadCloser, error)
|
||||
|
||||
// Returns the mediaType for the compressed Layer
|
||||
MediaType() (types.MediaType, error)
|
||||
}
|
||||
|
||||
// uncompressedLayerExtender implements v1.Image using the uncompressed base properties.
|
||||
type uncompressedLayerExtender struct {
|
||||
UncompressedLayer
|
||||
// Memoize size/hash so that the methods aren't twice as
|
||||
// expensive as doing this manually.
|
||||
hash v1.Hash
|
||||
size int64
|
||||
hashSizeError error
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// Compressed implements v1.Layer
|
||||
func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
|
||||
u, err := ule.Uncompressed()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gzip.ReadCloser(u), nil
|
||||
}
|
||||
|
||||
// Digest implements v1.Layer
|
||||
func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) {
|
||||
ule.calcSizeHash()
|
||||
return ule.hash, ule.hashSizeError
|
||||
}
|
||||
|
||||
// Size implements v1.Layer
|
||||
func (ule *uncompressedLayerExtender) Size() (int64, error) {
|
||||
ule.calcSizeHash()
|
||||
return ule.size, ule.hashSizeError
|
||||
}
|
||||
|
||||
func (ule *uncompressedLayerExtender) calcSizeHash() {
|
||||
ule.once.Do(func() {
|
||||
var r io.ReadCloser
|
||||
r, ule.hashSizeError = ule.Compressed()
|
||||
if ule.hashSizeError != nil {
|
||||
return
|
||||
}
|
||||
defer r.Close()
|
||||
ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r)
|
||||
})
|
||||
}
|
||||
|
||||
// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer
|
||||
func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) {
|
||||
return &uncompressedLayerExtender{UncompressedLayer: ul}, nil
|
||||
}
|
||||
|
||||
// UncompressedImageCore represents the bare minimum interface a natively
|
||||
// uncompressed image must implement for us to produce a v1.Image
|
||||
type UncompressedImageCore interface {
|
||||
ImageCore
|
||||
|
||||
// LayerByDiffID is a variation on the v1.Image method, which returns
|
||||
// an UncompressedLayer instead.
|
||||
LayerByDiffID(v1.Hash) (UncompressedLayer, error)
|
||||
}
|
||||
|
||||
// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image.
|
||||
func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) {
|
||||
return &uncompressedImageExtender{
|
||||
UncompressedImageCore: uic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the
|
||||
// appropriate methods computed from the minimal core.
|
||||
type uncompressedImageExtender struct {
|
||||
UncompressedImageCore
|
||||
|
||||
lock sync.Mutex
|
||||
manifest *v1.Manifest
|
||||
}
|
||||
|
||||
// Assert that our extender type completes the v1.Image interface
|
||||
var _ v1.Image = (*uncompressedImageExtender)(nil)
|
||||
|
||||
// Digest implements v1.Image
|
||||
func (i *uncompressedImageExtender) Digest() (v1.Hash, error) {
|
||||
return Digest(i)
|
||||
}
|
||||
|
||||
// Manifest implements v1.Image
|
||||
func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) {
|
||||
i.lock.Lock()
|
||||
defer i.lock.Unlock()
|
||||
if i.manifest != nil {
|
||||
return i.manifest, nil
|
||||
}
|
||||
|
||||
b, err := i.RawConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := &v1.Manifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: types.DockerManifestSchema2,
|
||||
Config: v1.Descriptor{
|
||||
MediaType: types.DockerConfigJSON,
|
||||
Size: cfgSize,
|
||||
Digest: cfgHash,
|
||||
},
|
||||
}
|
||||
|
||||
ls, err := i.Layers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Layers = make([]v1.Descriptor, len(ls))
|
||||
for i, l := range ls {
|
||||
desc, err := Descriptor(l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Layers[i] = *desc
|
||||
}
|
||||
|
||||
i.manifest = m
|
||||
return i.manifest, nil
|
||||
}
|
||||
|
||||
// RawManifest implements v1.Image
|
||||
func (i *uncompressedImageExtender) RawManifest() ([]byte, error) {
|
||||
return RawManifest(i)
|
||||
}
|
||||
|
||||
// Size implements v1.Image
|
||||
func (i *uncompressedImageExtender) Size() (int64, error) {
|
||||
return Size(i)
|
||||
}
|
||||
|
||||
// ConfigName implements v1.Image
|
||||
func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) {
|
||||
return ConfigName(i)
|
||||
}
|
||||
|
||||
// ConfigFile implements v1.Image
|
||||
func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
|
||||
return ConfigFile(i)
|
||||
}
|
||||
|
||||
// Layers implements v1.Image
|
||||
func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) {
|
||||
diffIDs, err := DiffIDs(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls := make([]v1.Layer, 0, len(diffIDs))
|
||||
for _, h := range diffIDs {
|
||||
l, err := i.LayerByDiffID(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls = append(ls, l)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// LayerByDiffID implements v1.Image
|
||||
func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) {
|
||||
ul, err := i.UncompressedImageCore.LayerByDiffID(diffID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return UncompressedToLayer(ul)
|
||||
}
|
||||
|
||||
// LayerByDigest implements v1.Image
|
||||
func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
|
||||
diffID, err := BlobToDiffID(i, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return i.LayerByDiffID(diffID)
|
||||
}
|
389
vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go
generated
vendored
Normal file
389
vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go
generated
vendored
Normal file
@ -0,0 +1,389 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// WithRawConfigFile defines the subset of v1.Image used by these helper methods
|
||||
type WithRawConfigFile interface {
|
||||
// RawConfigFile returns the serialized bytes of this image's config file.
|
||||
RawConfigFile() ([]byte, error)
|
||||
}
|
||||
|
||||
// ConfigFile is a helper for implementing v1.Image
|
||||
func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
|
||||
b, err := i.RawConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1.ParseConfigFile(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// ConfigName is a helper for implementing v1.Image
|
||||
func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
|
||||
b, err := i.RawConfigFile()
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
h, _, err := v1.SHA256(bytes.NewReader(b))
|
||||
return h, err
|
||||
}
|
||||
|
||||
type configLayer struct {
|
||||
hash v1.Hash
|
||||
content []byte
|
||||
}
|
||||
|
||||
// Digest implements v1.Layer
|
||||
func (cl *configLayer) Digest() (v1.Hash, error) {
|
||||
return cl.hash, nil
|
||||
}
|
||||
|
||||
// DiffID implements v1.Layer
|
||||
func (cl *configLayer) DiffID() (v1.Hash, error) {
|
||||
return cl.hash, nil
|
||||
}
|
||||
|
||||
// Uncompressed implements v1.Layer
|
||||
func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
|
||||
}
|
||||
|
||||
// Compressed implements v1.Layer
|
||||
func (cl *configLayer) Compressed() (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
|
||||
}
|
||||
|
||||
// Size implements v1.Layer
|
||||
func (cl *configLayer) Size() (int64, error) {
|
||||
return int64(len(cl.content)), nil
|
||||
}
|
||||
|
||||
func (cl *configLayer) MediaType() (types.MediaType, error) {
|
||||
// Defaulting this to OCIConfigJSON as it should remain
|
||||
// backwards compatible with DockerConfigJSON
|
||||
return types.OCIConfigJSON, nil
|
||||
}
|
||||
|
||||
var _ v1.Layer = (*configLayer)(nil)
|
||||
|
||||
// ConfigLayer implements v1.Layer from the raw config bytes.
|
||||
// This is so that clients (e.g. remote) can access the config as a blob.
|
||||
func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
|
||||
h, err := ConfigName(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rcfg, err := i.RawConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &configLayer{
|
||||
hash: h,
|
||||
content: rcfg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithConfigFile defines the subset of v1.Image used by these helper methods
|
||||
type WithConfigFile interface {
|
||||
// ConfigFile returns this image's config file.
|
||||
ConfigFile() (*v1.ConfigFile, error)
|
||||
}
|
||||
|
||||
// DiffIDs is a helper for implementing v1.Image
|
||||
func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
|
||||
cfg, err := i.ConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cfg.RootFS.DiffIDs, nil
|
||||
}
|
||||
|
||||
// RawConfigFile is a helper for implementing v1.Image
|
||||
func RawConfigFile(i WithConfigFile) ([]byte, error) {
|
||||
cfg, err := i.ConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(cfg)
|
||||
}
|
||||
|
||||
// WithRawManifest defines the subset of v1.Image used by these helper methods
|
||||
type WithRawManifest interface {
|
||||
// RawManifest returns the serialized bytes of this image's config file.
|
||||
RawManifest() ([]byte, error)
|
||||
}
|
||||
|
||||
// Digest is a helper for implementing v1.Image
|
||||
func Digest(i WithRawManifest) (v1.Hash, error) {
|
||||
mb, err := i.RawManifest()
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
digest, _, err := v1.SHA256(bytes.NewReader(mb))
|
||||
return digest, err
|
||||
}
|
||||
|
||||
// Manifest is a helper for implementing v1.Image
|
||||
func Manifest(i WithRawManifest) (*v1.Manifest, error) {
|
||||
b, err := i.RawManifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1.ParseManifest(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// WithManifest defines the subset of v1.Image used by these helper methods
|
||||
type WithManifest interface {
|
||||
// Manifest returns this image's Manifest object.
|
||||
Manifest() (*v1.Manifest, error)
|
||||
}
|
||||
|
||||
// RawManifest is a helper for implementing v1.Image
|
||||
func RawManifest(i WithManifest) ([]byte, error) {
|
||||
m, err := i.Manifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
// Size is a helper for implementing v1.Image
|
||||
func Size(i WithRawManifest) (int64, error) {
|
||||
b, err := i.RawManifest()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return int64(len(b)), nil
|
||||
}
|
||||
|
||||
// FSLayers is a helper for implementing v1.Image
|
||||
func FSLayers(i WithManifest) ([]v1.Hash, error) {
|
||||
m, err := i.Manifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fsl := make([]v1.Hash, len(m.Layers))
|
||||
for i, l := range m.Layers {
|
||||
fsl[i] = l.Digest
|
||||
}
|
||||
return fsl, nil
|
||||
}
|
||||
|
||||
// BlobSize is a helper for implementing v1.Image
|
||||
func BlobSize(i WithManifest, h v1.Hash) (int64, error) {
|
||||
d, err := BlobDescriptor(i, h)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return d.Size, nil
|
||||
}
|
||||
|
||||
// BlobDescriptor is a helper for implementing v1.Image
|
||||
func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) {
|
||||
m, err := i.Manifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.Config.Digest == h {
|
||||
return &m.Config, nil
|
||||
}
|
||||
|
||||
for _, l := range m.Layers {
|
||||
if l.Digest == h {
|
||||
return &l, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("blob %v not found", h)
|
||||
}
|
||||
|
||||
// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods
|
||||
type WithManifestAndConfigFile interface {
|
||||
WithConfigFile
|
||||
|
||||
// Manifest returns this image's Manifest object.
|
||||
Manifest() (*v1.Manifest, error)
|
||||
}
|
||||
|
||||
// BlobToDiffID is a helper for mapping between compressed
|
||||
// and uncompressed blob hashes.
|
||||
func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) {
|
||||
blobs, err := FSLayers(i)
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
diffIDs, err := DiffIDs(i)
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
if len(blobs) != len(diffIDs) {
|
||||
return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs))
|
||||
}
|
||||
for i, blob := range blobs {
|
||||
if blob == h {
|
||||
return diffIDs[i], nil
|
||||
}
|
||||
}
|
||||
return v1.Hash{}, fmt.Errorf("unknown blob %v", h)
|
||||
}
|
||||
|
||||
// DiffIDToBlob is a helper for mapping between uncompressed
|
||||
// and compressed blob hashes.
|
||||
func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) {
|
||||
blobs, err := FSLayers(wm)
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
diffIDs, err := DiffIDs(wm)
|
||||
if err != nil {
|
||||
return v1.Hash{}, err
|
||||
}
|
||||
if len(blobs) != len(diffIDs) {
|
||||
return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs))
|
||||
}
|
||||
for i, diffID := range diffIDs {
|
||||
if diffID == h {
|
||||
return blobs[i], nil
|
||||
}
|
||||
}
|
||||
return v1.Hash{}, fmt.Errorf("unknown diffID %v", h)
|
||||
}
|
||||
|
||||
// WithDiffID defines the subset of v1.Layer for exposing the DiffID method.
|
||||
type WithDiffID interface {
|
||||
DiffID() (v1.Hash, error)
|
||||
}
|
||||
|
||||
// withDescriptor allows partial layer implementations to provide a layer
|
||||
// descriptor to the partial image manifest builder. This allows partial
|
||||
// uncompressed layers to provide foreign layer metadata like URLs to the
|
||||
// uncompressed image manifest.
|
||||
type withDescriptor interface {
|
||||
Descriptor() (*v1.Descriptor, error)
|
||||
}
|
||||
|
||||
// Describable represents something for which we can produce a v1.Descriptor.
|
||||
type Describable interface {
|
||||
Digest() (v1.Hash, error)
|
||||
MediaType() (types.MediaType, error)
|
||||
Size() (int64, error)
|
||||
}
|
||||
|
||||
// Descriptor returns a v1.Descriptor given a Describable. It also encodes
|
||||
// some logic for unwrapping things that have been wrapped by
|
||||
// CompressedToLayer, UncompressedToLayer, CompressedToImage, or
|
||||
// UncompressedToImage.
|
||||
func Descriptor(d Describable) (*v1.Descriptor, error) {
|
||||
// If Describable implements Descriptor itself, return that.
|
||||
if wd, ok := unwrap(d).(withDescriptor); ok {
|
||||
return wd.Descriptor()
|
||||
}
|
||||
|
||||
// If all else fails, compute the descriptor from the individual methods.
|
||||
var (
|
||||
desc v1.Descriptor
|
||||
err error
|
||||
)
|
||||
|
||||
if desc.Size, err = d.Size(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if desc.Digest, err = d.Digest(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if desc.MediaType, err = d.MediaType(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &desc, nil
|
||||
}
|
||||
|
||||
type withUncompressedSize interface {
|
||||
UncompressedSize() (int64, error)
|
||||
}
|
||||
|
||||
// UncompressedSize returns the size of the Uncompressed layer. If the
|
||||
// underlying implementation doesn't implement UncompressedSize directly,
|
||||
// this will compute the uncompressedSize by reading everything returned
|
||||
// by Compressed(). This is potentially expensive and may consume the contents
|
||||
// for streaming layers.
|
||||
func UncompressedSize(l v1.Layer) (int64, error) {
|
||||
// If the layer implements UncompressedSize itself, return that.
|
||||
if wus, ok := unwrap(l).(withUncompressedSize); ok {
|
||||
return wus.UncompressedSize()
|
||||
}
|
||||
|
||||
// The layer doesn't implement UncompressedSize, we need to compute it.
|
||||
rc, err := l.Uncompressed()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
return io.Copy(ioutil.Discard, rc)
|
||||
}
|
||||
|
||||
type withExists interface {
|
||||
Exists() (bool, error)
|
||||
}
|
||||
|
||||
// Exists checks to see if a layer exists. This is a hack to work around the
|
||||
// mistakes of the partial package. Don't use this.
|
||||
func Exists(l v1.Layer) (bool, error) {
|
||||
// If the layer implements Exists itself, return that.
|
||||
if we, ok := unwrap(l).(withExists); ok {
|
||||
return we.Exists()
|
||||
}
|
||||
|
||||
// The layer doesn't implement Exists, so we hope that calling Compressed()
|
||||
// is enough to trigger an error if the layer does not exist.
|
||||
rc, err := l.Compressed()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// We may want to try actually reading a single byte, but if we need to do
|
||||
// that, we should just fix this hack.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Recursively unwrap our wrappers so that we can check for the original implementation.
|
||||
// We might want to expose this?
|
||||
func unwrap(i interface{}) interface{} {
|
||||
if ule, ok := i.(*uncompressedLayerExtender); ok {
|
||||
return unwrap(ule.UncompressedLayer)
|
||||
}
|
||||
if cle, ok := i.(*compressedLayerExtender); ok {
|
||||
return unwrap(cle.CompressedLayer)
|
||||
}
|
||||
if uie, ok := i.(*uncompressedImageExtender); ok {
|
||||
return unwrap(uie.UncompressedImageCore)
|
||||
}
|
||||
if cie, ok := i.(*compressedImageExtender); ok {
|
||||
return unwrap(cie.CompressedImageCore)
|
||||
}
|
||||
return i
|
||||
}
|
58
vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
generated
vendored
Normal file
58
vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Platform represents the target os/arch for an image.
|
||||
type Platform struct {
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
OSVersion string `json:"os.version,omitempty"`
|
||||
OSFeatures []string `json:"os.features,omitempty"`
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Features []string `json:"features,omitempty"`
|
||||
}
|
||||
|
||||
// Equals returns true if the given platform is semantically equivalent to this one.
|
||||
// The order of Features and OSFeatures is not important.
|
||||
func (p Platform) Equals(o Platform) bool {
|
||||
return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion &&
|
||||
stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features)
|
||||
}
|
||||
|
||||
// stringSliceEqual compares 2 string slices and returns if their contents are identical.
|
||||
func stringSliceEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, elm := range a {
|
||||
if elm != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order
|
||||
func stringSliceEqualIgnoreOrder(a, b []string) bool {
|
||||
if a != nil && b != nil {
|
||||
sort.Strings(a)
|
||||
sort.Strings(b)
|
||||
}
|
||||
return stringSliceEqual(a, b)
|
||||
}
|
25
vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
generated
vendored
Normal file
25
vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
// Update representation of an update of transfer progress. Some functions
|
||||
// in this module can take a channel to which updates will be sent while a
|
||||
// transfer is in progress.
|
||||
// +k8s:deepcopy-gen=false
|
||||
type Update struct {
|
||||
Total int64
|
||||
Complete int64
|
||||
Error error
|
||||
}
|
117
vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md
generated
vendored
Normal file
117
vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
# `remote`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
|
||||
|
||||
The `remote` package implements a client for accessing a registry,
|
||||
per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md).
|
||||
|
||||
It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the
|
||||
authentication handshake and structured errors.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ref, err := name.ParseReference("gcr.io/google-containers/pause")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// do stuff with img
|
||||
}
|
||||
```
|
||||
|
||||
## Structure
|
||||
|
||||
<p align="center">
|
||||
<img src="/images/remote.dot.svg" />
|
||||
</p>
|
||||
|
||||
|
||||
## Background
|
||||
|
||||
There are a lot of confusingly similar terms that come up when talking about images in registries.
|
||||
|
||||
### Anatomy of an image
|
||||
|
||||
In general...
|
||||
|
||||
* A tag refers to an image manifest.
|
||||
* An image manifest references a config file and an orderered list of _compressed_ layers by sha256 digest.
|
||||
* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration.
|
||||
* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image.
|
||||
|
||||
For example, an image with two layers would look something like this:
|
||||
|
||||
![image anatomy](/images/image-anatomy.dot.svg)
|
||||
|
||||
### Anatomy of an index
|
||||
|
||||
In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image.
|
||||
This was the original use case for a [manifest
|
||||
list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list).
|
||||
|
||||
![image index anatomy](/images/index-anatomy.dot.svg)
|
||||
|
||||
It is possible for an index to reference another index, per the OCI
|
||||
[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix).
|
||||
In theory, both an image and image index can reference arbitrary things via
|
||||
[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md),
|
||||
e.g. see the [image layout
|
||||
example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example),
|
||||
which references an application/xml file from an image index.
|
||||
|
||||
That could look something like this:
|
||||
|
||||
![strange image index anatomy](/images/index-anatomy-strange.dot.svg)
|
||||
|
||||
Using a recursive index like this might not be possible with all registries,
|
||||
but this flexibility allows for some interesting applications, e.g. the
|
||||
[OCI Artifacts](https://github.com/opencontainers/artifacts) effort.
|
||||
|
||||
### Anatomy of an image upload
|
||||
|
||||
The structure of an image requires a delicate ordering when uploading an image to a registry.
|
||||
Below is a (slightly simplified) figure that describes how an image is prepared for upload
|
||||
to a registry and how the data flows between various artifacts:
|
||||
|
||||
![upload](/images/upload.dot.svg)
|
||||
|
||||
Note that:
|
||||
|
||||
* A config file references the uncompressed layer contents by sha256.
|
||||
* A manifest references the compressed layer contents by sha256 and the size of the layer.
|
||||
* A manifest references the config file contents by sha256 and the size of the file.
|
||||
|
||||
It follows that during an upload, we need to upload layers before the config file,
|
||||
and we need to upload the config file before the manifest.
|
||||
|
||||
Sometimes, we know all of this information ahead of time, (e.g. when copying from remote.Image),
|
||||
so the ordering is less important.
|
||||
|
||||
In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer),
|
||||
we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering.
|
||||
|
||||
## Caveats
|
||||
|
||||
### schema 1
|
||||
|
||||
This package does not support schema 1 images, see [`#377`](https://github.com/google/go-containerregistry/issues/377),
|
||||
however, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get),
|
||||
which doesn't try to interpret what is returned by the registry.
|
||||
|
||||
[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images,
|
||||
see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go).
|
154
vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go
generated
vendored
Normal file
154
vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
type catalog struct {
|
||||
Repos []string `json:"repositories"`
|
||||
}
|
||||
|
||||
// CatalogPage calls /_catalog, returning the list of repositories on the registry.
|
||||
func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) {
|
||||
o, err := makeOptions(target, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scopes := []string{target.Scope(transport.PullScope)}
|
||||
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n)
|
||||
|
||||
uri := url.URL{
|
||||
Scheme: target.Scheme(),
|
||||
Host: target.RegistryStr(),
|
||||
Path: "/v2/_catalog",
|
||||
RawQuery: query,
|
||||
}
|
||||
|
||||
client := http.Client{Transport: tr}
|
||||
req, err := http.NewRequest(http.MethodGet, uri.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Do(req.WithContext(o.context))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var parsed catalog
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.Repos, nil
|
||||
}
|
||||
|
||||
// Catalog calls /_catalog, returning the list of repositories on the registry.
|
||||
func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) {
|
||||
o, err := makeOptions(target, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scopes := []string{target.Scope(transport.PullScope)}
|
||||
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri := &url.URL{
|
||||
Scheme: target.Scheme(),
|
||||
Host: target.RegistryStr(),
|
||||
Path: "/v2/_catalog",
|
||||
}
|
||||
|
||||
if o.pageSize > 0 {
|
||||
uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize)
|
||||
}
|
||||
|
||||
client := http.Client{Transport: tr}
|
||||
|
||||
// WithContext overrides the ctx passed directly.
|
||||
if o.context != context.Background() {
|
||||
ctx = o.context
|
||||
}
|
||||
|
||||
var (
|
||||
parsed catalog
|
||||
repoList []string
|
||||
)
|
||||
|
||||
// get responses until there is no next page
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", uri.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repoList = append(repoList, parsed.Repos...)
|
||||
|
||||
uri, err = getNextPageURL(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// no next page
|
||||
if uri == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return repoList, nil
|
||||
}
|
59
vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go
generated
vendored
Normal file
59
vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
// CheckPushPermission returns an error if the given keychain cannot authorize
|
||||
// a push operation to the given ref.
|
||||
//
|
||||
// This can be useful to check whether the caller has permission to push an
|
||||
// image before doing work to construct the image.
|
||||
//
|
||||
// TODO(#412): Remove the need for this method.
|
||||
func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error {
|
||||
auth, err := kc.Resolve(ref.Context().Registry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving authorization for %v failed: %w", ref.Context().Registry, err)
|
||||
}
|
||||
|
||||
scopes := []string{ref.Scope(transport.PushScope)}
|
||||
tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err)
|
||||
}
|
||||
// TODO(jasonhall): Against GCR, just doing the token handshake is
|
||||
// enough, but this doesn't extend to Dockerhub
|
||||
// (https://github.com/docker/hub-feedback/issues/1771), so we actually
|
||||
// need to initiate an upload to tell whether the credentials can
|
||||
// authorize a push. Figure out how to return early here when we can,
|
||||
// to avoid a roundtrip for spec-compliant registries.
|
||||
w := writer{
|
||||
repo: ref.Context(),
|
||||
client: &http.Client{Transport: tr},
|
||||
context: context.Background(),
|
||||
}
|
||||
loc, _, err := w.initiateUpload("", "")
|
||||
if loc != "" {
|
||||
// Since we're only initiating the upload to check whether we
|
||||
// can, we should attempt to cancel it, in case initiating
|
||||
// reserves some resources on the server. We shouldn't wait for
|
||||
// cancelling to complete, and we don't care if it fails.
|
||||
go w.cancelUpload(loc)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *writer) cancelUpload(loc string) {
|
||||
req, err := http.NewRequest(http.MethodDelete, loc, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, _ = w.client.Do(req)
|
||||
}
|
57
vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go
generated
vendored
Normal file
57
vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
// Delete removes the specified image reference from the remote registry.
|
||||
func Delete(ref name.Reference, options ...Option) error {
|
||||
o, err := makeOptions(ref.Context(), options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
scopes := []string{ref.Scope(transport.DeleteScope)}
|
||||
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c := &http.Client{Transport: tr}
|
||||
|
||||
u := url.URL{
|
||||
Scheme: ref.Context().Registry.Scheme(),
|
||||
Host: ref.Context().RegistryStr(),
|
||||
Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()),
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodDelete, u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := c.Do(req.WithContext(o.context))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return transport.CheckError(resp, http.StatusOK, http.StatusAccepted)
|
||||
}
|
430
vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go
generated
vendored
Normal file
430
vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go
generated
vendored
Normal file
@ -0,0 +1,430 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/verify"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// ErrSchema1 indicates that we received a schema1 manifest from the registry.
|
||||
// This library doesn't have plans to support this legacy image format:
|
||||
// https://github.com/google/go-containerregistry/issues/377
|
||||
type ErrSchema1 struct {
|
||||
schema string
|
||||
}
|
||||
|
||||
// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType.
|
||||
func newErrSchema1(schema types.MediaType) error {
|
||||
return &ErrSchema1{
|
||||
schema: string(schema),
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (e *ErrSchema1) Error() string {
|
||||
return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema)
|
||||
}
|
||||
|
||||
// Descriptor provides access to metadata about remote artifact and accessors
|
||||
// for efficiently converting it into a v1.Image or v1.ImageIndex.
|
||||
type Descriptor struct {
|
||||
fetcher
|
||||
v1.Descriptor
|
||||
Manifest []byte
|
||||
|
||||
// So we can share this implementation with Image..
|
||||
platform v1.Platform
|
||||
}
|
||||
|
||||
// RawManifest exists to satisfy the Taggable interface.
|
||||
func (d *Descriptor) RawManifest() ([]byte, error) {
|
||||
return d.Manifest, nil
|
||||
}
|
||||
|
||||
// Get returns a remote.Descriptor for the given reference. The response from
|
||||
// the registry is left un-interpreted, for the most part. This is useful for
|
||||
// querying what kind of artifact a reference represents.
|
||||
//
|
||||
// See Head if you don't need the response body.
|
||||
func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
|
||||
acceptable := []types.MediaType{
|
||||
// Just to look at them.
|
||||
types.DockerManifestSchema1,
|
||||
types.DockerManifestSchema1Signed,
|
||||
}
|
||||
acceptable = append(acceptable, acceptableImageMediaTypes...)
|
||||
acceptable = append(acceptable, acceptableIndexMediaTypes...)
|
||||
return get(ref, acceptable, options...)
|
||||
}
|
||||
|
||||
// Head returns a v1.Descriptor for the given reference by issuing a HEAD
|
||||
// request.
|
||||
//
|
||||
// Note that the server response will not have a body, so any errors encountered
|
||||
// should be retried with Get to get more details.
|
||||
func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
|
||||
acceptable := []types.MediaType{
|
||||
// Just to look at them.
|
||||
types.DockerManifestSchema1,
|
||||
types.DockerManifestSchema1Signed,
|
||||
}
|
||||
acceptable = append(acceptable, acceptableImageMediaTypes...)
|
||||
acceptable = append(acceptable, acceptableIndexMediaTypes...)
|
||||
|
||||
o, err := makeOptions(ref.Context(), options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err := makeFetcher(ref, o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.headManifest(ref, acceptable)
|
||||
}
|
||||
|
||||
// Handle options and fetch the manifest with the acceptable MediaTypes in the
|
||||
// Accept header.
|
||||
func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
|
||||
o, err := makeOptions(ref.Context(), options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := makeFetcher(ref, o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, desc, err := f.fetchManifest(ref, acceptable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Descriptor{
|
||||
fetcher: *f,
|
||||
Manifest: b,
|
||||
Descriptor: *desc,
|
||||
platform: o.platform,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Image converts the Descriptor into a v1.Image.
|
||||
//
|
||||
// If the fetched artifact is already an image, it will just return it.
|
||||
//
|
||||
// If the fetched artifact is an index, it will attempt to resolve the index to
|
||||
// a child image with the appropriate platform.
|
||||
//
|
||||
// See WithPlatform to set the desired platform.
|
||||
func (d *Descriptor) Image() (v1.Image, error) {
|
||||
switch d.MediaType {
|
||||
case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
|
||||
// We don't care to support schema 1 images:
|
||||
// https://github.com/google/go-containerregistry/issues/377
|
||||
return nil, newErrSchema1(d.MediaType)
|
||||
case types.OCIImageIndex, types.DockerManifestList:
|
||||
// We want an image but the registry has an index, resolve it to an image.
|
||||
return d.remoteIndex().imageByPlatform(d.platform)
|
||||
case types.OCIManifestSchema1, types.DockerManifestSchema2:
|
||||
// These are expected. Enumerated here to allow a default case.
|
||||
default:
|
||||
// We could just return an error here, but some registries (e.g. static
|
||||
// registries) don't set the Content-Type headers correctly, so instead...
|
||||
logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType)
|
||||
}
|
||||
|
||||
// Wrap the v1.Layers returned by this v1.Image in a hint for downstream
|
||||
// remote.Write calls to facilitate cross-repo "mounting".
|
||||
imgCore, err := partial.CompressedToImage(d.remoteImage())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mountableImage{
|
||||
Image: imgCore,
|
||||
Reference: d.Ref,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ImageIndex converts the Descriptor into a v1.ImageIndex.
|
||||
func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) {
|
||||
switch d.MediaType {
|
||||
case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
|
||||
// We don't care to support schema 1 images:
|
||||
// https://github.com/google/go-containerregistry/issues/377
|
||||
return nil, newErrSchema1(d.MediaType)
|
||||
case types.OCIManifestSchema1, types.DockerManifestSchema2:
|
||||
// We want an index but the registry has an image, nothing we can do.
|
||||
return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType)
|
||||
case types.OCIImageIndex, types.DockerManifestList:
|
||||
// These are expected.
|
||||
default:
|
||||
// We could just return an error here, but some registries (e.g. static
|
||||
// registries) don't set the Content-Type headers correctly, so instead...
|
||||
logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType)
|
||||
}
|
||||
return d.remoteIndex(), nil
|
||||
}
|
||||
|
||||
func (d *Descriptor) remoteImage() *remoteImage {
|
||||
return &remoteImage{
|
||||
fetcher: d.fetcher,
|
||||
manifest: d.Manifest,
|
||||
mediaType: d.MediaType,
|
||||
descriptor: &d.Descriptor,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Descriptor) remoteIndex() *remoteIndex {
|
||||
return &remoteIndex{
|
||||
fetcher: d.fetcher,
|
||||
manifest: d.Manifest,
|
||||
mediaType: d.MediaType,
|
||||
descriptor: &d.Descriptor,
|
||||
}
|
||||
}
|
||||
|
||||
// fetcher implements methods for reading from a registry.
|
||||
type fetcher struct {
|
||||
Ref name.Reference
|
||||
Client *http.Client
|
||||
context context.Context
|
||||
}
|
||||
|
||||
func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
|
||||
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fetcher{
|
||||
Ref: ref,
|
||||
Client: &http.Client{Transport: tr},
|
||||
context: o.context,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// url returns a url.Url for the specified path in the context of this remote image reference.
|
||||
func (f *fetcher) url(resource, identifier string) url.URL {
|
||||
return url.URL{
|
||||
Scheme: f.Ref.Context().Registry.Scheme(),
|
||||
Host: f.Ref.Context().RegistryStr(),
|
||||
Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
|
||||
u := f.url("manifests", ref.Identifier())
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
accept := []string{}
|
||||
for _, mt := range acceptable {
|
||||
accept = append(accept, string(mt))
|
||||
}
|
||||
req.Header.Set("Accept", strings.Join(accept, ","))
|
||||
|
||||
resp, err := f.Client.Do(req.WithContext(f.context))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
manifest, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
digest, size, err := v1.SHA256(bytes.NewReader(manifest))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
mediaType := types.MediaType(resp.Header.Get("Content-Type"))
|
||||
contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
|
||||
if err == nil && mediaType == types.DockerManifestSchema1Signed {
|
||||
// If we can parse the digest from the header, and it's a signed schema 1
|
||||
// manifest, let's use that for the digest to appease older registries.
|
||||
digest = contentDigest
|
||||
}
|
||||
|
||||
// Validate the digest matches what we asked for, if pulling by digest.
|
||||
if dgst, ok := ref.(name.Digest); ok {
|
||||
if digest.String() != dgst.DigestStr() {
|
||||
return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
|
||||
}
|
||||
}
|
||||
// Do nothing for tags; I give up.
|
||||
//
|
||||
// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
|
||||
// but so many registries implement this incorrectly that it's not worth checking.
|
||||
//
|
||||
// For reference:
|
||||
// https://github.com/GoogleContainerTools/kaniko/issues/298
|
||||
|
||||
// Return all this info since we have to calculate it anyway.
|
||||
desc := v1.Descriptor{
|
||||
Digest: digest,
|
||||
Size: size,
|
||||
MediaType: mediaType,
|
||||
}
|
||||
|
||||
return manifest, &desc, nil
|
||||
}
|
||||
|
||||
func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
|
||||
u := f.url("manifests", ref.Identifier())
|
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
accept := []string{}
|
||||
for _, mt := range acceptable {
|
||||
accept = append(accept, string(mt))
|
||||
}
|
||||
req.Header.Set("Accept", strings.Join(accept, ","))
|
||||
|
||||
resp, err := f.Client.Do(req.WithContext(f.context))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mth := resp.Header.Get("Content-Type")
|
||||
if mth == "" {
|
||||
return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String())
|
||||
}
|
||||
mediaType := types.MediaType(mth)
|
||||
|
||||
size := resp.ContentLength
|
||||
if size == -1 {
|
||||
return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String())
|
||||
}
|
||||
|
||||
dh := resp.Header.Get("Docker-Content-Digest")
|
||||
if dh == "" {
|
||||
return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String())
|
||||
}
|
||||
digest, err := v1.NewHash(dh)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate the digest matches what we asked for, if pulling by digest.
|
||||
if dgst, ok := ref.(name.Digest); ok {
|
||||
if digest.String() != dgst.DigestStr() {
|
||||
return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
|
||||
}
|
||||
}
|
||||
|
||||
// Return all this info since we have to calculate it anyway.
|
||||
return &v1.Descriptor{
|
||||
Digest: digest,
|
||||
Size: size,
|
||||
MediaType: mediaType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) {
|
||||
u := f.url("blobs", h.String())
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := f.Client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do whatever we can.
|
||||
// If we have an expected size and Content-Length doesn't match, return an error.
|
||||
// If we don't have an expected size and we do have a Content-Length, use Content-Length.
|
||||
if hsize := resp.ContentLength; hsize != -1 {
|
||||
if size == verify.SizeUnknown {
|
||||
size = hsize
|
||||
} else if hsize != size {
|
||||
return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size)
|
||||
}
|
||||
}
|
||||
|
||||
return verify.ReadCloser(resp.Body, size, h)
|
||||
}
|
||||
|
||||
func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {
|
||||
u := f.url("blobs", h.String())
|
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := f.Client.Do(req.WithContext(f.context))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (f *fetcher) blobExists(h v1.Hash) (bool, error) {
|
||||
u := f.url("blobs", h.String())
|
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
resp, err := f.Client.Do(req.WithContext(f.context))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return resp.StatusCode == http.StatusOK, nil
|
||||
}
|
17
vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go
generated
vendored
Normal file
17
vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package remote provides facilities for reading/writing v1.Images from/to
|
||||
// a remote image registry.
|
||||
package remote
|
248
vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
generated
vendored
Normal file
248
vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
"github.com/google/go-containerregistry/internal/verify"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
var acceptableImageMediaTypes = []types.MediaType{
|
||||
types.DockerManifestSchema2,
|
||||
types.OCIManifestSchema1,
|
||||
}
|
||||
|
||||
// remoteImage accesses an image from a remote registry
|
||||
type remoteImage struct {
|
||||
fetcher
|
||||
manifestLock sync.Mutex // Protects manifest
|
||||
manifest []byte
|
||||
configLock sync.Mutex // Protects config
|
||||
config []byte
|
||||
mediaType types.MediaType
|
||||
descriptor *v1.Descriptor
|
||||
}
|
||||
|
||||
var _ partial.CompressedImageCore = (*remoteImage)(nil)
|
||||
|
||||
// Image provides access to a remote image reference.
|
||||
func Image(ref name.Reference, options ...Option) (v1.Image, error) {
|
||||
desc, err := Get(ref, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return desc.Image()
|
||||
}
|
||||
|
||||
func (r *remoteImage) MediaType() (types.MediaType, error) {
|
||||
if string(r.mediaType) != "" {
|
||||
return r.mediaType, nil
|
||||
}
|
||||
return types.DockerManifestSchema2, nil
|
||||
}
|
||||
|
||||
func (r *remoteImage) RawManifest() ([]byte, error) {
|
||||
r.manifestLock.Lock()
|
||||
defer r.manifestLock.Unlock()
|
||||
if r.manifest != nil {
|
||||
return r.manifest, nil
|
||||
}
|
||||
|
||||
// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
|
||||
// do type-checking via remote.Descriptor. I've left this here for tests that
|
||||
// directly instantiate a remoteImage.
|
||||
manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.descriptor == nil {
|
||||
r.descriptor = desc
|
||||
}
|
||||
r.mediaType = desc.MediaType
|
||||
r.manifest = manifest
|
||||
return r.manifest, nil
|
||||
}
|
||||
|
||||
func (r *remoteImage) RawConfigFile() ([]byte, error) {
|
||||
r.configLock.Lock()
|
||||
defer r.configLock.Unlock()
|
||||
if r.config != nil {
|
||||
return r.config, nil
|
||||
}
|
||||
|
||||
m, err := partial.Manifest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.Config.Data != nil {
|
||||
if err := verify.Descriptor(m.Config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.config = m.Config.Data
|
||||
return r.config, nil
|
||||
}
|
||||
|
||||
body, err := r.fetchBlob(r.context, m.Config.Size, m.Config.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer body.Close()
|
||||
|
||||
r.config, err = ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.config, nil
|
||||
}
|
||||
|
||||
// Descriptor retains the original descriptor from an index manifest.
|
||||
// See partial.Descriptor.
|
||||
func (r *remoteImage) Descriptor() (*v1.Descriptor, error) {
|
||||
// kind of a hack, but RawManifest does appropriate locking/memoization
|
||||
// and makes sure r.descriptor is populated.
|
||||
_, err := r.RawManifest()
|
||||
return r.descriptor, err
|
||||
}
|
||||
|
||||
// remoteImageLayer implements partial.CompressedLayer
|
||||
type remoteImageLayer struct {
|
||||
ri *remoteImage
|
||||
digest v1.Hash
|
||||
}
|
||||
|
||||
// Digest implements partial.CompressedLayer
|
||||
func (rl *remoteImageLayer) Digest() (v1.Hash, error) {
|
||||
return rl.digest, nil
|
||||
}
|
||||
|
||||
// Compressed implements partial.CompressedLayer. It streams the compressed
// blob, trying the registry's blobs endpoint first and then any alternate
// URLs listed in the layer's descriptor (foreign/urls layers). The returned
// reader verifies the content against the expected size and digest.
func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
	urls := []url.URL{rl.ri.url("blobs", rl.digest.String())}

	// Add alternative layer sources from URLs (usually none).
	d, err := partial.BlobDescriptor(rl, rl.digest)
	if err != nil {
		return nil, err
	}

	// If the manifest inlined the blob contents, serve it from memory
	// (still digest/size-verified) instead of making a request.
	if d.Data != nil {
		return verify.ReadCloser(ioutil.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest)
	}

	// We don't want to log binary layers -- this can break terminals.
	ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs")

	for _, s := range d.URLs {
		u, err := url.Parse(s)
		if err != nil {
			return nil, err
		}
		urls = append(urls, *u)
	}

	// The lastErr for most pulls will be the same (the first error), but for
	// foreign layers we'll want to surface the last one, since we try to pull
	// from the registry first, which would often fail.
	// TODO: Maybe we don't want to try pulling from the registry first?
	var lastErr error
	for _, u := range urls {
		req, err := http.NewRequest(http.MethodGet, u.String(), nil)
		if err != nil {
			return nil, err
		}

		resp, err := rl.ri.Client.Do(req.WithContext(ctx))
		if err != nil {
			lastErr = err
			continue
		}

		if err := transport.CheckError(resp, http.StatusOK); err != nil {
			// Close the body before moving on so the connection can be reused.
			resp.Body.Close()
			lastErr = err
			continue
		}

		return verify.ReadCloser(resp.Body, d.Size, rl.digest)
	}

	return nil, lastErr
}
|
||||
|
||||
// Manifest implements partial.WithManifest so that we can use partial.BlobSize below.
|
||||
func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) {
|
||||
return partial.Manifest(rl.ri)
|
||||
}
|
||||
|
||||
// MediaType implements v1.Layer
|
||||
func (rl *remoteImageLayer) MediaType() (types.MediaType, error) {
|
||||
bd, err := partial.BlobDescriptor(rl, rl.digest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return bd.MediaType, nil
|
||||
}
|
||||
|
||||
// Size implements partial.CompressedLayer
|
||||
func (rl *remoteImageLayer) Size() (int64, error) {
|
||||
// Look up the size of this digest in the manifest to avoid a request.
|
||||
return partial.BlobSize(rl, rl.digest)
|
||||
}
|
||||
|
||||
// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below.
|
||||
func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) {
|
||||
return partial.ConfigFile(rl.ri)
|
||||
}
|
||||
|
||||
// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have
|
||||
// available in our ConfigFile.
|
||||
func (rl *remoteImageLayer) DiffID() (v1.Hash, error) {
|
||||
return partial.BlobToDiffID(rl, rl.digest)
|
||||
}
|
||||
|
||||
// Descriptor retains the original descriptor from an image manifest.
|
||||
// See partial.Descriptor.
|
||||
func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) {
|
||||
return partial.BlobDescriptor(rl, rl.digest)
|
||||
}
|
||||
|
||||
// See partial.Exists.
|
||||
func (rl *remoteImageLayer) Exists() (bool, error) {
|
||||
return rl.ri.blobExists(rl.digest)
|
||||
}
|
||||
|
||||
// LayerByDigest implements partial.CompressedLayer
|
||||
func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
|
||||
return &remoteImageLayer{
|
||||
ri: r,
|
||||
digest: h,
|
||||
}, nil
|
||||
}
|
307
vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go
generated
vendored
Normal file
307
vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go
generated
vendored
Normal file
@ -0,0 +1,307 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/verify"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// acceptableIndexMediaTypes lists the manifest media types that may be
// resolved as an image index.
var acceptableIndexMediaTypes = []types.MediaType{
	types.DockerManifestList,
	types.OCIImageIndex,
}
|
||||
|
||||
// remoteIndex accesses an index from a remote registry
type remoteIndex struct {
	fetcher                      // provides Ref, Client, context and fetch helpers
	manifestLock sync.Mutex      // Protects manifest
	manifest     []byte          // memoized raw manifest bytes; nil until fetched
	mediaType    types.MediaType // media type reported for this index; may be empty
	descriptor   *v1.Descriptor  // descriptor this index was resolved from, if any
}
|
||||
|
||||
// Index provides access to a remote index reference.
|
||||
func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) {
|
||||
desc, err := get(ref, acceptableIndexMediaTypes, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return desc.ImageIndex()
|
||||
}
|
||||
|
||||
func (r *remoteIndex) MediaType() (types.MediaType, error) {
|
||||
if string(r.mediaType) != "" {
|
||||
return r.mediaType, nil
|
||||
}
|
||||
return types.DockerManifestList, nil
|
||||
}
|
||||
|
||||
func (r *remoteIndex) Digest() (v1.Hash, error) {
|
||||
return partial.Digest(r)
|
||||
}
|
||||
|
||||
func (r *remoteIndex) Size() (int64, error) {
|
||||
return partial.Size(r)
|
||||
}
|
||||
|
||||
// RawManifest returns the raw bytes of this index's manifest, fetching it
// from the registry on first use and memoizing it under manifestLock.
func (r *remoteIndex) RawManifest() ([]byte, error) {
	r.manifestLock.Lock()
	defer r.manifestLock.Unlock()
	if r.manifest != nil {
		return r.manifest, nil
	}

	// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
	// do type-checking via remote.Descriptor. I've left this here for tests that
	// directly instantiate a remoteIndex.
	manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes)
	if err != nil {
		return nil, err
	}

	// Keep a descriptor set at construction time; only fill it in from the
	// fetch if we didn't already have one.
	if r.descriptor == nil {
		r.descriptor = desc
	}
	r.mediaType = desc.MediaType
	r.manifest = manifest
	return r.manifest, nil
}
|
||||
|
||||
func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) {
|
||||
b, err := r.RawManifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1.ParseIndexManifest(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) {
|
||||
desc, err := r.childByHash(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Descriptor.Image will handle coercing nested indexes into an Image.
|
||||
return desc.Image()
|
||||
}
|
||||
|
||||
// Descriptor retains the original descriptor from an index manifest.
|
||||
// See partial.Descriptor.
|
||||
func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) {
|
||||
// kind of a hack, but RawManifest does appropriate locking/memoization
|
||||
// and makes sure r.descriptor is populated.
|
||||
_, err := r.RawManifest()
|
||||
return r.descriptor, err
|
||||
}
|
||||
|
||||
func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
|
||||
desc, err := r.childByHash(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return desc.ImageIndex()
|
||||
}
|
||||
|
||||
// Layer returns the child entry with the given digest as a v1.Layer,
// for index entries that are neither images nor indexes.
// Workaround for #819.
func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
	index, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, childDesc := range index.Manifests {
		if h == childDesc.Digest {
			l, err := partial.CompressedToLayer(&remoteLayer{
				fetcher: r.fetcher,
				digest:  h,
			})
			if err != nil {
				return nil, err
			}
			// Wrap in MountableLayer so a later push can try to
			// cross-repo mount the blob instead of re-uploading it.
			return &MountableLayer{
				Layer:     l,
				Reference: r.Ref.Context().Digest(h.String()),
			}, nil
		}
	}
	return nil, fmt.Errorf("layer not found: %s", h)
}
|
||||
|
||||
// Manifests returns each entry of this index, in manifest order, as an
// Image, an ImageIndex, or (for any other media type) a Layer.
//
// Experiment with a better API for v1.ImageIndex. We might want to move this
// to partial?
func (r *remoteIndex) Manifests() ([]partial.Describable, error) {
	m, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	manifests := []partial.Describable{}
	for _, desc := range m.Manifests {
		switch {
		case desc.MediaType.IsImage():
			img, err := r.Image(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, img)
		case desc.MediaType.IsIndex():
			idx, err := r.ImageIndex(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, idx)
		default:
			// Neither image nor index: fall back to a raw blob layer.
			layer, err := r.Layer(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, layer)
		}
	}

	return manifests, nil
}
|
||||
|
||||
func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
|
||||
desc, err := r.childByPlatform(platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Descriptor.Image will handle coercing nested indexes into an Image.
|
||||
return desc.Image()
|
||||
}
|
||||
|
||||
// childByPlatform returns the first child descriptor whose platform
// satisfies the requested one (per matchesPlatform).
//
// This naively matches the first manifest with matching platform attributes.
//
// We should probably use this instead:
//	github.com/containerd/containerd/platforms
//
// But first we'd need to migrate to:
//	github.com/opencontainers/image-spec/specs-go/v1
func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) {
	index, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, childDesc := range index.Manifests {
		// If platform is missing from child descriptor, assume it's amd64/linux.
		p := defaultPlatform
		if childDesc.Platform != nil {
			p = *childDesc.Platform
		}

		if matchesPlatform(p, platform) {
			return r.childDescriptor(childDesc, platform)
		}
	}
	return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.Ref)
}
|
||||
|
||||
func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {
|
||||
index, err := r.IndexManifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, childDesc := range index.Manifests {
|
||||
if h == childDesc.Digest {
|
||||
return r.childDescriptor(childDesc, defaultPlatform)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref)
|
||||
}
|
||||
|
||||
// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option.
func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) {
	ref := r.Ref.Context().Digest(child.Digest.String())
	var (
		manifest []byte
		err      error
	)
	if child.Data != nil {
		// The index inlined the child manifest: verify the bytes against
		// the descriptor's digest/size rather than fetching them.
		if err := verify.Descriptor(child); err != nil {
			return nil, err
		}
		manifest = child.Data
	} else {
		manifest, _, err = r.fetchManifest(ref, []types.MediaType{child.MediaType})
		if err != nil {
			return nil, err
		}
	}
	// The returned Descriptor reuses our client/context, pointed at the
	// child's digest reference.
	return &Descriptor{
		fetcher: fetcher{
			Ref:     ref,
			Client:  r.Client,
			context: r.context,
		},
		Manifest:   manifest,
		Descriptor: child,
		platform:   platform,
	}, nil
}
|
||||
|
||||
// matchesPlatform checks if the given platform matches the required platforms.
|
||||
// The given platform matches the required platform if
|
||||
// - architecture and OS are identical.
|
||||
// - OS version and variant are identical if provided.
|
||||
// - features and OS features of the required platform are subsets of those of the given platform.
|
||||
func matchesPlatform(given, required v1.Platform) bool {
|
||||
// Required fields that must be identical.
|
||||
if given.Architecture != required.Architecture || given.OS != required.OS {
|
||||
return false
|
||||
}
|
||||
|
||||
// Optional fields that may be empty, but must be identical if provided.
|
||||
if required.OSVersion != "" && given.OSVersion != required.OSVersion {
|
||||
return false
|
||||
}
|
||||
if required.Variant != "" && given.Variant != required.Variant {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify required platform's features are a subset of given platform's features.
|
||||
if !isSubset(given.OSFeatures, required.OSFeatures) {
|
||||
return false
|
||||
}
|
||||
if !isSubset(given.Features, required.Features) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// isSubset reports whether every string in required also appears in lst.
func isSubset(lst, required []string) bool {
	have := make(map[string]bool, len(lst))
	for _, v := range lst {
		have[v] = true
	}

	for _, v := range required {
		if !have[v] {
			return false
		}
	}

	return true
}
|
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
generated
vendored
Normal file
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
"github.com/google/go-containerregistry/internal/verify"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// remoteLayer implements partial.CompressedLayer for a blob addressed only
// by digest, independent of any particular image manifest.
type remoteLayer struct {
	fetcher         // registry reference, HTTP client, and context
	digest  v1.Hash // digest of the compressed blob
}
|
||||
|
||||
// Compressed implements partial.CompressedLayer
|
||||
func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
|
||||
// We don't want to log binary layers -- this can break terminals.
|
||||
ctx := redact.NewContext(rl.context, "omitting binary blobs from logs")
|
||||
return rl.fetchBlob(ctx, verify.SizeUnknown, rl.digest)
|
||||
}
|
||||
|
||||
// Size implements partial.CompressedLayer by issuing a HEAD request for
// the blob and reporting the response's Content-Length.
func (rl *remoteLayer) Size() (int64, error) {
	resp, err := rl.headBlob(rl.digest)
	if err != nil {
		return -1, err
	}
	defer resp.Body.Close()
	return resp.ContentLength, nil
}
|
||||
|
||||
// Digest implements partial.CompressedLayer
|
||||
func (rl *remoteLayer) Digest() (v1.Hash, error) {
|
||||
return rl.digest, nil
|
||||
}
|
||||
|
||||
// MediaType implements v1.Layer. With only a digest and no manifest to
// consult, we report the standard Docker layer media type.
func (rl *remoteLayer) MediaType() (types.MediaType, error) {
	return types.DockerLayer, nil
}
|
||||
|
||||
// See partial.Exists.
|
||||
func (rl *remoteLayer) Exists() (bool, error) {
|
||||
return rl.blobExists(rl.digest)
|
||||
}
|
||||
|
||||
// Layer reads the given blob reference from a registry as a Layer. A blob
// reference here is just a punned name.Digest where the digest portion is the
// digest of the blob to be read and the repository portion is the repo where
// that blob lives.
func Layer(ref name.Digest, options ...Option) (v1.Layer, error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return nil, err
	}
	f, err := makeFetcher(ref, o)
	if err != nil {
		return nil, err
	}
	h, err := v1.NewHash(ref.Identifier())
	if err != nil {
		return nil, err
	}
	l, err := partial.CompressedToLayer(&remoteLayer{
		fetcher: *f,
		digest:  h,
	})
	if err != nil {
		return nil, err
	}
	// Wrap in MountableLayer so a subsequent push can attempt a
	// cross-repository mount instead of re-uploading the blob.
	return &MountableLayer{
		Layer:     l,
		Reference: ref,
	}, nil
}
|
141
vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
generated
vendored
Normal file
141
vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
// tags models the body of a registry /v2/<repo>/tags/list response.
type tags struct {
	Name string   `json:"name"` // repository name
	Tags []string `json:"tags"` // tags returned in this page of results
}
|
||||
|
||||
// ListWithContext calls List with the given context.
|
||||
//
|
||||
// Deprecated: Use List and WithContext. This will be removed in a future release.
|
||||
func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) {
|
||||
return List(repo, append(options, WithContext(ctx))...)
|
||||
}
|
||||
|
||||
// List calls /tags/list for the given repository, returning the list of tags
// in the "tags" property.
func List(repo name.Repository, options ...Option) ([]string, error) {
	o, err := makeOptions(repo, options...)
	if err != nil {
		return nil, err
	}
	// Listing tags only requires pull scope.
	scopes := []string{repo.Scope(transport.PullScope)}
	tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
	if err != nil {
		return nil, err
	}

	uri := &url.URL{
		Scheme: repo.Registry.Scheme(),
		Host:   repo.Registry.RegistryStr(),
		Path:   fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
	}

	// Ask the registry for pages of at most pageSize entries, if configured.
	if o.pageSize > 0 {
		uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize)
	}

	client := http.Client{Transport: tr}
	tagList := []string{}
	parsed := tags{}

	// get responses until there is no next page
	for {
		// Bail out promptly if the caller's context was canceled.
		select {
		case <-o.context.Done():
			return nil, o.context.Err()
		default:
		}

		req, err := http.NewRequestWithContext(o.context, "GET", uri.String(), nil)
		if err != nil {
			return nil, err
		}

		resp, err := client.Do(req)
		if err != nil {
			return nil, err
		}

		if err := transport.CheckError(resp, http.StatusOK); err != nil {
			return nil, err
		}

		if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
			return nil, err
		}

		if err := resp.Body.Close(); err != nil {
			return nil, err
		}

		tagList = append(tagList, parsed.Tags...)

		// Follow RFC 5988-style Link headers for pagination.
		uri, err = getNextPageURL(resp)
		if err != nil {
			return nil, err
		}
		// no next page
		if uri == nil {
			break
		}
	}

	return tagList, nil
}
|
||||
|
||||
// getNextPageURL checks if there is a Link header in a http.Response which
|
||||
// contains a link to the next page. If yes it returns the url.URL of the next
|
||||
// page otherwise it returns nil.
|
||||
func getNextPageURL(resp *http.Response) (*url.URL, error) {
|
||||
link := resp.Header.Get("Link")
|
||||
if link == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if link[0] != '<' {
|
||||
return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link)
|
||||
}
|
||||
|
||||
end := strings.Index(link, ">")
|
||||
if end == -1 {
|
||||
return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link)
|
||||
}
|
||||
link = link[1:end]
|
||||
|
||||
linkURL, err := url.Parse(link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Request == nil || resp.Request.URL == nil {
|
||||
return nil, nil
|
||||
}
|
||||
linkURL = resp.Request.URL.ResolveReference(linkURL)
|
||||
return linkURL, nil
|
||||
}
|
95
vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go
generated
vendored
Normal file
95
vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
)
|
||||
|
||||
// MountableLayer wraps a v1.Layer in a shim that enables the layer to be
// "mounted" when published to another registry.
type MountableLayer struct {
	v1.Layer

	// Reference names where this layer already lives, so a writer can
	// attempt a cross-repository mount instead of re-uploading the blob.
	Reference name.Reference
}
|
||||
|
||||
// Descriptor retains the original descriptor from an image manifest.
|
||||
// See partial.Descriptor.
|
||||
func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) {
|
||||
return partial.Descriptor(ml.Layer)
|
||||
}
|
||||
|
||||
// Exists is a hack. See partial.Exists.
|
||||
func (ml *MountableLayer) Exists() (bool, error) {
|
||||
return partial.Exists(ml.Layer)
|
||||
}
|
||||
|
||||
// mountableImage wraps the v1.Layer references returned by the embedded v1.Image
// in MountableLayer's so that remote.Write might attempt to mount them from their
// source repository.
type mountableImage struct {
	v1.Image

	// Reference is the source repository each wrapped layer is mountable from.
	Reference name.Reference
}
|
||||
|
||||
// Layers implements v1.Image
|
||||
func (mi *mountableImage) Layers() ([]v1.Layer, error) {
|
||||
ls, err := mi.Image.Layers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mls := make([]v1.Layer, 0, len(ls))
|
||||
for _, l := range ls {
|
||||
mls = append(mls, &MountableLayer{
|
||||
Layer: l,
|
||||
Reference: mi.Reference,
|
||||
})
|
||||
}
|
||||
return mls, nil
|
||||
}
|
||||
|
||||
// LayerByDigest implements v1.Image
|
||||
func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) {
|
||||
l, err := mi.Image.LayerByDigest(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MountableLayer{
|
||||
Layer: l,
|
||||
Reference: mi.Reference,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LayerByDiffID implements v1.Image
|
||||
func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) {
|
||||
l, err := mi.Image.LayerByDiffID(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MountableLayer{
|
||||
Layer: l,
|
||||
Reference: mi.Reference,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Descriptor retains the original descriptor from an index manifest.
|
||||
// See partial.Descriptor.
|
||||
func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) {
|
||||
return partial.Descriptor(mi.Image)
|
||||
}
|
303
vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go
generated
vendored
Normal file
303
vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// MultiWrite writes the given Images or ImageIndexes to the given refs, as
// efficiently as possible, by deduping shared layer blobs and uploading layers
// in parallel, then uploading all manifests in parallel.
//
// Current limitations:
// - All refs must share the same repository.
// - Images cannot consist of stream.Layers.
func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) {
	// Determine the repository being pushed to; if asked to push to
	// multiple repositories, give up.
	var repo, zero name.Repository
	for ref := range m {
		if repo == zero {
			repo = ref.Context()
		} else if ref.Context() != repo {
			return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context())
		}
	}

	o, err := makeOptions(repo, options...)
	if err != nil {
		return err
	}

	// Collect unique blobs (layers and config blobs).
	blobs := map[v1.Hash]v1.Layer{}
	// newManifests[lvl] holds manifests discovered at nesting depth lvl;
	// deeper levels must be pushed before the levels that reference them.
	newManifests := []map[name.Reference]Taggable{}
	// Separate originally requested images and indexes, so we can push images first.
	images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{}
	for ref, i := range m {
		if img, ok := i.(v1.Image); ok {
			images[ref] = i
			if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil {
				return err
			}
			continue
		}
		if idx, ok := i.(v1.ImageIndex); ok {
			indexes[ref] = i
			newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts)
			if err != nil {
				return err
			}
			continue
		}
		return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i)
	}

	// Determine if any of the layers are Mountable, because if so we need
	// to request Pull scope too.
	ls := []v1.Layer{}
	for _, l := range blobs {
		ls = append(ls, l)
	}
	scopes := scopesForUploadingImage(repo, ls)
	tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:       repo,
		client:     &http.Client{Transport: tr},
		context:    o.context,
		updates:    o.updates,
		lastUpdate: &v1.Update{},
		backoff:    o.retryBackoff,
		predicate:  o.retryPredicate,
	}

	// Collect the total size of blobs and manifests we're about to write.
	if o.updates != nil {
		// Deferred in LIFO order: the final error (rerr) is sent on the
		// channel before it is closed.
		defer close(o.updates)
		defer func() { _ = sendError(o.updates, rerr) }()
		for _, b := range blobs {
			size, err := b.Size()
			if err != nil {
				return err
			}
			w.lastUpdate.Total += size
		}
		countManifest := func(t Taggable) error {
			b, err := t.RawManifest()
			if err != nil {
				return err
			}
			w.lastUpdate.Total += int64(len(b))
			return nil
		}
		for _, i := range images {
			if err := countManifest(i); err != nil {
				return err
			}
		}
		for _, nm := range newManifests {
			for _, i := range nm {
				if err := countManifest(i); err != nil {
					return err
				}
			}
		}
		for _, i := range indexes {
			if err := countManifest(i); err != nil {
				return err
			}
		}
	}

	// Upload individual blobs and collect any errors.
	blobChan := make(chan v1.Layer, 2*o.jobs)
	ctx := o.context
	g, gctx := errgroup.WithContext(o.context)
	for i := 0; i < o.jobs; i++ {
		// Start N workers consuming blobs to upload.
		g.Go(func() error {
			for b := range blobChan {
				if err := w.uploadOne(gctx, b); err != nil {
					return err
				}
			}
			return nil
		})
	}
	// Producer goroutine: feeds the workers and closes the channel when done
	// (or when the group's context is canceled by a failing worker).
	g.Go(func() error {
		defer close(blobChan)
		for _, b := range blobs {
			select {
			case blobChan <- b:
			case <-gctx.Done():
				return gctx.Err()
			}
		}
		return nil
	})
	if err := g.Wait(); err != nil {
		return err
	}

	commitMany := func(ctx context.Context, m map[name.Reference]Taggable) error {
		g, ctx := errgroup.WithContext(ctx)
		// With all of the constituent elements uploaded, upload the manifests
		// to commit the images and indexes, and collect any errors.
		type task struct {
			i   Taggable
			ref name.Reference
		}
		taskChan := make(chan task, 2*o.jobs)
		for i := 0; i < o.jobs; i++ {
			// Start N workers consuming tasks to upload manifests.
			g.Go(func() error {
				for t := range taskChan {
					if err := w.commitManifest(ctx, t.i, t.ref); err != nil {
						return err
					}
				}
				return nil
			})
		}
		go func() {
			for ref, i := range m {
				taskChan <- task{i, ref}
			}
			close(taskChan)
		}()
		return g.Wait()
	}
	// Push originally requested image manifests. These have no
	// dependencies.
	if err := commitMany(ctx, images); err != nil {
		return err
	}
	// Push new manifests from lowest levels up.
	for i := len(newManifests) - 1; i >= 0; i-- {
		if err := commitMany(ctx, newManifests[i]); err != nil {
			return err
		}
	}
	// Push originally requested index manifests, which might depend on
	// newly discovered manifests.

	return commitMany(ctx, indexes)
}
|
||||
|
||||
// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
// returns the latest copy of the ordered collection of manifests to upload.
// lvl is the current nesting depth within the root index; each depth gets
// its own map in newManifests so callers can push deepest-first.
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) {
	// Grow the per-level list if this is the deepest level seen so far.
	if lvl > len(newManifests)-1 {
		newManifests = append(newManifests, map[name.Reference]Taggable{})
	}

	im, err := idx.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, desc := range im.Manifests {
		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			idx, err := idx.ImageIndex(desc.Digest)
			if err != nil {
				return nil, err
			}
			// Recurse into the child index one level deeper.
			newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts)
			if err != nil {
				return nil, err
			}

			// Also track the sub-index manifest to upload later by digest.
			newManifests[lvl][repo.Digest(desc.Digest.String())] = idx
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			img, err := idx.Image(desc.Digest)
			if err != nil {
				return nil, err
			}
			if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil {
				return nil, err
			}

			// Also track the sub-image manifest to upload later by digest.
			newManifests[lvl][repo.Digest(desc.Digest.String())] = img
		default:
			// Workaround for #819.
			if wl, ok := idx.(withLayer); ok {
				layer, err := wl.Layer(desc.Digest)
				if err != nil {
					return nil, err
				}
				if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil {
					return nil, err
				}
			} else {
				return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
|
||||
// Ignore foreign layers.
|
||||
mt, err := l.MediaType()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mt.IsDistributable() || allowNondistributableArtifacts {
|
||||
d, err := l.Digest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blobs[d] = l
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
|
||||
ls, err := img.Layers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Collect all layers.
|
||||
for _, l := range ls {
|
||||
if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Collect config blob.
|
||||
cl, err := partial.ConfigLayer(img)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return addLayerBlob(cl, blobs, allowNondistributableArtifacts)
|
||||
}
|
284
vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
generated
vendored
Normal file
284
vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/retry"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
// Option is a functional option for remote operations.
type Option func(*options) error

// options holds the configuration accumulated by applying Options. It is
// never exposed to callers; set fields via the With* constructors.
type options struct {
	auth      authn.Authenticator // credentials for registry requests
	keychain  authn.Keychain      // if set, resolved into auth by makeOptions
	transport http.RoundTripper
	platform  v1.Platform
	context   context.Context
	jobs      int    // parallelism for operations that support it
	userAgent string // extra User-Agent fragment
	allowNondistributableArtifacts bool
	updates                        chan<- v1.Update // progress updates, if requested
	pageSize                       int              // "n" parameter for paginated list requests
	retryBackoff                   Backoff
	retryPredicate                 retry.Predicate
}
|
||||
|
||||
// defaultPlatform is used to resolve an index to an image when the caller
// does not override it via WithPlatform.
var defaultPlatform = v1.Platform{
	Architecture: "amd64",
	OS:           "linux",
}

// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib
type Backoff = retry.Backoff

// defaultRetryPredicate retries temporary errors and common network failure
// modes (unexpected EOF, broken pipe), logging a warning each time.
var defaultRetryPredicate retry.Predicate = func(err error) bool {
	// Various failure modes here, as we're often reading from and writing to
	// the network.
	if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.EPIPE) {
		logs.Warn.Printf("retrying %v", err)
		return true
	}
	return false
}

// Try this three times, waiting 1s after first failure, 3s after second.
var defaultRetryBackoff = Backoff{
	Duration: 1.0 * time.Second,
	Factor:   3.0,
	Jitter:   0.1,
	Steps:    3,
}

const (
	// defaultJobs is the default parallelism (see WithJobs).
	defaultJobs = 4

	// ECR returns an error if n > 1000:
	// https://github.com/google/go-containerregistry/issues/1091
	defaultPageSize = 1000
)
|
||||
|
||||
// DefaultTransport is based on http.DefaultTransport with modifications
// documented inline below.
var DefaultTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		// By default we wrap the transport in retries, so reduce the
		// default dial timeout to 5s to avoid 5x 30s of connection
		// timeouts when doing the "ping" on certain http registries.
		Timeout:   5 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ForceAttemptHTTP2: true,
	// Remaining fields match http.DefaultTransport's values.
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}
|
||||
|
||||
// makeOptions builds the effective configuration for a remote operation:
// defaults first, then each supplied Option in order, then keychain
// resolution against target, and finally transport wrapping (logging, retry,
// user-agent) unless the caller supplied a transport.Wrapper.
func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
	o := &options{
		auth:           authn.Anonymous,
		transport:      DefaultTransport,
		platform:       defaultPlatform,
		context:        context.Background(),
		jobs:           defaultJobs,
		pageSize:       defaultPageSize,
		retryPredicate: defaultRetryPredicate,
		retryBackoff:   defaultRetryBackoff,
	}

	for _, option := range opts {
		if err := option(o); err != nil {
			return nil, err
		}
	}

	// If a keychain was supplied, resolve it for the target; the result
	// replaces whatever authenticator is currently set.
	if o.keychain != nil {
		auth, err := o.keychain.Resolve(target)
		if err != nil {
			return nil, err
		}
		o.auth = auth
	}

	// transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping.
	// This is to allow consumers full control over the transports logic, such as providing retry logic.
	if _, ok := o.transport.(*transport.Wrapper); !ok {
		// Wrap the transport in something that logs requests and responses.
		// It's expensive to generate the dumps, so skip it if we're writing
		// to nothing.
		if logs.Enabled(logs.Debug) {
			o.transport = transport.NewLogger(o.transport)
		}

		// Wrap the transport in something that can retry network flakes.
		o.transport = transport.NewRetry(o.transport)

		// Wrap this last to prevent transport.New from double-wrapping.
		if o.userAgent != "" {
			o.transport = transport.NewUserAgent(o.transport, o.userAgent)
		}
	}

	return o, nil
}
|
||||
|
||||
// WithTransport is a functional option for overriding the default transport
|
||||
// for remote operations.
|
||||
// If transport.Wrapper is provided, this signals that the consumer does *not* want any further wrapping to occur.
|
||||
// i.e. logging, retry and useragent
|
||||
//
|
||||
// The default transport is DefaultTransport.
|
||||
func WithTransport(t http.RoundTripper) Option {
|
||||
return func(o *options) error {
|
||||
o.transport = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuth is a functional option for overriding the default authenticator
|
||||
// for remote operations.
|
||||
//
|
||||
// The default authenticator is authn.Anonymous.
|
||||
func WithAuth(auth authn.Authenticator) Option {
|
||||
return func(o *options) error {
|
||||
o.auth = auth
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthFromKeychain is a functional option for overriding the default
|
||||
// authenticator for remote operations, using an authn.Keychain to find
|
||||
// credentials.
|
||||
//
|
||||
// The default authenticator is authn.Anonymous.
|
||||
func WithAuthFromKeychain(keys authn.Keychain) Option {
|
||||
return func(o *options) error {
|
||||
o.keychain = keys
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPlatform is a functional option for overriding the default platform
|
||||
// that Image and Descriptor.Image use for resolving an index to an image.
|
||||
//
|
||||
// The default platform is amd64/linux.
|
||||
func WithPlatform(p v1.Platform) Option {
|
||||
return func(o *options) error {
|
||||
o.platform = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext is a functional option for setting the context in http requests
|
||||
// performed by a given function. Note that this context is used for _all_
|
||||
// http requests, not just the initial volley. E.g., for remote.Image, the
|
||||
// context will be set on http requests generated by subsequent calls to
|
||||
// RawConfigFile() and even methods on layers returned by Layers().
|
||||
//
|
||||
// The default context is context.Background().
|
||||
func WithContext(ctx context.Context) Option {
|
||||
return func(o *options) error {
|
||||
o.context = ctx
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobs is a functional option for setting the parallelism of remote
|
||||
// operations performed by a given function. Note that not all remote
|
||||
// operations support parallelism.
|
||||
//
|
||||
// The default value is 4.
|
||||
func WithJobs(jobs int) Option {
|
||||
return func(o *options) error {
|
||||
if jobs <= 0 {
|
||||
return errors.New("jobs must be greater than zero")
|
||||
}
|
||||
o.jobs = jobs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithUserAgent adds the given string to the User-Agent header for any HTTP
|
||||
// requests. This header will also include "go-containerregistry/${version}".
|
||||
//
|
||||
// If you want to completely overwrite the User-Agent header, use WithTransport.
|
||||
func WithUserAgent(ua string) Option {
|
||||
return func(o *options) error {
|
||||
o.userAgent = ua
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNondistributable includes non-distributable (foreign) layers
|
||||
// when writing images, see:
|
||||
// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
|
||||
//
|
||||
// The default behaviour is to skip these layers
|
||||
func WithNondistributable(o *options) error {
|
||||
o.allowNondistributableArtifacts = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithProgress takes a channel that will receive progress updates as bytes are written.
|
||||
//
|
||||
// Sending updates to an unbuffered channel will block writes, so callers
|
||||
// should provide a buffered channel to avoid potential deadlocks.
|
||||
func WithProgress(updates chan<- v1.Update) Option {
|
||||
return func(o *options) error {
|
||||
o.updates = updates
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPageSize sets the given size as the value of parameter 'n' in the request.
|
||||
//
|
||||
// To omit the `n` parameter entirely, use WithPageSize(0).
|
||||
// The default value is 1000.
|
||||
func WithPageSize(size int) Option {
|
||||
return func(o *options) error {
|
||||
o.pageSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryBackoff sets the httpBackoff for retry HTTP operations.
|
||||
func WithRetryBackoff(backoff Backoff) Option {
|
||||
return func(o *options) error {
|
||||
o.retryBackoff = backoff
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryPredicate sets the predicate for retry HTTP operations.
|
||||
func WithRetryPredicate(predicate retry.Predicate) Option {
|
||||
return func(o *options) error {
|
||||
o.retryPredicate = predicate
|
||||
return nil
|
||||
}
|
||||
}
|
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
Normal file
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
||||
# `transport`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport)
|
||||
|
||||
The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**.
|
||||
|
||||
This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper)
|
||||
that transparently performs:
|
||||
* [Token
|
||||
Authentication](https://docs.docker.com/registry/spec/auth/token/) and
|
||||
* [OAuth2
|
||||
Authentication](https://docs.docker.com/registry/spec/auth/oauth/)
|
||||
|
||||
for registry clients.
|
||||
|
||||
## Raison d'être
|
||||
|
||||
> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client?
|
||||
|
||||
Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
|
||||
|
||||
As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
|
||||
|
||||
![docker/distribution](../../../../images/docker.dot.svg)
|
||||
|
||||
> Why does it matter if you depend on prometheus? Who cares?
|
||||
|
||||
It's generally polite to your downstream to reduce the number of dependencies your package requires:
|
||||
|
||||
* Downloading your package is faster, which helps our Australian friends and people on airplanes.
|
||||
* There is less code to compile, which speeds up builds and saves the planet from global warming.
|
||||
* You reduce the likelihood of inflicting dependency hell upon your consumers.
|
||||
* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
|
||||
|
||||
> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
|
||||
|
||||
Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
|
||||
|
||||
![containerd/containerd](../../../../images/containerd.dot.svg)
|
||||
|
||||
> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
|
||||
|
||||
That just uses the `docker/distribution` client... and more!
|
||||
|
||||
![containers/image](../../../../images/containers.dot.svg)
|
||||
|
||||
> Wow, what about this package?
|
||||
|
||||
Of course, this package isn't perfect either. `transport` depends on `authn`,
|
||||
which in turn depends on docker's config file parsing and handling package,
|
||||
which you don't strictly need but almost certainly want if you're going to be
|
||||
interacting with a registry.
|
||||
|
||||
![google/go-containerregistry](../../../../images/ggcr.dot.svg)
|
||||
|
||||
*These graphs were generated by
|
||||
[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
|
||||
|
||||
## Usage
|
||||
|
||||
This is heavily used by the
|
||||
[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
|
||||
package, which implements higher level image-centric functionality, but this
|
||||
package is useful if you want to interact directly with the registry to do
|
||||
something that `remote` doesn't support, e.g. [to handle with schema 1
|
||||
images](https://github.com/google/go-containerregistry/pull/509).
|
||||
|
||||
This package also includes some [error
|
||||
handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors)
|
||||
facilities in the form of
|
||||
[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError),
|
||||
which will parse the response body into a structured error for unexpected http
|
||||
status codes.
|
||||
|
||||
Here's a "simple" program that writes the result of
|
||||
[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags)
|
||||
for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause)
|
||||
to stdout.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
func main() {
|
||||
repo, err := name.NewRepository("gcr.io/google-containers/pause")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG.
|
||||
auth, err := authn.DefaultKeychain.Resolve(repo.Registry)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause.
|
||||
scopes := []string{repo.Scope(transport.PullScope)}
|
||||
t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
client := &http.Client{Transport: t}
|
||||
|
||||
// Make the actual request.
|
||||
resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Assert that we get a 200, otherwise attempt to parse body as a structured error.
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write the response to stdout.
|
||||
if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
62
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
Normal file
62
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
)
|
||||
|
||||
// basicTransport attaches an Authorization header (Bearer, Basic, or a
// pre-encoded credential) to requests bound for its target host.
type basicTransport struct {
	inner  http.RoundTripper   // underlying transport that actually sends requests
	auth   authn.Authenticator // credential source; authn.Anonymous sends no header
	target string              // host for which credentials may be attached
}

var _ http.RoundTripper = (*basicTransport)(nil)
|
||||
|
||||
// RoundTrip implements http.RoundTripper.
//
// It sets the Authorization header only for requests addressed to bt.target,
// preferring a registry token, then username/password, then a pre-encoded
// Auth value.
func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	if bt.auth != authn.Anonymous {
		auth, err := bt.auth.Authorization()
		if err != nil {
			return nil, err
		}

		// http.Client handles redirects at a layer above the http.RoundTripper
		// abstraction, so to avoid forwarding Authorization headers to places
		// we are redirected, only set it when the authorization header matches
		// the host with which we are interacting.
		// In case of redirect http.Client can use an empty Host, check URL too.
		if in.Host == bt.target || in.URL.Host == bt.target {
			if bearer := auth.RegistryToken; bearer != "" {
				// A registry token is used directly as a Bearer credential.
				hdr := fmt.Sprintf("Bearer %s", bearer)
				in.Header.Set("Authorization", hdr)
			} else if user, pass := auth.Username, auth.Password; user != "" && pass != "" {
				// Standard HTTP basic auth: base64("user:pass").
				delimited := fmt.Sprintf("%s:%s", user, pass)
				encoded := base64.StdEncoding.EncodeToString([]byte(delimited))
				hdr := fmt.Sprintf("Basic %s", encoded)
				in.Header.Set("Authorization", hdr)
			} else if token := auth.Auth; token != "" {
				// Auth is used verbatim as the Basic credential value.
				hdr := fmt.Sprintf("Basic %s", token)
				in.Header.Set("Authorization", hdr)
			}
		}
	}
	return bt.inner.RoundTrip(in)
}
|
313
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
Normal file
313
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
Normal file
@ -0,0 +1,313 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// bearerTransport exchanges basic credentials for bearer tokens at a token
// service and attaches them to registry requests, refreshing on
// WWW-Authenticate challenges.
type bearerTransport struct {
	// Wrapped by bearerTransport.
	inner http.RoundTripper
	// Basic credentials that we exchange for bearer tokens.
	basic authn.Authenticator
	// Holds the bearer response from the token service.
	bearer authn.AuthConfig
	// Registry to which we send bearer tokens.
	registry name.Registry
	// See https://tools.ietf.org/html/rfc6750#section-3
	realm string
	// See https://docs.docker.com/registry/spec/auth/token/
	service string
	scopes  []string
	// Scheme we should use, determined by ping response.
	scheme string
}

var _ http.RoundTripper = (*bearerTransport)(nil)

// portMap maps URL schemes to their default ports, used when canonicalizing
// host addresses that omit an explicit port.
var portMap = map[string]string{
	"http":  "80",
	"https": "443",
}
|
||||
|
||||
// stringSet converts a slice of strings into a set for cheap membership
// checks; duplicates collapse into a single entry.
func stringSet(ss []string) map[string]struct{} {
	set := make(map[string]struct{}, len(ss))
	for _, v := range ss {
		set[v] = struct{}{}
	}
	return set
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper.
//
// It sends the request with the current bearer token; if the response carries
// a WWW-Authenticate challenge, it widens the requested scopes as needed,
// refreshes the token, and retries the request once.
func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	sendRequest := func() (*http.Response, error) {
		// http.Client handles redirects at a layer above the http.RoundTripper
		// abstraction, so to avoid forwarding Authorization headers to places
		// we are redirected, only set it when the authorization header matches
		// the registry with which we are interacting.
		// In case of redirect http.Client can use an empty Host, check URL too.
		if matchesHost(bt.registry, in, bt.scheme) {
			hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken)
			in.Header.Set("Authorization", hdr)
		}
		return bt.inner.RoundTrip(in)
	}

	res, err := sendRequest()
	if err != nil {
		return nil, err
	}

	// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
	if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
		for _, wac := range challenges {
			// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
			if scope, ok := wac.Parameters["scope"]; ok {
				// From https://tools.ietf.org/html/rfc6750#section-3
				// The "scope" attribute is defined in Section 3.3 of [RFC6749].  The
				// "scope" attribute is a space-delimited list of case-sensitive scope
				// values indicating the required scope of the access token for
				// accessing the requested resource.
				scopes := strings.Split(scope, " ")

				// Add any scopes that we don't already request.
				got := stringSet(bt.scopes)
				for _, want := range scopes {
					if _, ok := got[want]; !ok {
						bt.scopes = append(bt.scopes, want)
					}
				}
			}
		}

		// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.

		// Retry the request to attempt to get a valid token.
		if err = bt.refresh(in.Context()); err != nil {
			return nil, err
		}
		return sendRequest()
	}

	return res, err
}
|
||||
|
||||
// refresh obtains a fresh bearer token from the token service and stores it
// in bt.bearer.
//
// It's unclear which authentication flow to use based purely on the protocol,
// so we rely on heuristics and fallbacks to support as many registries as possible.
// The basic token exchange is attempted first, falling back to the oauth flow.
// If the IdentityToken is set, this indicates that we should start with the oauth flow.
func (bt *bearerTransport) refresh(ctx context.Context) error {
	auth, err := bt.basic.Authorization()
	if err != nil {
		return err
	}

	// A static registry token requires no exchange; use it directly.
	if auth.RegistryToken != "" {
		bt.bearer.RegistryToken = auth.RegistryToken
		return nil
	}

	var content []byte
	if auth.IdentityToken != "" {
		// If the secret being stored is an identity token,
		// the Username should be set to <token>, which indicates
		// we are using an oauth flow.
		content, err = bt.refreshOauth(ctx)
		var terr *Error
		if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
			// Note: Not all token servers implement oauth2.
			// If the request to the endpoint returns 404 using the HTTP POST method,
			// refer to Token Documentation for using the HTTP GET method supported by all token servers.
			content, err = bt.refreshBasic(ctx)
		}
	} else {
		content, err = bt.refreshBasic(ctx)
	}
	if err != nil {
		return err
	}

	// Some registries don't have "token" in the response. See #54.
	type tokenResponse struct {
		Token        string `json:"token"`
		AccessToken  string `json:"access_token"`
		RefreshToken string `json:"refresh_token"`
		// TODO: handle expiry?
	}

	var response tokenResponse
	if err := json.Unmarshal(content, &response); err != nil {
		return err
	}

	// Some registries set access_token instead of token.
	if response.AccessToken != "" {
		response.Token = response.AccessToken
	}

	// Find a token to turn into a Bearer authenticator
	if response.Token != "" {
		bt.bearer.RegistryToken = response.Token
	} else {
		return fmt.Errorf("no token in bearer response:\n%s", content)
	}

	// If we obtained a refresh token from the oauth flow, use that for refresh() now.
	if response.RefreshToken != "" {
		bt.basic = authn.FromConfig(authn.AuthConfig{
			IdentityToken: response.RefreshToken,
		})
	}

	return nil
}
|
||||
|
||||
func matchesHost(reg name.Registry, in *http.Request, scheme string) bool {
|
||||
canonicalHeaderHost := canonicalAddress(in.Host, scheme)
|
||||
canonicalURLHost := canonicalAddress(in.URL.Host, scheme)
|
||||
canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme)
|
||||
return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost
|
||||
}
|
||||
|
||||
func canonicalAddress(host, scheme string) (address string) {
|
||||
// The host may be any one of:
|
||||
// - hostname
|
||||
// - hostname:port
|
||||
// - ipv4
|
||||
// - ipv4:port
|
||||
// - ipv6
|
||||
// - [ipv6]:port
|
||||
// As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt
|
||||
// to call it when we know that the address contains a port
|
||||
if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) {
|
||||
hostname, port, err := net.SplitHostPort(host)
|
||||
if err != nil {
|
||||
return host
|
||||
}
|
||||
if port == "" {
|
||||
port = portMap[scheme]
|
||||
}
|
||||
|
||||
return net.JoinHostPort(hostname, port)
|
||||
}
|
||||
|
||||
return net.JoinHostPort(host, portMap[scheme])
|
||||
}
|
||||
|
||||
// refreshOauth exchanges the stored credentials (a refresh token, or
// username/password) for a bearer token via the OAuth2 token endpoint,
// returning the raw JSON response body.
//
// https://docs.docker.com/registry/spec/auth/oauth/
func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) {
	auth, err := bt.basic.Authorization()
	if err != nil {
		return nil, err
	}

	u, err := url.Parse(bt.realm)
	if err != nil {
		return nil, err
	}

	// Build the form-encoded token request per the oauth2 spec.
	v := url.Values{}
	v.Set("scope", strings.Join(bt.scopes, " "))
	v.Set("service", bt.service)
	v.Set("client_id", defaultUserAgent)
	if auth.IdentityToken != "" {
		v.Set("grant_type", "refresh_token")
		v.Set("refresh_token", auth.IdentityToken)
	} else if auth.Username != "" && auth.Password != "" {
		// TODO(#629): This is unreachable.
		v.Set("grant_type", "password")
		v.Set("username", auth.Username)
		v.Set("password", auth.Password)
		v.Set("access_type", "offline")
	}

	client := http.Client{Transport: bt.inner}
	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// We don't want to log credentials.
	ctx = redact.NewContext(ctx, "oauth token response contains credentials")

	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := CheckError(resp, http.StatusOK); err != nil {
		logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
		return nil, err
	}

	return ioutil.ReadAll(resp.Body)
}
|
||||
|
||||
// https://docs.docker.com/registry/spec/auth/token/
//
// refreshBasic performs the classic token-exchange GET against the bearer
// realm, authenticating with the basic credentials, and returns the raw JSON
// response body for the caller to decode.
func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) {
	u, err := url.Parse(bt.realm)
	if err != nil {
		return nil, err
	}
	// The token endpoint expects Basic auth; scope the credentials to the
	// realm's host only.
	b := &basicTransport{
		inner:  bt.inner,
		auth:   bt.basic,
		target: u.Host,
	}
	client := http.Client{Transport: b}

	// Preserve any query parameters already present on the realm URL and add
	// the requested scopes and service.
	v := u.Query()
	v["scope"] = bt.scopes
	v.Set("service", bt.service)
	u.RawQuery = v.Encode()

	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}

	// We don't want to log credentials.
	ctx = redact.NewContext(ctx, "basic token response contains credentials")

	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := CheckError(resp, http.StatusOK); err != nil {
		logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
		return nil, err
	}

	return ioutil.ReadAll(resp.Body)
}
|
18
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
generated
vendored
Normal file
18
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package transport provides facilities for setting up an authenticated
|
||||
// http.RoundTripper given an Authenticator and base RoundTripper. See
|
||||
// transport.New for more information.
|
||||
package transport
|
200
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
Normal file
200
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
Normal file
@ -0,0 +1,200 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The set of query string keys that we expect to send as part of the registry
// protocol. Anything else is potentially dangerous to leak, as it's probably
// from a redirect. These redirects often included tokens or signed URLs.
// Consulted by redactURL when scrubbing URLs for error messages.
var paramAllowlist = map[string]struct{}{
	// Token exchange
	"scope":   {},
	"service": {},
	// Cross-repo mounting
	"mount": {},
	"from":  {},
	// Layer PUT
	"digest": {},
	// Listing tags and catalog
	"n":    {},
	"last": {},
}
|
||||
|
||||
// Error implements error to support the following error specification:
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
type Error struct {
	// Errors holds the structured diagnostics decoded from the response body.
	Errors []Diagnostic `json:"errors,omitempty"`
	// The http status code returned.
	StatusCode int
	// The request that failed.
	Request *http.Request
	// The raw body if we couldn't understand it.
	rawBody string
}

// Check that Error implements error
var _ error = (*Error)(nil)
|
||||
|
||||
// Error implements error
|
||||
func (e *Error) Error() string {
|
||||
prefix := ""
|
||||
if e.Request != nil {
|
||||
prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redactURL(e.Request.URL))
|
||||
}
|
||||
return prefix + e.responseErr()
|
||||
}
|
||||
|
||||
func (e *Error) responseErr() string {
|
||||
switch len(e.Errors) {
|
||||
case 0:
|
||||
if len(e.rawBody) == 0 {
|
||||
if e.Request != nil && e.Request.Method == http.MethodHead {
|
||||
return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode))
|
||||
}
|
||||
return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode))
|
||||
}
|
||||
return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody)
|
||||
case 1:
|
||||
return e.Errors[0].String()
|
||||
default:
|
||||
var errors []string
|
||||
for _, d := range e.Errors {
|
||||
errors = append(errors, d.String())
|
||||
}
|
||||
return fmt.Sprintf("multiple errors returned: %s",
|
||||
strings.Join(errors, "; "))
|
||||
}
|
||||
}
|
||||
|
||||
// Temporary returns whether the request that preceded the error is temporary.
|
||||
func (e *Error) Temporary() bool {
|
||||
if len(e.Errors) == 0 {
|
||||
_, ok := temporaryStatusCodes[e.StatusCode]
|
||||
return ok
|
||||
}
|
||||
for _, d := range e.Errors {
|
||||
if _, ok := temporaryErrorCodes[d.Code]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO(jonjohnsonjr): Consider moving to internal/redact.
|
||||
func redactURL(original *url.URL) *url.URL {
|
||||
qs := original.Query()
|
||||
for k, v := range qs {
|
||||
for i := range v {
|
||||
if _, ok := paramAllowlist[k]; !ok {
|
||||
// key is not in the Allowlist
|
||||
v[i] = "REDACTED"
|
||||
}
|
||||
}
|
||||
}
|
||||
redacted := *original
|
||||
redacted.RawQuery = qs.Encode()
|
||||
return &redacted
|
||||
}
|
||||
|
||||
// Diagnostic represents a single error returned by a Docker registry interaction.
type Diagnostic struct {
	// Code is the registry's machine-readable error code.
	Code ErrorCode `json:"code"`
	// Message is the registry's human-readable description.
	Message string `json:"message,omitempty"`
	// Detail carries arbitrary extra context, if the registry supplied any.
	Detail interface{} `json:"detail,omitempty"`
}
|
||||
|
||||
// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
|
||||
func (d Diagnostic) String() string {
|
||||
msg := fmt.Sprintf("%s: %s", d.Code, d.Message)
|
||||
if d.Detail != nil {
|
||||
msg = fmt.Sprintf("%s; %v", msg, d.Detail)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// ErrorCode is an enumeration of supported error codes.
type ErrorCode string

// The set of error conditions a registry may return:
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
const (
	BlobUnknownErrorCode         ErrorCode = "BLOB_UNKNOWN"
	BlobUploadInvalidErrorCode   ErrorCode = "BLOB_UPLOAD_INVALID"
	BlobUploadUnknownErrorCode   ErrorCode = "BLOB_UPLOAD_UNKNOWN"
	DigestInvalidErrorCode       ErrorCode = "DIGEST_INVALID"
	ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN"
	ManifestInvalidErrorCode     ErrorCode = "MANIFEST_INVALID"
	ManifestUnknownErrorCode     ErrorCode = "MANIFEST_UNKNOWN"
	ManifestUnverifiedErrorCode  ErrorCode = "MANIFEST_UNVERIFIED"
	NameInvalidErrorCode         ErrorCode = "NAME_INVALID"
	NameUnknownErrorCode         ErrorCode = "NAME_UNKNOWN"
	SizeInvalidErrorCode         ErrorCode = "SIZE_INVALID"
	TagInvalidErrorCode          ErrorCode = "TAG_INVALID"
	UnauthorizedErrorCode        ErrorCode = "UNAUTHORIZED"
	DeniedErrorCode              ErrorCode = "DENIED"
	UnsupportedErrorCode         ErrorCode = "UNSUPPORTED"
	TooManyRequestsErrorCode     ErrorCode = "TOOMANYREQUESTS"
	UnknownErrorCode             ErrorCode = "UNKNOWN"
)
|
||||
|
||||
// TODO: Include other error types.
//
// temporaryErrorCodes lists registry error codes treated as retryable by
// Error.Temporary.
var temporaryErrorCodes = map[ErrorCode]struct{}{
	BlobUploadInvalidErrorCode: {},
	TooManyRequestsErrorCode:   {},
	UnknownErrorCode:           {},
}

// temporaryStatusCodes lists HTTP statuses treated as retryable when the
// response carried no structured diagnostics.
var temporaryStatusCodes = map[int]struct{}{
	http.StatusRequestTimeout:      {},
	http.StatusInternalServerError: {},
	http.StatusBadGateway:          {},
	http.StatusServiceUnavailable:  {},
	http.StatusGatewayTimeout:      {},
}
|
||||
|
||||
// CheckError returns a structured error if the response status is not in codes.
|
||||
func CheckError(resp *http.Response, codes ...int) error {
|
||||
for _, code := range codes {
|
||||
if resp.StatusCode == code {
|
||||
// This is one of the supported status codes.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
|
||||
structuredError := &Error{}
|
||||
|
||||
// This can fail if e.g. the response body is not valid JSON. That's fine,
|
||||
// we'll construct an appropriate error string from the body and status code.
|
||||
_ = json.Unmarshal(b, structuredError)
|
||||
|
||||
structuredError.rawBody = string(b)
|
||||
structuredError.StatusCode = resp.StatusCode
|
||||
structuredError.Request = resp.Request
|
||||
|
||||
return structuredError
|
||||
}
|
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
Normal file
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
)
|
||||
|
||||
// logTransport wraps another RoundTripper and debug-logs every request and
// response passing through it.
type logTransport struct {
	inner http.RoundTripper
}

// NewLogger returns a transport that logs requests and responses to
// github.com/google/go-containerregistry/pkg/logs.Debug.
func NewLogger(inner http.RoundTripper) http.RoundTripper {
	return &logTransport{inner}
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper. It dumps the outgoing request and
// incoming response to logs.Debug, always redacting the Authorization header
// and, when the context asks for it (via the redact package), the bodies too.
func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
	// Inspired by: github.com/motemen/go-loghttp

	// We redact token responses and binary blobs in response/request.
	omitBody, reason := redact.FromContext(in.Context())
	if omitBody {
		logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason)
	} else {
		logs.Debug.Printf("--> %s %s", in.Method, in.URL)
	}

	// Save these headers so we can redact Authorization.
	savedHeaders := in.Header.Clone()
	if in.Header != nil && in.Header.Get("authorization") != "" {
		in.Header.Set("authorization", "<redacted>")
	}

	b, err := httputil.DumpRequestOut(in, !omitBody)
	if err == nil {
		logs.Debug.Println(string(b))
	} else {
		logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err)
	}

	// Restore the non-redacted headers before the request actually goes out.
	in.Header = savedHeaders

	start := time.Now()
	out, err = t.inner.RoundTrip(in)
	duration := time.Since(start)
	if err != nil {
		logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration)
	}
	if out != nil {
		msg := fmt.Sprintf("<-- %d", out.StatusCode)
		if out.Request != nil {
			msg = fmt.Sprintf("%s %s", msg, out.Request.URL)
		}
		msg = fmt.Sprintf("%s (%s)", msg, duration)

		if omitBody {
			msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason)
		}

		logs.Debug.Print(msg)

		b, err := httputil.DumpResponse(out, !omitBody)
		if err == nil {
			logs.Debug.Println(string(b))
		} else {
			logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err)
		}
	}
	// Named returns carry the inner transport's response and error.
	return
}
|
147
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
Normal file
147
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// challenge is the auth scheme advertised by the registry's WWW-Authenticate
// header during the initial /v2/ ping.
type challenge string

const (
	anonymous challenge = "anonymous"
	basic     challenge = "basic"
	bearer    challenge = "bearer"
)

// pingResp captures the outcome of pinging a registry's /v2/ endpoint.
type pingResp struct {
	challenge challenge

	// Following the challenge there are often key/value pairs
	// e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
	parameters map[string]string

	// The registry's scheme to use. Communicates whether we fell back to http.
	scheme string
}

// Canonical lowercases the challenge so scheme comparisons are
// case-insensitive.
func (c challenge) Canonical() challenge {
	return challenge(strings.ToLower(string(c)))
}
|
||||
|
||||
// parseChallenge splits the parameter portion of a WWW-Authenticate header
// (e.g. `realm="https://...",service="..."`) into a key/value map. Bare
// tokens without "=" become keys with an empty value.
func parseChallenge(suffix string) map[string]string {
	pairs := make(map[string]string)
	for _, raw := range strings.Split(suffix, ",") {
		// Trim surrounding spaces, then split once on "=".
		raw = strings.Trim(raw, " ")
		parts := strings.SplitN(raw, "=", 2)
		if len(parts) == 2 {
			// Strip quotes from quoted values.
			pairs[parts[0]] = strings.Trim(parts[1], `"`)
			continue
		}
		// Single part: key with an empty value.
		pairs[raw] = ""
	}
	return pairs
}
|
||||
|
||||
// ping probes reg's /v2/ endpoint to discover which authentication challenge
// (if any) the registry issues and which scheme actually works.
func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) {
	client := http.Client{Transport: t}

	// This first attempts to use "https" for every request, falling back to http
	// if the registry matches our localhost heuristic or if it is intentionally
	// set to insecure via name.NewInsecureRegistry.
	schemes := []string{"https"}
	if reg.Scheme() == "http" {
		schemes = append(schemes, "http")
	}

	var errs []string
	for _, scheme := range schemes {
		url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		resp, err := client.Do(req.WithContext(ctx))
		if err != nil {
			errs = append(errs, err.Error())
			// Potentially retry with http.
			continue
		}
		// Deferring inside the loop is bounded: schemes has at most two
		// entries, and each iteration's closure captures its own resp.
		defer func() {
			// By draining the body, make sure to reuse the connection made by
			// the ping for the following access to the registry
			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()
		}()

		switch resp.StatusCode {
		case http.StatusOK:
			// If we get a 200, then no authentication is needed.
			return &pingResp{
				challenge: anonymous,
				scheme:    scheme,
			}, nil
		case http.StatusUnauthorized:
			if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
				// If we hit more than one, let's try to find one that we know how to handle.
				wac := pickFromMultipleChallenges(challenges)
				return &pingResp{
					challenge:  challenge(wac.Scheme).Canonical(),
					parameters: wac.Parameters,
					scheme:     scheme,
				}, nil
			}
			// Otherwise, just return the challenge without parameters.
			return &pingResp{
				challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
				scheme:    scheme,
			}, nil
		default:
			return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
		}
	}
	// Every scheme failed at the transport level; surface all the errors.
	return nil, errors.New(strings.Join(errs, "; "))
}
|
||||
|
||||
func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge {
|
||||
// It might happen there are multiple www-authenticate headers, e.g. `Negotiate` and `Basic`.
|
||||
// Picking simply the first one could result eventually in `unrecognized challenge` error,
|
||||
// that's why we're looping through the challenges in search for one that can be handled.
|
||||
allowedSchemes := []string{"basic", "bearer"}
|
||||
|
||||
for _, wac := range challenges {
|
||||
currentScheme := strings.ToLower(wac.Scheme)
|
||||
for _, allowed := range allowedSchemes {
|
||||
if allowed == currentScheme {
|
||||
return wac
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return challenges[0]
|
||||
}
|
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
Normal file
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/retry"
|
||||
)
|
||||
|
||||
// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips.
var defaultBackoff = retry.Backoff{
	Duration: 100 * time.Millisecond,
	Factor:   3.0,
	Jitter:   0.1,
	Steps:    5,
}
|
||||
|
||||
// Compile-time check that retryTransport satisfies http.RoundTripper.
var _ http.RoundTripper = (*retryTransport)(nil)

// retryTransport wraps a RoundTripper and retries temporary network errors.
type retryTransport struct {
	inner     http.RoundTripper
	backoff   retry.Backoff
	predicate retry.Predicate
}
|
||||
|
||||
// Option is a functional option for retryTransport.
type Option func(*options)

// options collects the configurable knobs consumed by NewRetry.
type options struct {
	backoff   retry.Backoff
	predicate retry.Predicate
}

// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib
type Backoff = retry.Backoff

// WithRetryBackoff sets the backoff for retry operations.
func WithRetryBackoff(backoff Backoff) Option {
	return func(o *options) {
		o.backoff = backoff
	}
}

// WithRetryPredicate sets the predicate for retry operations.
func WithRetryPredicate(predicate func(error) bool) Option {
	return func(o *options) {
		o.predicate = predicate
	}
}
|
||||
|
||||
// NewRetry returns a transport that retries errors.
func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper {
	// Defaults: defaultBackoff's schedule with retry.IsTemporary, unless
	// overridden via WithRetryBackoff / WithRetryPredicate.
	o := &options{
		backoff:   defaultBackoff,
		predicate: retry.IsTemporary,
	}

	for _, opt := range opts {
		opt(o)
	}

	return &retryTransport{
		inner:     inner,
		backoff:   o.backoff,
		predicate: o.predicate,
	}
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper, retrying the inner RoundTripper
// with backoff whenever the configured predicate reports the error as
// retryable.
func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
	roundtrip := func() error {
		out, err = t.inner.RoundTrip(in)
		return err
	}
	// The final attempt's response and error are captured in the named
	// returns, so retry.Retry's own return value is redundant and ignored.
	retry.Retry(roundtrip, t.predicate, t.backoff)
	return
}
|
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
Normal file
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// schemeTransport rewrites request URLs to use the scheme the initial
// registry ping determined to work.
type schemeTransport struct {
	// Scheme we should use, determined by ping response.
	scheme string

	// Registry we're talking to.
	registry name.Registry

	// Wrapped by schemeTransport.
	inner http.RoundTripper
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	// When we ping() the registry, we determine whether to use http or https
	// based on which scheme was successful. That is only valid for the
	// registry server and not e.g. a separate token server or blob storage,
	// so we should only override the scheme if the host is the registry.
	if matchesHost(st.registry, in, st.scheme) {
		in.URL.Scheme = st.scheme
	}
	return st.inner.RoundTrip(in)
}
|
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
Normal file
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
// Scopes suitable to qualify each Repository
const (
	PullScope string = "pull"
	PushScope string = "push,pull"
	// For now DELETE is PUSH, which is the read/write ACL.
	DeleteScope  string = PushScope
	CatalogScope string = "catalog"
)
|
114
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
Normal file
114
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// New returns a new RoundTripper based on the provided RoundTripper that has been
// setup to authenticate with the remote registry "reg", in the capacity
// laid out by the specified scopes.
//
// TODO(jonjohnsonjr): Deprecate this.
func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
	// Delegate to NewWithContext with a background context.
	return NewWithContext(context.Background(), reg, auth, t, scopes)
}
|
||||
|
||||
// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
// setup to authenticate with the remote registry "reg", in the capacity
// laid out by the specified scopes.
func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
	// The handshake:
	//  1. Use "t" to ping() the registry for the authentication challenge.
	//
	//  2a. If we get back a 200, then simply use "t".
	//
	//  2b. If we get back a 401 with a Basic challenge, then use a transport
	//      that just attachs auth each roundtrip.
	//
	//  2c. If we get back a 401 with a Bearer challenge, then use a transport
	//      that attaches a bearer token to each request, and refreshes is on 401s.
	//      Perform an initial refresh to seed the bearer token.

	// First we ping the registry to determine the parameters of the authentication handshake
	// (if one is even necessary).
	pr, err := ping(ctx, reg, t)
	if err != nil {
		return nil, err
	}

	// Wrap t with a useragent transport unless we already have one.
	if _, ok := t.(*userAgentTransport); !ok {
		t = NewUserAgent(t, "")
	}

	// Wrap t in a transport that selects the appropriate scheme based on the ping response.
	t = &schemeTransport{
		scheme:   pr.scheme,
		registry: reg,
		inner:    t,
	}

	switch pr.challenge.Canonical() {
	case anonymous:
		return &Wrapper{t}, nil
	case basic:
		return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil
	case bearer:
		// We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
		realm, ok := pr.parameters["realm"]
		if !ok {
			return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
		}
		service, ok := pr.parameters["service"]
		if !ok {
			// If the service parameter is not specified, then default it to the registry
			// with which we are talking.
			service = reg.String()
		}
		bt := &bearerTransport{
			inner:    t,
			basic:    auth,
			realm:    realm,
			registry: reg,
			service:  service,
			scopes:   scopes,
			scheme:   pr.scheme,
		}
		// Seed the bearer token before the transport is first used.
		if err := bt.refresh(ctx); err != nil {
			return nil, err
		}
		return &Wrapper{bt}, nil
	default:
		return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge)
	}
}
|
||||
|
||||
// Wrapper results in *not* wrapping supplied transport with additional logic such as retries, useragent and debug logging
// Consumers are opt-ing into providing their own transport without any additional wrapping.
type Wrapper struct {
	inner http.RoundTripper
}

// RoundTrip delegates to the inner RoundTripper
func (w *Wrapper) RoundTrip(in *http.Request) (*http.Response, error) {
	return w.inner.RoundTrip(in)
}
|
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
Normal file
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var (
|
||||
// Version can be set via:
|
||||
// -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'"
|
||||
Version string
|
||||
|
||||
ggcrVersion = defaultUserAgent
|
||||
)
|
||||
|
||||
const (
|
||||
defaultUserAgent = "go-containerregistry"
|
||||
moduleName = "github.com/google/go-containerregistry"
|
||||
)
|
||||
|
||||
type userAgentTransport struct {
|
||||
inner http.RoundTripper
|
||||
ua string
|
||||
}
|
||||
|
||||
func init() {
|
||||
if v := version(); v != "" {
|
||||
ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v)
|
||||
}
|
||||
}
|
||||
|
||||
func version() string {
|
||||
if Version != "" {
|
||||
// Version was set via ldflags, just return it.
|
||||
return Version
|
||||
}
|
||||
|
||||
info, ok := debug.ReadBuildInfo()
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Happens for crane and gcrane.
|
||||
if info.Main.Path == moduleName {
|
||||
return info.Main.Version
|
||||
}
|
||||
|
||||
// Anything else.
|
||||
for _, dep := range info.Deps {
|
||||
if dep.Path == moduleName {
|
||||
return dep.Version
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// NewUserAgent returns an http.Roundtripper that sets the user agent to
|
||||
// The provided string plus additional go-containerregistry information,
|
||||
// e.g. if provided "crane/v0.1.4" and this modules was built at v0.1.4:
|
||||
//
|
||||
// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4
|
||||
func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper {
|
||||
if ua == "" {
|
||||
ua = ggcrVersion
|
||||
} else {
|
||||
ua = fmt.Sprintf("%s %s", ua, ggcrVersion)
|
||||
}
|
||||
return &userAgentTransport{
|
||||
inner: inner,
|
||||
ua: ua,
|
||||
}
|
||||
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) {
|
||||
in.Header.Set("User-Agent", ut.ua)
|
||||
return ut.inner.RoundTrip(in)
|
||||
}
|
892
vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
generated
vendored
Normal file
892
vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
generated
vendored
Normal file
@ -0,0 +1,892 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
"github.com/google/go-containerregistry/internal/retry"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/google/go-containerregistry/pkg/v1/stream"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Taggable is an interface that enables a manifest PUT (e.g. for tagging).
type Taggable interface {
	// RawManifest returns the serialized manifest bytes to be PUT.
	RawManifest() ([]byte, error)
}
|
||||
|
||||
// Write pushes the provided img to the specified image reference.
//
// The result is named (rerr) so the deferred sendError call below can
// observe the final error and publish it on the progress channel.
func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	var lastUpdate *v1.Update
	if o.updates != nil {
		// Progress reporting requested: pre-compute the total byte count so
		// every v1.Update sent during the push carries the same Total.
		lastUpdate = &v1.Update{}
		lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts)
		if err != nil {
			return err
		}
		// Defers run LIFO: the error (if any) is sent before the channel closes.
		defer close(o.updates)
		defer func() { _ = sendError(o.updates, rerr) }()
	}
	return writeImage(o.context, ref, img, o, lastUpdate)
}
|
||||
|
||||
// writeImage pushes all of img's blobs (layers, then the config blob) using
// o.jobs concurrent workers, and finally PUTs the manifest to commit the
// image. It is the shared implementation behind Write and writeIndex.
func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, lastUpdate *v1.Update) error {
	ls, err := img.Layers()
	if err != nil {
		return err
	}
	scopes := scopesForUploadingImage(ref.Context(), ls)
	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:       ref.Context(),
		client:     &http.Client{Transport: tr},
		context:    ctx,
		updates:    o.updates,
		lastUpdate: lastUpdate,
		backoff:    o.retryBackoff,
		predicate:  o.retryPredicate,
	}

	// Upload individual blobs and collect any errors.
	// The buffer (2*jobs) lets the producer run ahead of the workers.
	blobChan := make(chan v1.Layer, 2*o.jobs)
	g, gctx := errgroup.WithContext(ctx)
	for i := 0; i < o.jobs; i++ {
		// Start N workers consuming blobs to upload.
		g.Go(func() error {
			for b := range blobChan {
				if err := w.uploadOne(gctx, b); err != nil {
					return err
				}
			}
			return nil
		})
	}

	// Upload individual layers in goroutines and collect any errors.
	// If we can dedupe by the layer digest, try to do so. If we can't determine
	// the digest for whatever reason, we can't dedupe and might re-upload.
	g.Go(func() error {
		defer close(blobChan)
		uploaded := map[v1.Hash]bool{}
		for _, l := range ls {
			// NOTE(review): shadowing guards against loop-variable capture
			// (pre-Go 1.22 semantics).
			l := l

			// Handle foreign layers.
			mt, err := l.MediaType()
			if err != nil {
				return err
			}
			if !mt.IsDistributable() && !o.allowNondistributableArtifacts {
				continue
			}

			// Streaming layers calculate their digests while uploading them. Assume
			// an error here indicates we need to upload the layer.
			h, err := l.Digest()
			if err == nil {
				// If we can determine the layer's digest ahead of
				// time, use it to dedupe uploads.
				if uploaded[h] {
					continue // Already uploading.
				}
				uploaded[h] = true
			}
			select {
			case blobChan <- l:
			case <-gctx.Done():
				return gctx.Err()
			}
		}
		return nil
	})

	if l, err := partial.ConfigLayer(img); err != nil {
		// We can't read the ConfigLayer, possibly because of streaming layers,
		// since the layer DiffIDs haven't been calculated yet. Attempt to wait
		// for the other layers to be uploaded, then try the config again.
		if err := g.Wait(); err != nil {
			return err
		}

		// Now that all the layers are uploaded, try to upload the config file blob.
		l, err := partial.ConfigLayer(img)
		if err != nil {
			return err
		}
		if err := w.uploadOne(ctx, l); err != nil {
			return err
		}
	} else {
		// We *can* read the ConfigLayer, so upload it concurrently with the layers.
		g.Go(func() error {
			return w.uploadOne(gctx, l)
		})

		// Wait for the layers + config.
		if err := g.Wait(); err != nil {
			return err
		}
	}

	// With all of the constituent elements uploaded, upload the manifest
	// to commit the image.
	return w.commitManifest(ctx, img, ref)
}
|
||||
|
||||
// writer writes the elements of an image to a remote image reference.
type writer struct {
	repo    name.Repository
	client  *http.Client
	context context.Context // NOTE(review): used by methods without a ctx parameter; newer methods take ctx explicitly.

	updates    chan<- v1.Update // optional progress channel; nil when progress reporting is off.
	lastUpdate *v1.Update       // running totals; Complete is mutated atomically (see progressReader/incrProgress).
	backoff    Backoff
	predicate  retry.Predicate
}
|
||||
|
||||
func sendError(ch chan<- v1.Update, err error) error {
|
||||
if err != nil && ch != nil {
|
||||
ch <- v1.Update{Error: err}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// url returns a url.Url for the specified path in the context of this remote image reference.
|
||||
func (w *writer) url(path string) url.URL {
|
||||
return url.URL{
|
||||
Scheme: w.repo.Registry.Scheme(),
|
||||
Host: w.repo.RegistryStr(),
|
||||
Path: path,
|
||||
}
|
||||
}
|
||||
|
||||
// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence.
|
||||
func (w *writer) nextLocation(resp *http.Response) (string, error) {
|
||||
loc := resp.Header.Get("Location")
|
||||
if len(loc) == 0 {
|
||||
return "", errors.New("missing Location header")
|
||||
}
|
||||
u, err := url.Parse(loc)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If the location header returned is just a url path, then fully qualify it.
|
||||
// We cannot simply call w.url, since there might be an embedded query string.
|
||||
return resp.Request.URL.ResolveReference(u).String(), nil
|
||||
}
|
||||
|
||||
// checkExistingBlob checks if a blob exists already in the repository by making a
// HEAD request to the blob store API. GCR performs an existence check on the
// initiation if "mount" is specified, even if no "from" sources are specified.
// However, this is not broadly applicable to all registries, e.g. ECR.
func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
	u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String()))

	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return false, err
	}

	resp, err := w.client.Do(req.WithContext(w.context))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// 404 is an expected outcome (blob absent), not an error.
	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
		return false, err
	}

	return resp.StatusCode == http.StatusOK, nil
}
|
||||
|
||||
// checkExistingManifest checks if a manifest exists already in the repository
// by making a HEAD request to the manifest API.
func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) {
	u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String()))

	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return false, err
	}
	// Advertise the expected media type so the registry matches the right manifest.
	req.Header.Set("Accept", string(mt))

	resp, err := w.client.Do(req.WithContext(w.context))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// 404 is an expected outcome (manifest absent), not an error.
	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
		return false, err
	}

	return resp.StatusCode == http.StatusOK, nil
}
|
||||
|
||||
// initiateUpload initiates the blob upload, which starts with a POST that can
// optionally include the hash of the layer and a list of repositories from
// which that layer might be read. On failure, an error is returned.
// On success, the layer was either mounted (nothing more to do) or a blob
// upload was initiated and the body of that blob should be sent to the returned
// location.
func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) {
	u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr()))
	uv := url.Values{}
	if mount != "" && from != "" {
		// Quay will fail if we specify a "mount" without a "from".
		uv["mount"] = []string{mount}
		uv["from"] = []string{from}
	}
	u.RawQuery = uv.Encode()

	// Make the request to initiate the blob upload.
	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
	if err != nil {
		return "", false, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := w.client.Do(req.WithContext(w.context))
	if err != nil {
		return "", false, err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
		return "", false, err
	}

	// Check the response code to determine the result.
	switch resp.StatusCode {
	case http.StatusCreated:
		// We're done, we were able to fast-path.
		return "", true, nil
	case http.StatusAccepted:
		// Proceed to PATCH, upload has begun.
		loc, err := w.nextLocation(resp)
		return loc, false, err
	default:
		// CheckError above only admits 201/202, so this cannot be reached.
		panic("Unreachable: initiateUpload")
	}
}
|
||||
|
||||
type progressReader struct {
|
||||
rc io.ReadCloser
|
||||
|
||||
count *int64 // number of bytes this reader has read, to support resetting on retry.
|
||||
updates chan<- v1.Update
|
||||
lastUpdate *v1.Update
|
||||
}
|
||||
|
||||
func (r *progressReader) Read(b []byte) (int, error) {
|
||||
n, err := r.rc.Read(b)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
atomic.AddInt64(r.count, int64(n))
|
||||
// TODO: warn/debug log if sending takes too long, or if sending is blocked while context is cancelled.
|
||||
r.updates <- v1.Update{
|
||||
Total: r.lastUpdate.Total,
|
||||
Complete: atomic.AddInt64(&r.lastUpdate.Complete, int64(n)),
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (r *progressReader) Close() error { return r.rc.Close() }
|
||||
|
||||
// streamBlob streams the contents of the blob to the specified location.
// On failure, this will return an error. On success, this will return the location
// header indicating how to commit the streamed blob.
func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocation string) (commitLocation string, rerr error) {
	// reset rolls back any progress accounting if the PATCH fails, so that a
	// retried upload does not double-count bytes. It is a no-op unless
	// progress reporting is enabled below.
	reset := func() {}
	defer func() {
		if rerr != nil {
			reset()
		}
	}()
	if w.updates != nil {
		var count int64
		blob = &progressReader{rc: blob, updates: w.updates, lastUpdate: w.lastUpdate, count: &count}
		reset = func() {
			// Subtract exactly what this attempt added, then publish the
			// corrected totals.
			atomic.AddInt64(&w.lastUpdate.Complete, -count)
			w.updates <- *w.lastUpdate
		}
	}

	req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := w.client.Do(req.WithContext(ctx))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
		return "", err
	}

	// The blob has been uploaded, return the location header indicating
	// how to commit this layer.
	return w.nextLocation(resp)
}
|
||||
|
||||
// commitBlob commits this blob by sending a PUT to the location returned from
// streaming the blob. The digest is attached as a query parameter so the
// registry can verify the uploaded content.
func (w *writer) commitBlob(location, digest string) error {
	u, err := url.Parse(location)
	if err != nil {
		return err
	}
	v := u.Query()
	v.Set("digest", digest)
	u.RawQuery = v.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := w.client.Do(req.WithContext(w.context))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return transport.CheckError(resp, http.StatusCreated)
}
|
||||
|
||||
// incrProgress increments and sends a progress update, if WithProgress is used.
|
||||
func (w *writer) incrProgress(written int64) {
|
||||
if w.updates == nil {
|
||||
return
|
||||
}
|
||||
w.updates <- v1.Update{
|
||||
Total: w.lastUpdate.Total,
|
||||
Complete: atomic.AddInt64(&w.lastUpdate.Complete, written),
|
||||
}
|
||||
}
|
||||
|
||||
// uploadOne performs a complete upload of a single layer: existence check,
// optional cross-repo mount, streaming PATCH, and the committing PUT — the
// whole sequence wrapped in the writer's retry policy.
func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
	var from, mount string
	if h, err := l.Digest(); err == nil {
		// If we know the digest, this isn't a streaming layer. Do an existence
		// check so we can skip uploading the layer if possible.
		existing, err := w.checkExistingBlob(h)
		if err != nil {
			return err
		}
		if existing {
			size, err := l.Size()
			if err != nil {
				return err
			}
			w.incrProgress(size)
			logs.Progress.Printf("existing blob: %v", h)
			return nil
		}

		mount = h.String()
	}
	if ml, ok := l.(*MountableLayer); ok {
		// Cross-repo mounts only work within the same registry.
		if w.repo.RegistryStr() == ml.Reference.Context().RegistryStr() {
			from = ml.Reference.Context().RepositoryStr()
		}
	}

	tryUpload := func() error {
		location, mounted, err := w.initiateUpload(from, mount)
		if err != nil {
			return err
		} else if mounted {
			// The registry mounted the blob from another repo; count its
			// full size as progress and skip the upload.
			size, err := l.Size()
			if err != nil {
				return err
			}
			w.incrProgress(size)
			h, err := l.Digest()
			if err != nil {
				return err
			}
			logs.Progress.Printf("mounted blob: %s", h.String())
			return nil
		}

		// Only log layers with +json or +yaml. We can let through other stuff if it becomes popular.
		// TODO(opencontainers/image-spec#791): Would be great to have an actual parser.
		mt, err := l.MediaType()
		if err != nil {
			return err
		}
		smt := string(mt)
		if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) {
			ctx = redact.NewContext(ctx, "omitting binary blobs from logs")
		}

		blob, err := l.Compressed()
		if err != nil {
			return err
		}
		location, err = w.streamBlob(ctx, blob, location)
		if err != nil {
			return err
		}

		h, err := l.Digest()
		if err != nil {
			return err
		}
		digest := h.String()

		if err := w.commitBlob(location, digest); err != nil {
			return err
		}
		logs.Progress.Printf("pushed blob: %s", digest)
		return nil
	}

	return retry.Retry(tryUpload, w.predicate, w.backoff)
}
|
||||
|
||||
// withLayer is satisfied by indexes/images that can resolve a descriptor's
// digest to a layer; used below as a workaround for #819 (non-image
// descriptors embedded in an index).
type withLayer interface {
	Layer(v1.Hash) (v1.Layer, error)
}
|
||||
|
||||
// writeIndex pushes every manifest referenced by ii (recursing into child
// indexes), skipping those the registry already has, and then PUTs the
// index manifest itself to commit it.
func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.ImageIndex, options ...Option) error {
	index, err := ii.IndexManifest()
	if err != nil {
		return err
	}

	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	// TODO(#803): Pipe through remote.WithJobs and upload these in parallel.
	for _, desc := range index.Manifests {
		// Children are addressed by digest within the same repository.
		ref := ref.Context().Digest(desc.Digest.String())
		exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType)
		if err != nil {
			return err
		}
		if exists {
			logs.Progress.Print("existing manifest: ", desc.Digest)
			continue
		}

		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			ii, err := ii.ImageIndex(desc.Digest)
			if err != nil {
				return err
			}
			if err := w.writeIndex(ctx, ref, ii, options...); err != nil {
				return err
			}
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			img, err := ii.Image(desc.Digest)
			if err != nil {
				return err
			}
			if err := writeImage(ctx, ref, img, o, w.lastUpdate); err != nil {
				return err
			}
		default:
			// Workaround for #819.
			if wl, ok := ii.(withLayer); ok {
				layer, err := wl.Layer(desc.Digest)
				if err != nil {
					return err
				}
				if err := w.uploadOne(ctx, layer); err != nil {
					return err
				}
			}
		}
	}

	// With all of the constituent elements uploaded, upload the manifest
	// to commit the image.
	return w.commitManifest(ctx, ii, ref)
}
|
||||
|
||||
// withMediaType is satisfied by Taggables that can report the media type of
// their manifest; see unpackTaggable.
type withMediaType interface {
	MediaType() (types.MediaType, error)
}
|
||||
|
||||
// This is really silly, but go interfaces don't let me satisfy remote.Taggable
|
||||
// with remote.Descriptor because of name collisions between method names and
|
||||
// struct fields.
|
||||
//
|
||||
// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or
|
||||
// create a descriptor based on the RawManifest and (optionally) MediaType.
|
||||
func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) {
|
||||
if d, ok := t.(*Descriptor); ok {
|
||||
return d.Manifest, &d.Descriptor, nil
|
||||
}
|
||||
b, err := t.RawManifest()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// A reasonable default if Taggable doesn't implement MediaType.
|
||||
mt := types.DockerManifestSchema2
|
||||
|
||||
if wmt, ok := t.(withMediaType); ok {
|
||||
m, err := wmt.MediaType()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
mt = m
|
||||
}
|
||||
|
||||
h, sz, err := v1.SHA256(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return b, &v1.Descriptor{
|
||||
MediaType: mt,
|
||||
Size: sz,
|
||||
Digest: h,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// commitManifest does a PUT of the image's manifest, retried per the
// writer's retry policy.
func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error {
	tryUpload := func() error {
		raw, desc, err := unpackTaggable(t)
		if err != nil {
			return err
		}

		// ref.Identifier() is either a tag or a digest string.
		u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier()))

		// Make the request to PUT the serialized manifest
		req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", string(desc.MediaType))

		resp, err := w.client.Do(req.WithContext(ctx))
		if err != nil {
			return err
		}
		defer resp.Body.Close()

		if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
			return err
		}

		// The image was successfully pushed!
		logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size)
		w.incrProgress(int64(len(raw)))
		return nil
	}

	return retry.Retry(tryUpload, w.predicate, w.backoff)
}
|
||||
|
||||
func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string {
|
||||
// use a map as set to remove duplicates scope strings
|
||||
scopeSet := map[string]struct{}{}
|
||||
|
||||
for _, l := range layers {
|
||||
if ml, ok := l.(*MountableLayer); ok {
|
||||
// we will add push scope for ref.Context() after the loop.
|
||||
// for now we ask pull scope for references of the same registry
|
||||
if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() {
|
||||
scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scopes := make([]string, 0)
|
||||
// Push scope should be the first element because a few registries just look at the first scope to determine access.
|
||||
scopes = append(scopes, repo.Scope(transport.PushScope))
|
||||
|
||||
for scope := range scopeSet {
|
||||
scopes = append(scopes, scope)
|
||||
}
|
||||
|
||||
return scopes
|
||||
}
|
||||
|
||||
// WriteIndex pushes the provided ImageIndex to the specified image reference.
// WriteIndex will attempt to push all of the referenced manifests before
// attempting to push the ImageIndex, to retain referential integrity.
//
// The result is named (rerr) so the deferred sendError call below can
// observe the final error and publish it on the progress channel.
func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	scopes := []string{ref.Scope(transport.PushScope)}
	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      ref.Context(),
		client:    &http.Client{Transport: tr},
		context:   o.context,
		updates:   o.updates,
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	if o.updates != nil {
		// Pre-compute the total so every progress update carries it.
		w.lastUpdate = &v1.Update{}
		w.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts)
		if err != nil {
			return err
		}
		// Defers run LIFO: the error (if any) is sent before the channel closes.
		// sendError's return is intentionally unused here (it echoes rerr).
		defer close(o.updates)
		defer func() { sendError(o.updates, rerr) }()
	}

	return w.writeIndex(o.context, ref, ii, options...)
}
|
||||
|
||||
// countImage counts the total size of all layers + config blob + manifest for
// an image. It de-dupes duplicate layers.
func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) {
	var total int64
	ls, err := img.Layers()
	if err != nil {
		return 0, err
	}
	seen := map[v1.Hash]bool{}
	for _, l := range ls {
		// Handle foreign layers.
		mt, err := l.MediaType()
		if err != nil {
			return 0, err
		}
		if !mt.IsDistributable() && !allowNondistributableArtifacts {
			continue
		}

		// TODO: support streaming layers which update the total count as they write.
		if _, ok := l.(*stream.Layer); ok {
			return 0, errors.New("cannot use stream.Layer and WithProgress")
		}

		// Dedupe layers.
		d, err := l.Digest()
		if err != nil {
			return 0, err
		}
		if seen[d] {
			continue
		}
		seen[d] = true

		size, err := l.Size()
		if err != nil {
			return 0, err
		}
		total += size
	}
	// The config blob contributes its raw byte length.
	b, err := img.RawConfigFile()
	if err != nil {
		return 0, err
	}
	total += int64(len(b))
	// Plus the manifest itself (per this function's contract above).
	size, err := img.Size()
	if err != nil {
		return 0, err
	}
	total += size
	return total, nil
}
|
||||
|
||||
// countIndex counts the total size of all images + sub-indexes for an index.
// It does not attempt to de-dupe duplicate images, etc.
func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) {
	var total int64
	mf, err := idx.IndexManifest()
	if err != nil {
		return 0, err
	}

	for _, desc := range mf.Manifests {
		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			// Recurse into child indexes.
			sidx, err := idx.ImageIndex(desc.Digest)
			if err != nil {
				return 0, err
			}
			size, err := countIndex(sidx, allowNondistributableArtifacts)
			if err != nil {
				return 0, err
			}
			total += size
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			simg, err := idx.Image(desc.Digest)
			if err != nil {
				return 0, err
			}
			size, err := countImage(simg, allowNondistributableArtifacts)
			if err != nil {
				return 0, err
			}
			total += size
		default:
			// Workaround for #819.
			if wl, ok := idx.(withLayer); ok {
				layer, err := wl.Layer(desc.Digest)
				if err != nil {
					return 0, err
				}
				size, err := layer.Size()
				if err != nil {
					return 0, err
				}
				total += size
			}
		}
	}

	// Plus the index manifest itself.
	size, err := idx.Size()
	if err != nil {
		return 0, err
	}
	total += size
	return total, nil
}
|
||||
|
||||
// WriteLayer uploads the provided Layer to the specified repo.
//
// The result is named (rerr) so the deferred sendError call below can
// observe the final error and publish it on the progress channel.
func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) {
	o, err := makeOptions(repo, options...)
	if err != nil {
		return err
	}
	scopes := scopesForUploadingImage(repo, []v1.Layer{layer})
	tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      repo,
		client:    &http.Client{Transport: tr},
		context:   o.context,
		updates:   o.updates,
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	if o.updates != nil {
		// Defers run LIFO: the error (if any) is sent before the channel closes.
		defer close(o.updates)
		defer func() { sendError(o.updates, rerr) }()

		// TODO: support streaming layers which update the total count as they write.
		if _, ok := layer.(*stream.Layer); ok {
			return errors.New("cannot use stream.Layer and WithProgress")
		}
		size, err := layer.Size()
		if err != nil {
			return err
		}
		w.lastUpdate = &v1.Update{Total: size}
	}
	return w.uploadOne(o.context, layer)
}
|
||||
|
||||
// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/<tag>
//
// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
// remote.Descriptor.
//
// If t implements MediaType, we will use that for the Content-Type, otherwise
// we will default to types.DockerManifestSchema2.
//
// Tag does not attempt to write anything other than the manifest, so callers
// should ensure that all blobs or manifests that are referenced by t exist
// in the target registry.
func Tag(tag name.Tag, t Taggable, options ...Option) error {
	// Tagging is just a manifest PUT addressed by tag rather than digest.
	return Put(tag, t, options...)
}
|
||||
|
||||
// Put adds a manifest from the given Taggable via PUT /v2/.../manifests/<ref>
//
// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
// remote.Descriptor.
//
// If t implements MediaType, we will use that for the Content-Type, otherwise
// we will default to types.DockerManifestSchema2.
//
// Put does not attempt to write anything other than the manifest, so callers
// should ensure that all blobs or manifests that are referenced by t exist
// in the target registry.
func Put(ref name.Reference, t Taggable, options ...Option) error {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}
	scopes := []string{ref.Scope(transport.PushScope)}

	// TODO: This *always* does a token exchange. For some registries,
	// that's pretty slow. Some ideas;
	// * Tag could take a list of tags.
	// * Allow callers to pass in a transport.Transport, typecheck
	//   it to allow them to reuse the transport across multiple calls.
	// * WithTag option to do multiple manifest PUTs in commitManifest.
	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      ref.Context(),
		client:    &http.Client{Transport: tr},
		context:   o.context,
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	return w.commitManifest(o.context, t, ref)
}
|
68
vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
generated
vendored
Normal file
68
vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
# `stream`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream)
|
||||
|
||||
The `stream` package contains an implementation of
|
||||
[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer)
|
||||
that supports _streaming_ access, i.e. the layer contents are read once and not
|
||||
buffered.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/stream"
|
||||
)
|
||||
|
||||
// upload the contents of stdin as a layer to a local registry
|
||||
func main() {
|
||||
repo, err := name.NewRepository("localhost:5000/stream")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
layer := stream.NewLayer(os.Stdin)
|
||||
|
||||
if err := remote.WriteLayer(repo, layer); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Structure
|
||||
|
||||
This implements the layer portion of an [image
|
||||
upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that
|
||||
is responsible for hashing the uncompressed contents to compute the `DiffID`,
|
||||
gzipping them to produce the `Compressed` contents, and hashing/counting the
|
||||
bytes to produce the `Digest`/`Size`. This goroutine writes to an
|
||||
`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from
|
||||
the corresponding `io.PipeReader`.
|
||||
|
||||
<p align="center">
|
||||
<img src="/images/stream.dot.svg" />
|
||||
</p>
|
||||
|
||||
## Caveats
|
||||
|
||||
This assumes that you have an uncompressed layer (i.e. a tarball) and would like
|
||||
to compress it. Calling `Uncompressed` is always an error. Likewise, other
|
||||
methods are invalid until the contents of `Compressed` have been completely
|
||||
consumed and `Close`d.
|
||||
|
||||
Using a `stream.Layer` will likely not work without careful consideration. For
|
||||
example, in the `mutate` package, we defer computing the manifest and config
|
||||
file until they are actually called. This allows you to `mutate.Append` a
|
||||
streaming layer to an image without accidentally consuming it. Similarly, in
|
||||
`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the
|
||||
layer anyway, understanding that we may be dealing with a `stream.Layer` whose
|
||||
contents need to be uploaded before we can upload the config file.
|
||||
|
||||
Given the [structure](#structure) of how this is implemented, forgetting to
|
||||
`Close` a `stream.Layer` will leak a goroutine.
|
242
vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
generated
vendored
Normal file
242
vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
generated
vendored
Normal file
@ -0,0 +1,242 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stream
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
var (
	// ErrNotComputed is returned when the requested value is not yet
	// computed because the stream has not been consumed yet.
	ErrNotComputed = errors.New("value not computed until stream is consumed")

	// ErrConsumed is returned by Compressed when the underlying stream has
	// already been consumed and closed.
	ErrConsumed = errors.New("stream was already consumed")
)

// Layer is a streaming implementation of v1.Layer.
type Layer struct {
	blob        io.ReadCloser // uncompressed source; read exactly once by the compression goroutine
	consumed    bool          // set by compressedReader.Close once blob has been fully drained
	compression int           // gzip level handed to gzip.NewWriterLevel

	mu             sync.Mutex // guards digest, diffID, and size
	digest, diffID *v1.Hash   // nil until the Compressed stream is consumed and Closed
	size           int64      // compressed byte count; 0 until computed
}

var _ v1.Layer = (*Layer)(nil)
|
||||
|
||||
// LayerOption applies options to layer
type LayerOption func(*Layer)

// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values.
func WithCompressionLevel(level int) LayerOption {
	return func(l *Layer) {
		l.compression = level
	}
}

// NewLayer creates a Layer from an io.ReadCloser.
//
// The reader is not consumed until the layer's Compressed stream is read;
// by default gzip.BestSpeed is used, overridable via WithCompressionLevel.
func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer {
	layer := &Layer{
		blob:        rc,
		compression: gzip.BestSpeed,
	}

	for _, opt := range opts {
		opt(layer)
	}

	return layer
}
|
||||
|
||||
// Digest implements v1.Layer.
//
// It returns ErrNotComputed until the Compressed stream has been fully
// consumed and Closed.
func (l *Layer) Digest() (v1.Hash, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.digest == nil {
		return v1.Hash{}, ErrNotComputed
	}
	return *l.digest, nil
}

// DiffID implements v1.Layer.
//
// It returns ErrNotComputed until the Compressed stream has been fully
// consumed and Closed.
func (l *Layer) DiffID() (v1.Hash, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.diffID == nil {
		return v1.Hash{}, ErrNotComputed
	}
	return *l.diffID, nil
}

// Size implements v1.Layer.
//
// NOTE(review): size == 0 is used as the "not yet computed" sentinel;
// this is presumably safe because gzip framing never produces zero
// bytes — confirm against compress/gzip behavior.
func (l *Layer) Size() (int64, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.size == 0 {
		return 0, ErrNotComputed
	}
	return l.size, nil
}

// MediaType implements v1.Layer
func (l *Layer) MediaType() (types.MediaType, error) {
	// We return DockerLayer for now as uncompressed layers
	// are unimplemented
	return types.DockerLayer, nil
}

// Uncompressed implements v1.Layer.
//
// Streaming layers are compress-on-read only, so this always errors.
func (l *Layer) Uncompressed() (io.ReadCloser, error) {
	return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented")
}

// Compressed implements v1.Layer.
//
// The returned ReadCloser yields the gzipped contents of the underlying
// blob and may only be obtained once (see ErrConsumed).
func (l *Layer) Compressed() (io.ReadCloser, error) {
	if l.consumed {
		return nil, ErrConsumed
	}
	return newCompressedReader(l)
}
|
||||
|
||||
// compressedReader is the single-use ReadCloser handed out by
// Layer.Compressed. Reading it drives the gzip goroutine; Closing it
// records digest/diffID/size on the parent Layer.
type compressedReader struct {
	closer io.Closer // original blob's Closer.

	h, zh hash.Hash // h hashes the uncompressed stream (diffID); zh hashes the compressed stream (digest).
	pr    io.Reader // read side of the pipe carrying gzipped bytes
	bw    *bufio.Writer // buffers gzip output ahead of readers of pr
	count *countWriter  // tallies compressed bytes to produce size

	l *Layer // stream.Layer to update upon Close.
}
|
||||
|
||||
// newCompressedReader wires up the hashing/compression pipeline for l and
// starts the goroutine that drains l.blob. The goroutine terminates when
// the blob is exhausted or io.Copy fails; either way it closes pw, which
// unblocks readers of the returned stream.
func newCompressedReader(l *Layer) (*compressedReader, error) {
	h := sha256.New()  // hashes uncompressed bytes (becomes diffID)
	zh := sha256.New() // hashes compressed bytes (becomes digest)
	count := &countWriter{}

	// gzip.Writer writes to the output stream via pipe, a hasher to
	// capture compressed digest, and a countWriter to capture compressed
	// size.
	pr, pw := io.Pipe()

	// Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count.
	mw := io.MultiWriter(pw, zh, count)

	// Buffer the output of the gzip writer so we don't have to wait on pr to keep writing.
	// Note: 2<<16 is 128KiB (the earlier "64K" comment was inaccurate).
	bw := bufio.NewWriterSize(mw, 2<<16)
	zw, err := gzip.NewWriterLevel(bw, l.compression)
	if err != nil {
		return nil, err
	}

	cr := &compressedReader{
		closer: newMultiCloser(zw, l.blob),
		pr:     pr,
		bw:     bw,
		h:      h,
		zh:     zh,
		count:  count,
		l:      l,
	}
	go func() {
		// Tee the blob into the plain hasher and the gzip writer; this
		// blocks on pw until callers read the Compressed stream.
		if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil {
			pw.CloseWithError(err)
			return
		}
		// Now close the compressed reader, to flush the gzip stream
		// and calculate digest/diffID/size. This will cause pr to
		// return EOF which will cause readers of the Compressed stream
		// to finish reading.
		pw.CloseWithError(cr.Close())
	}()

	return cr, nil
}
|
||||
|
||||
// Read implements io.Reader by delegating to the pipe fed by the gzip goroutine.
func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }

// Close finalizes the stream and publishes digest, diffID, and size to the
// parent Layer. Ordering matters: the multiCloser closes the gzip writer
// (emitting its remaining output) and the source blob, then the bufio
// buffer is flushed into the hashing MultiWriter, and only then are the
// hash sums read.
func (cr *compressedReader) Close() error {
	cr.l.mu.Lock()
	defer cr.l.mu.Unlock()

	// Close the inner ReadCloser.
	if err := cr.closer.Close(); err != nil {
		return err
	}

	// Flush the buffer.
	if err := cr.bw.Flush(); err != nil {
		return err
	}

	// h hashed the uncompressed stream -> diffID.
	diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
	if err != nil {
		return err
	}
	cr.l.diffID = &diffID

	// zh hashed the compressed stream -> digest.
	digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil)))
	if err != nil {
		return err
	}
	cr.l.digest = &digest

	cr.l.size = cr.count.n
	cr.l.consumed = true
	return nil
}
|
||||
|
||||
// countWriter is an io.Writer that discards its input while tracking the
// total number of bytes it has been asked to write.
type countWriter struct{ n int64 }

// Write implements io.Writer; it never fails and reports the full length
// of p as written.
func (c *countWriter) Write(p []byte) (int, error) {
	written := len(p)
	c.n += int64(written)
	return written, nil
}
|
||||
|
||||
// multiCloser is a Closer that collects multiple Closers and Closes them in order.
|
||||
type multiCloser []io.Closer
|
||||
|
||||
var _ io.Closer = (multiCloser)(nil)
|
||||
|
||||
func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
|
||||
|
||||
func (m multiCloser) Close() error {
|
||||
for _, c := range m {
|
||||
// NOTE: net/http will call close on success, so if we've already
|
||||
// closed the inner rc, it's not an error.
|
||||
if err := c.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
280
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
generated
vendored
Normal file
280
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
generated
vendored
Normal file
@ -0,0 +1,280 @@
|
||||
# `tarball`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball)
|
||||
|
||||
This package produces tarballs that can be consumed via `docker load`. Note
|
||||
that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball)
|
||||
tarballs that are produced by `docker save`, but this package is still able to
|
||||
read the legacy tarballs produced by `docker save`.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Read a tarball from os.Args[1] that contains ubuntu.
|
||||
tag, err := name.NewTag("ubuntu")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
img, err := tarball.ImageFromPath(os.Args[1], &tag)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write that tarball to os.Args[2] with a different tag.
|
||||
newTag, err := name.NewTag("ubuntu:newest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
f, err := os.Create(os.Args[2])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := tarball.Write(newTag, img, f); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Structure
|
||||
|
||||
<p align="center">
|
||||
<img src="/images/tarball.dot.svg" />
|
||||
</p>
|
||||
|
||||
Let's look at what happens when we write out a tarball:
|
||||
|
||||
|
||||
### `ubuntu:latest`
|
||||
|
||||
```
|
||||
$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar
|
||||
$ tree ubuntu/
|
||||
ubuntu/
|
||||
├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz
|
||||
├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz
|
||||
├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz
|
||||
├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz
|
||||
├── manifest.json
|
||||
└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c
|
||||
|
||||
0 directories, 6 files
|
||||
```
|
||||
|
||||
There are a couple interesting files here.
|
||||
|
||||
`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor)
|
||||
that describe the images contained in this tarball.
|
||||
|
||||
For each image, this has the `RepoTags` (how it was pulled), a `Config` file
|
||||
that points to the image's config file, a list of `Layers`, and (optionally)
|
||||
`LayerSources`.
|
||||
|
||||
```
|
||||
$ jq < ubuntu/manifest.json
|
||||
[
|
||||
{
|
||||
"Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c",
|
||||
"RepoTags": [
|
||||
"ubuntu"
|
||||
],
|
||||
"Layers": [
|
||||
"423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz",
|
||||
"de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz",
|
||||
"f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz",
|
||||
"b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz"
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
The config file and layers are exactly what you would expect, and match the
|
||||
registry representations of the same artifacts. You'll notice that the
|
||||
`manifest.json` contains similar information as the registry manifest, but isn't
|
||||
quite the same:
|
||||
|
||||
```
|
||||
$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 3408,
|
||||
"digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 26692096,
|
||||
"digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 35365,
|
||||
"digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 852,
|
||||
"digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 163,
|
||||
"digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This makes it difficult to maintain image digests when roundtripping images
|
||||
through the tarball format, so it's not a great format if you care about
|
||||
provenance.
|
||||
|
||||
The ubuntu example didn't have any `LayerSources` -- let's look at another image
|
||||
that does.
|
||||
|
||||
### `hello-world:nanoserver`
|
||||
|
||||
```
|
||||
$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar
|
||||
$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar
|
||||
$ tree nanoserver/
|
||||
nanoserver/
|
||||
├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz
|
||||
├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz
|
||||
├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz
|
||||
├── manifest.json
|
||||
└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6
|
||||
|
||||
0 directories, 5 files
|
||||
|
||||
$ jq < nanoserver/manifest.json
|
||||
[
|
||||
{
|
||||
"Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6",
|
||||
"RepoTags": [
|
||||
"index.docker.io/library/hello-world:i-was-a-digest"
|
||||
],
|
||||
"Layers": [
|
||||
"a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz",
|
||||
"be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz",
|
||||
"10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz"
|
||||
],
|
||||
"LayerSources": {
|
||||
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": {
|
||||
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
|
||||
"size": 101145811,
|
||||
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
|
||||
"urls": [
|
||||
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
A couple things to note about this `manifest.json` versus the other:
|
||||
* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform
|
||||
  image, so we had to pull this image by digest, since we're (I'm) on
|
||||
amd64/linux and wanted to grab a windows image. Since the tarball format
|
||||
expects a tag under `RepoTags`, and we didn't pull by tag, we replace the
|
||||
digest with a sentinel `i-was-a-digest` "tag" to appease docker.
|
||||
* The `LayerSources` has enough information to reconstruct the foreign layers
|
||||
pointer when pushing/pulling from the registry. For legal reasons, microsoft
|
||||
doesn't want anyone but them to serve windows base images, so the mediaType
|
||||
  here indicates a "foreign" or "non-distributable" layer with a URL for where
|
||||
you can download it from microsoft (see the [OCI
|
||||
image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)).
|
||||
|
||||
We can look at what's in the registry to explain both of these things:
|
||||
```
|
||||
$ crane manifest hello-world:nanoserver | jq .
|
||||
{
|
||||
"manifests": [
|
||||
{
|
||||
"digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b",
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "windows",
|
||||
"os.version": "10.0.17763.1040"
|
||||
},
|
||||
"size": 1124
|
||||
}
|
||||
],
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
||||
"schemaVersion": 2
|
||||
}
|
||||
|
||||
|
||||
# Note the media type and "urls" field.
|
||||
$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq .
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 1721,
|
||||
"digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
|
||||
"size": 101145811,
|
||||
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
|
||||
"urls": [
|
||||
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
|
||||
]
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 1669,
|
||||
"digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 949,
|
||||
"digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file:
|
||||
```
|
||||
$ jq '.[0].LayerSources' < nanoserver/manifest.json
|
||||
{
|
||||
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": {
|
||||
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
|
||||
"size": 101145811,
|
||||
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
|
||||
"urls": [
|
||||
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs
|
||||
{
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e",
|
||||
"sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8",
|
||||
"sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9"
|
||||
]
|
||||
}
|
||||
```
|
17
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go
generated
vendored
Normal file
17
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package tarball provides facilities for reading/writing v1.Images from/to
|
||||
// a tarball on-disk.
|
||||
package tarball
|
423
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
generated
vendored
Normal file
423
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
generated
vendored
Normal file
@ -0,0 +1,423 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tarball
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/gzip"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// image is the shared core backing both tarball image flavors; it caches
// the decoded manifest.json entry and the raw config bytes.
type image struct {
	opener        Opener      // re-opens the tarball for each read
	manifest      *Manifest   // decoded manifest.json contents
	config        []byte      // raw config file bytes
	imgDescriptor *Descriptor // manifest entry selected by tag

	tag *name.Tag // optional tag selecting one image from the tarball; nil means "the only image"
}

// uncompressedImage adapts image for tarballs whose layers are raw tars.
type uncompressedImage struct {
	*image
}

// compressedImage adapts image for tarballs whose layers are gzipped.
type compressedImage struct {
	*image
	manifestLock sync.Mutex // Protects manifest
	manifest     *v1.Manifest // lazily built and cached by Manifest()
}

var _ partial.UncompressedImageCore = (*uncompressedImage)(nil)
var _ partial.CompressedImageCore = (*compressedImage)(nil)
|
||||
|
||||
// Opener is a thunk for opening a tar file.
type Opener func() (io.ReadCloser, error)

// pathOpener returns an Opener that opens the file at path on every call.
func pathOpener(path string) Opener {
	return func() (io.ReadCloser, error) {
		return os.Open(path)
	}
}

// ImageFromPath returns a v1.Image from a tarball located on path.
//
// If tag is nil, the tarball must contain exactly one image.
func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) {
	return Image(pathOpener(path), tag)
}
|
||||
|
||||
// LoadManifest load manifest
|
||||
func LoadManifest(opener Opener) (Manifest, error) {
|
||||
m, err := extractFileFromTar(opener, "manifest.json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer m.Close()
|
||||
|
||||
var manifest Manifest
|
||||
|
||||
if err := json.NewDecoder(m).Decode(&manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// Image exposes an image from the tarball at the provided path.
//
// If tag is nil, the tarball must contain exactly one image. The first
// layer is sniffed to decide whether to serve the image through the
// compressed or uncompressed partial adapter.
func Image(opener Opener, tag *name.Tag) (v1.Image, error) {
	img := &image{
		opener: opener,
		tag:    tag,
	}
	if err := img.loadTarDescriptorAndConfig(); err != nil {
		return nil, err
	}

	// Peek at the first layer and see if it's compressed.
	if len(img.imgDescriptor.Layers) > 0 {
		compressed, err := img.areLayersCompressed()
		if err != nil {
			return nil, err
		}
		if compressed {
			c := compressedImage{
				image: img,
			}
			return partial.CompressedToImage(&c)
		}
	}

	// No layers, or raw (uncompressed) layer tarballs.
	uc := uncompressedImage{
		image: img,
	}
	return partial.UncompressedToImage(&uc)
}
|
||||
|
||||
// MediaType reports Docker schema 2 for all tarball-backed images.
func (i *image) MediaType() (types.MediaType, error) {
	return types.DockerManifestSchema2, nil
}

// Descriptor stores the manifest data for a single image inside a `docker save` tarball.
type Descriptor struct {
	Config   string   // path of the config file inside the tarball
	RepoTags []string // tags the image was saved under
	Layers   []string // paths of the layer blobs inside the tarball, in order

	// Tracks foreign layer info. Key is DiffID.
	LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"`
}

// Manifest represents the manifests of all images as the `manifest.json` file in a `docker save` tarball.
type Manifest []Descriptor
|
||||
|
||||
// findDescriptor returns the entry in m whose RepoTags contain tag. A nil
// tag is permitted only when the tarball holds exactly one image, which is
// then returned.
func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) {
	if tag == nil {
		if len(m) != 1 {
			return nil, errors.New("tarball must contain only a single image to be used with tarball.Image")
		}
		return &(m)[0], nil
	}
	for _, img := range m {
		for _, tagStr := range img.RepoTags {
			repoTag, err := name.NewTag(tagStr)
			if err != nil {
				return nil, err
			}

			// Compare the resolved names, since there are several ways to specify the same tag.
			if repoTag.Name() == tag.Name() {
				// img is the range copy, so the returned pointer does not
				// alias the Manifest slice.
				return &img, nil
			}
		}
	}
	return nil, fmt.Errorf("tag %s not found in tarball", tag)
}
|
||||
|
||||
// areLayersCompressed sniffs the first layer blob to decide whether the
// tarball stores gzipped layers (registry-style) or raw tars.
func (i *image) areLayersCompressed() (bool, error) {
	if len(i.imgDescriptor.Layers) == 0 {
		return false, errors.New("0 layers found in image")
	}
	layer := i.imgDescriptor.Layers[0]
	blob, err := extractFileFromTar(i.opener, layer)
	if err != nil {
		return false, err
	}
	defer blob.Close()
	// Consuming a few bytes here is fine: the blob is reopened via
	// i.opener whenever the layer is actually read.
	return gzip.Is(blob)
}
|
||||
|
||||
// loadTarDescriptorAndConfig decodes manifest.json, selects the descriptor
// matching i.tag, and caches the raw config file bytes on i.config.
func (i *image) loadTarDescriptorAndConfig() error {
	m, err := extractFileFromTar(i.opener, "manifest.json")
	if err != nil {
		return err
	}
	defer m.Close()

	if err := json.NewDecoder(m).Decode(&i.manifest); err != nil {
		return err
	}

	// Decode succeeds with a nil manifest for e.g. a "null" document.
	if i.manifest == nil {
		return errors.New("no valid manifest.json in tarball")
	}

	i.imgDescriptor, err = i.manifest.findDescriptor(i.tag)
	if err != nil {
		return err
	}

	cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config)
	if err != nil {
		return err
	}
	defer cfg.Close()

	i.config, err = ioutil.ReadAll(cfg)
	if err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// RawConfigFile returns the config bytes cached by loadTarDescriptorAndConfig.
func (i *image) RawConfigFile() ([]byte, error) {
	return i.config, nil
}

// tarFile represents a single file inside a tar. Closing it closes the tar itself.
type tarFile struct {
	io.Reader
	io.Closer
}
|
||||
|
||||
func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) {
|
||||
f, err := opener()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
close := true
|
||||
defer func() {
|
||||
if close {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
tf := tar.NewReader(f)
|
||||
for {
|
||||
hdr, err := tf.Next()
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hdr.Name == filePath {
|
||||
if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink {
|
||||
currentDir := filepath.Dir(filePath)
|
||||
return extractFileFromTar(opener, path.Join(currentDir, hdr.Linkname))
|
||||
}
|
||||
close = false
|
||||
return tarFile{
|
||||
Reader: tf,
|
||||
Closer: f,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("file %s not found in tar", filePath)
|
||||
}
|
||||
|
||||
// uncompressedLayerFromTarball implements partial.UncompressedLayer
type uncompressedLayerFromTarball struct {
	diffID    v1.Hash         // uncompressed digest, taken from the config's rootfs diff_ids
	mediaType types.MediaType // media type reported to consumers
	opener    Opener          // re-opens the enclosing tarball on each read
	filePath  string          // layer blob's path inside the tarball
}

// foreignUncompressedLayer implements partial.UncompressedLayer but returns
// a custom descriptor. This allows the foreign layer URLs to be included in
// the generated image manifest for uncompressed layers.
type foreignUncompressedLayer struct {
	uncompressedLayerFromTarball
	desc v1.Descriptor // original descriptor, preserving foreign URLs
}

// Descriptor returns the preserved foreign-layer descriptor verbatim.
func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) {
	return &fl.desc, nil
}

// DiffID implements partial.UncompressedLayer
func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) {
	return ulft.diffID, nil
}

// Uncompressed implements partial.UncompressedLayer
func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) {
	return extractFileFromTar(ulft.opener, ulft.filePath)
}

// MediaType reports the media type chosen when the layer was constructed.
func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) {
	return ulft.mediaType, nil
}
|
||||
|
||||
// LayerByDiffID implements partial.UncompressedImageCore. It maps h to a
// layer file by index: the config's rootfs.diff_ids parallel the manifest
// descriptor's Layers entries.
func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) {
	cfg, err := partial.ConfigFile(i)
	if err != nil {
		return nil, err
	}
	for idx, diffID := range cfg.RootFS.DiffIDs {
		if diffID == h {
			// Technically the media type should be 'application/tar' but given that our
			// v1.Layer doesn't force consumers to care about whether the layer is compressed
			// we should be fine returning the DockerLayer media type
			mt := types.DockerLayer
			if bd, ok := i.imgDescriptor.LayerSources[h]; ok {
				// Overwrite the mediaType for foreign layers.
				return &foreignUncompressedLayer{
					uncompressedLayerFromTarball: uncompressedLayerFromTarball{
						diffID:    diffID,
						mediaType: bd.MediaType,
						opener:    i.opener,
						filePath:  i.imgDescriptor.Layers[idx],
					},
					desc: bd,
				}, nil
			}
			return &uncompressedLayerFromTarball{
				diffID:    diffID,
				mediaType: mt,
				opener:    i.opener,
				filePath:  i.imgDescriptor.Layers[idx],
			}, nil
		}
	}
	return nil, fmt.Errorf("diff id %q not found", h)
}
|
||||
|
||||
func (c *compressedImage) Manifest() (*v1.Manifest, error) {
|
||||
c.manifestLock.Lock()
|
||||
defer c.manifestLock.Unlock()
|
||||
if c.manifest != nil {
|
||||
return c.manifest, nil
|
||||
}
|
||||
|
||||
b, err := c.RawConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.manifest = &v1.Manifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: types.DockerManifestSchema2,
|
||||
Config: v1.Descriptor{
|
||||
MediaType: types.DockerConfigJSON,
|
||||
Size: cfgSize,
|
||||
Digest: cfgHash,
|
||||
},
|
||||
}
|
||||
|
||||
for i, p := range c.imgDescriptor.Layers {
|
||||
cfg, err := partial.ConfigFile(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffid := cfg.RootFS.DiffIDs[i]
|
||||
if d, ok := c.imgDescriptor.LayerSources[diffid]; ok {
|
||||
// If it's a foreign layer, just append the descriptor so we can avoid
|
||||
// reading the entire file.
|
||||
c.manifest.Layers = append(c.manifest.Layers, d)
|
||||
} else {
|
||||
l, err := extractFileFromTar(c.opener, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer l.Close()
|
||||
sha, size, err := v1.SHA256(l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{
|
||||
MediaType: types.DockerLayer,
|
||||
Size: size,
|
||||
Digest: sha,
|
||||
})
|
||||
}
|
||||
}
|
||||
return c.manifest, nil
|
||||
}
|
||||
|
||||
// RawManifest returns the JSON-serialized form of the synthesized manifest,
// delegating serialization to the partial helpers.
func (c *compressedImage) RawManifest() ([]byte, error) {
	return partial.RawManifest(c)
}
|
||||
|
||||
// compressedLayerFromTarball implements partial.CompressedLayer
// backed by a single gzipped layer file inside the image tarball.
type compressedLayerFromTarball struct {
	desc     v1.Descriptor // descriptor from the synthesized manifest (digest, size, media type)
	opener   Opener        // re-openable source of the whole image tarball
	filePath string        // path of this layer's file inside the image tarball
}

// Digest implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) {
	return clft.desc.Digest, nil
}

// Compressed implements partial.CompressedLayer
// by extracting the layer file from the tarball on each call.
func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) {
	return extractFileFromTar(clft.opener, clft.filePath)
}

// MediaType implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) {
	return clft.desc.MediaType, nil
}

// Size implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) Size() (int64, error) {
	return clft.desc.Size, nil
}
|
||||
|
||||
// LayerByDigest returns the compressed layer whose digest equals h.
// Manifest layer order matches imgDescriptor.Layers, so the matching
// manifest index i selects the corresponding file path in the tarball.
func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
	m, err := c.Manifest()
	if err != nil {
		return nil, err
	}
	for i, l := range m.Layers {
		if l.Digest == h {
			fp := c.imgDescriptor.Layers[i]
			return &compressedLayerFromTarball{
				desc:     l,
				opener:   c.opener,
				filePath: fp,
			}, nil
		}
	}
	return nil, fmt.Errorf("blob %v not found", h)
}
|
282
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go
generated
vendored
Normal file
282
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tarball
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
"github.com/google/go-containerregistry/internal/and"
|
||||
gestargz "github.com/google/go-containerregistry/internal/estargz"
|
||||
ggzip "github.com/google/go-containerregistry/internal/gzip"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// layer is the v1.Layer implementation produced by LayerFromOpener and
// friends. Compressed and uncompressed views are both modeled as Opener
// functions so the underlying data can be re-read on demand.
type layer struct {
	digest             v1.Hash           // digest of the compressed stream
	diffID             v1.Hash           // digest of the uncompressed stream
	size               int64             // size of the compressed stream
	compressedopener   Opener            // yields the compressed bytes
	uncompressedopener Opener            // yields the uncompressed bytes
	compression        int               // gzip level used when compressing on the fly
	annotations        map[string]string // descriptor annotations (e.g. estargz TOC digest)
	estgzopts          []estargz.Option  // passed through when estargz is enabled
}
|
||||
|
||||
// Descriptor implements partial.withDescriptor.
// The returned descriptor always reports types.DockerLayer and carries the
// layer's annotations.
func (l *layer) Descriptor() (*v1.Descriptor, error) {
	digest, err := l.Digest()
	if err != nil {
		return nil, err
	}
	return &v1.Descriptor{
		Size:        l.size,
		Digest:      digest,
		Annotations: l.annotations,
		MediaType:   types.DockerLayer,
	}, nil
}
|
||||
|
||||
// Digest implements v1.Layer
func (l *layer) Digest() (v1.Hash, error) {
	return l.digest, nil
}

// DiffID implements v1.Layer
func (l *layer) DiffID() (v1.Hash, error) {
	return l.diffID, nil
}

// Compressed implements v1.Layer
// by invoking the stored compressed opener.
func (l *layer) Compressed() (io.ReadCloser, error) {
	return l.compressedopener()
}

// Uncompressed implements v1.Layer
// by invoking the stored uncompressed opener.
func (l *layer) Uncompressed() (io.ReadCloser, error) {
	return l.uncompressedopener()
}

// Size implements v1.Layer
func (l *layer) Size() (int64, error) {
	return l.size, nil
}

// MediaType implements v1.Layer
func (l *layer) MediaType() (types.MediaType, error) {
	return types.DockerLayer, nil
}
|
||||
|
||||
// LayerOption applies options to layer
type LayerOption func(*layer)

// WithCompressionLevel is a functional option for overriding the default
// compression level used for compressing uncompressed tarballs.
// level is passed to the gzip writer; see compress/gzip for valid values.
func WithCompressionLevel(level int) LayerOption {
	return func(l *layer) {
		l.compression = level
	}
}
|
||||
|
||||
// WithCompressedCaching is a functional option that overrides the
// logic for accessing the compressed bytes to memoize the result
// and avoid expensive repeated gzips.
//
// The full compressed stream is buffered in memory on first open; each
// subsequent open returns a fresh reader over the same bytes.
//
// NOTE(review): err is captured by the sync.Once closure, so if the first
// open/copy fails, every later call returns that same error even if a retry
// might have succeeded — confirm this is acceptable for callers.
func WithCompressedCaching(l *layer) {
	var once sync.Once
	var err error

	buf := bytes.NewBuffer(nil)
	og := l.compressedopener

	l.compressedopener = func() (io.ReadCloser, error) {
		once.Do(func() {
			var rc io.ReadCloser
			rc, err = og()
			if err == nil {
				defer rc.Close()
				_, err = io.Copy(buf, rc)
			}
		})
		if err != nil {
			return nil, err
		}

		// Each caller gets an independent reader over the cached bytes.
		return ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
	}
}
|
||||
|
||||
// WithEstargzOptions is a functional option that allow the caller to pass
// through estargz.Options to the underlying compression layer. This is
// only meaningful when estargz is enabled.
func WithEstargzOptions(opts ...estargz.Option) LayerOption {
	return func(l *layer) {
		l.estgzopts = opts
	}
}
|
||||
|
||||
// WithEstargz is a functional option that explicitly enables estargz support.
// It replaces the compressed opener with one that re-compresses the
// uncompressed stream as estargz, records the TOC digest as an annotation,
// and (on Close) captures the DiffID exposed by the estargz reader. The
// uncompressed opener is rebuilt as a gunzip of that estargz stream.
func WithEstargz(l *layer) {
	oguncompressed := l.uncompressedopener
	estargz := func() (io.ReadCloser, error) {
		crc, err := oguncompressed()
		if err != nil {
			return nil, err
		}
		// Layer-level options are appended after caller options so the
		// configured compression level takes effect.
		eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compression))
		rc, h, err := gestargz.ReadCloser(crc, eopts...)
		if err != nil {
			return nil, err
		}
		l.annotations[estargz.TOCJSONDigestAnnotation] = h.String()
		return &and.ReadCloser{
			Reader: rc,
			CloseFunc: func() error {
				err := rc.Close()
				if err != nil {
					return err
				}
				// As an optimization, leverage the DiffID exposed by the estargz ReadCloser
				l.diffID, err = v1.NewHash(rc.DiffID().String())
				return err
			},
		}, nil
	}
	uncompressed := func() (io.ReadCloser, error) {
		urc, err := estargz()
		if err != nil {
			return nil, err
		}
		return ggzip.UnzipReadCloser(urc)
	}

	l.compressedopener = estargz
	l.uncompressedopener = uncompressed
}
|
||||
|
||||
// LayerFromFile returns a v1.Layer given a tarball
// on disk at path; the file is re-opened each time the layer is read.
func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) {
	opener := func() (io.ReadCloser, error) {
		return os.Open(path)
	}
	return LayerFromOpener(opener, opts...)
}
|
||||
|
||||
// LayerFromOpener returns a v1.Layer given an Opener function.
// The Opener may return either an uncompressed tarball (common),
// or a compressed tarball (uncommon).
//
// When using this in conjunction with something like remote.Write
// the uncompressed path may end up gzipping things multiple times:
// 1. Compute the layer SHA256
// 2. Upload the compressed layer.
// Since gzip can be expensive, we support an option to memoize the
// compression that can be passed here: tarball.WithCompressedCaching
func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
	// Open once to sniff whether the stream is already gzip-compressed.
	rc, err := opener()
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	compressed, err := ggzip.Is(rc)
	if err != nil {
		return nil, err
	}

	layer := &layer{
		compression: gzip.BestSpeed,
		annotations: make(map[string]string, 1),
	}

	// Experimental estargz support is opt-in via environment variable and is
	// prepended so caller-supplied options can still override its effects.
	if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" {
		opts = append([]LayerOption{WithEstargz}, opts...)
	}

	if compressed {
		layer.compressedopener = opener
		layer.uncompressedopener = func() (io.ReadCloser, error) {
			urc, err := opener()
			if err != nil {
				return nil, err
			}
			return ggzip.UnzipReadCloser(urc)
		}
	} else {
		layer.uncompressedopener = opener
		layer.compressedopener = func() (io.ReadCloser, error) {
			crc, err := opener()
			if err != nil {
				return nil, err
			}
			return ggzip.ReadCloserLevel(crc, layer.compression), nil
		}
	}

	for _, opt := range opts {
		opt(layer)
	}

	// Digest and size are always those of the compressed representation.
	if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil {
		return nil, err
	}

	// An option (e.g. WithEstargz) may have set diffID already; only compute
	// it from the uncompressed stream when still unset.
	empty := v1.Hash{}
	if layer.diffID == empty {
		if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil {
			return nil, err
		}
	}

	return layer, nil
}
|
||||
|
||||
// LayerFromReader returns a v1.Layer given a io.Reader.
// The reader is consumed fully into memory up front, since the Opener
// contract requires re-reading the data multiple times.
func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) {
	// Buffering due to Opener requiring multiple calls.
	a, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	return LayerFromOpener(func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(a)), nil
	}, opts...)
}
|
||||
|
||||
// computeDigest opens the stream and returns its SHA256 digest together
// with the number of bytes read.
func computeDigest(opener Opener) (v1.Hash, int64, error) {
	rc, err := opener()
	if err != nil {
		return v1.Hash{}, 0, err
	}
	defer rc.Close()

	return v1.SHA256(rc)
}

// computeDiffID opens the stream and returns its SHA256 digest,
// discarding the size.
func computeDiffID(opener Opener) (v1.Hash, error) {
	rc, err := opener()
	if err != nil {
		return v1.Hash{}, err
	}
	defer rc.Close()

	digest, _, err := v1.SHA256(rc)
	return digest, err
}
|
453
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go
generated
vendored
Normal file
453
vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go
generated
vendored
Normal file
@ -0,0 +1,453 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tarball
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
)
|
||||
|
||||
// WriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.Write with a new file.
func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error {
	w, err := os.Create(p)
	if err != nil {
		return err
	}
	defer w.Close()

	return Write(ref, img, w, opts...)
}
|
||||
|
||||
// MultiWriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.MultiWrite with a new file.
func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error {
	// Widen the tag-keyed map to the reference-keyed form.
	refToImage := make(map[name.Reference]v1.Image, len(tagToImage))
	for i, d := range tagToImage {
		refToImage[i] = d
	}
	return MultiRefWriteToFile(p, refToImage, opts...)
}

// MultiRefWriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file.
func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error {
	w, err := os.Create(p)
	if err != nil {
		return err
	}
	defer w.Close()

	return MultiRefWrite(refToImage, w, opts...)
}
|
||||
|
||||
// Write is a wrapper to write a single image and tag to a tarball.
func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error {
	return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...)
}

// MultiWrite writes the contents of each image to the provided reader, in the compressed format.
// The contents are written in the following format:
// One manifest.json file at the top level containing information about several images.
// One file for each layer, named after the layer's SHA.
// One file for the config blob, named after its SHA.
func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error {
	// Widen the tag-keyed map to the reference-keyed form.
	refToImage := make(map[name.Reference]v1.Image, len(tagToImage))
	for i, d := range tagToImage {
		refToImage[i] = d
	}
	return MultiRefWrite(refToImage, w, opts...)
}
|
||||
|
||||
// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format.
// The contents are written in the following format:
// One manifest.json file at the top level containing information about several images.
// One file for each layer, named after the layer's SHA.
// One file for the config blob, named after its SHA.
func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error {
	// process options
	o := &writeOptions{
		updates: nil,
	}
	for _, option := range opts {
		if err := option(o); err != nil {
			return err
		}
	}

	// Precompute the manifest and total size so progress reporting can
	// announce a meaningful Total.
	size, mBytes, err := getSizeAndManifest(refToImage)
	if err != nil {
		return sendUpdateReturn(o, err)
	}

	return writeImagesToTar(refToImage, mBytes, size, w, o)
}
|
||||
|
||||
// sendUpdateReturn return the passed in error message, also sending on update channel, if it exists
func sendUpdateReturn(o *writeOptions, err error) error {
	if o != nil && o.updates != nil {
		o.updates <- v1.Update{
			Error: err,
		}
	}
	return err
}

// sendProgressWriterReturn return the passed in error message, also sending on update channel, if it exists, along with downloaded information
func sendProgressWriterReturn(pw *progressWriter, err error) error {
	if pw != nil {
		return pw.Error(err)
	}
	return err
}
|
||||
|
||||
// writeImagesToTar writes the images to the tarball: one config blob and one
// file per unique layer for each image, followed by the precomputed
// manifest.json (m). size is the expected total output size, used only for
// progress reporting. All errors (and the final io.EOF sentinel) are routed
// through the progress channel when one is configured on o.
func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int64, w io.Writer, o *writeOptions) (err error) {
	if w == nil {
		return sendUpdateReturn(o, errors.New("must pass valid writer"))
	}
	imageToTags := dedupRefToImage(refToImage)

	tw := w
	var pw *progressWriter

	// we only calculate the sizes and use a progressWriter if we were provided
	// an option with a progress channel
	if o != nil && o.updates != nil {
		pw = &progressWriter{
			w:       w,
			updates: o.updates,
			size:    size,
		}
		tw = pw
	}

	tf := tar.NewWriter(tw)
	defer tf.Close()

	// Layers shared between images are only written once.
	seenLayerDigests := make(map[string]struct{})

	for img := range imageToTags {
		// Write the config.
		cfgName, err := img.ConfigName()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		cfgBlob, err := img.RawConfigFile()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil {
			return sendProgressWriterReturn(pw, err)
		}

		// Write the layers.
		layers, err := img.Layers()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		layerFiles := make([]string, len(layers))
		for i, l := range layers {
			d, err := l.Digest()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}
			// Munge the file name to appease ancient technology.
			//
			// tar assumes anything with a colon is a remote tape drive:
			// https://www.gnu.org/software/tar/manual/html_section/tar_45.html
			// Drop the algorithm prefix, e.g. "sha256:"
			hex := d.Hex

			// gunzip expects certain file extensions:
			// https://www.gnu.org/software/gzip/manual/html_node/Overview.html
			layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex)

			if _, ok := seenLayerDigests[hex]; ok {
				continue
			}
			seenLayerDigests[hex] = struct{}{}

			r, err := l.Compressed()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}
			blobSize, err := l.Size()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}

			if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil {
				return sendProgressWriterReturn(pw, err)
			}
		}
	}
	if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil {
		return sendProgressWriterReturn(pw, err)
	}

	// be sure to close the tar writer so everything is flushed out before we send our EOF
	if err := tf.Close(); err != nil {
		return sendProgressWriterReturn(pw, err)
	}
	// send an EOF to indicate finished on the channel, but nil as our return error
	_ = sendProgressWriterReturn(pw, io.EOF)
	return nil
}
|
||||
|
||||
// calculateManifest calculates the manifest and optionally the size of the tar file
// for the given set of images: one Descriptor per deduplicated image, with
// per-layer file names derived from layer digests and foreign (non-
// distributable) layers recorded in LayerSources.
func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err error) {
	imageToTags := dedupRefToImage(refToImage)

	if len(imageToTags) == 0 {
		return nil, errors.New("set of images is empty")
	}

	for img, tags := range imageToTags {
		cfgName, err := img.ConfigName()
		if err != nil {
			return nil, err
		}

		// Store foreign layer info.
		layerSources := make(map[v1.Hash]v1.Descriptor)

		// Write the layers.
		layers, err := img.Layers()
		if err != nil {
			return nil, err
		}
		layerFiles := make([]string, len(layers))
		for i, l := range layers {
			d, err := l.Digest()
			if err != nil {
				return nil, err
			}
			// Munge the file name to appease ancient technology.
			//
			// tar assumes anything with a colon is a remote tape drive:
			// https://www.gnu.org/software/tar/manual/html_section/tar_45.html
			// Drop the algorithm prefix, e.g. "sha256:"
			hex := d.Hex

			// gunzip expects certain file extensions:
			// https://www.gnu.org/software/gzip/manual/html_node/Overview.html
			layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex)

			// Add to LayerSources if it's a foreign layer.
			desc, err := partial.BlobDescriptor(img, d)
			if err != nil {
				return nil, err
			}
			if !desc.MediaType.IsDistributable() {
				diffid, err := partial.BlobToDiffID(img, d)
				if err != nil {
					return nil, err
				}
				layerSources[diffid] = *desc
			}
		}

		// Generate the tar descriptor and write it.
		m = append(m, Descriptor{
			Config:       cfgName.String(),
			RepoTags:     tags,
			Layers:       layerFiles,
			LayerSources: layerSources,
		})
	}
	// sort by name of the repotags so it is consistent. Alternatively, we could sort by hash of the
	// descriptor, but that would make it hard for humans to process
	sort.Slice(m, func(i, j int) bool {
		return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",")
	})

	return m, nil
}
|
||||
|
||||
// CalculateSize calculates the expected complete size of the output tar file
// without writing anything; the manifest bytes are computed and discarded.
func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) {
	size, _, err = getSizeAndManifest(refToImage)
	return size, err
}

// getSizeAndManifest computes the manifest JSON for the image set and the
// expected total tarball size, so writers can do both from one calculation.
func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, error) {
	m, err := calculateManifest(refToImage)
	if err != nil {
		return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err)
	}
	mBytes, err := json.Marshal(m)
	if err != nil {
		return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err)
	}

	size, err := calculateTarballSize(refToImage, mBytes)
	if err != nil {
		return 0, nil, fmt.Errorf("error calculating tarball size: %w", err)
	}
	return size, mBytes, nil
}
|
||||
|
||||
// calculateTarballSize calculates the size of the tar file:
// each config blob and layer rounded up to tar block granularity, plus the
// manifest entry and the archive's trailing padding.
func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) {
	imageToTags := dedupRefToImage(refToImage)

	for img, name := range imageToTags {
		manifest, err := img.Manifest()
		if err != nil {
			return size, fmt.Errorf("unable to get manifest for img %s: %w", name, err)
		}
		size += calculateSingleFileInTarSize(manifest.Config.Size)
		for _, l := range manifest.Layers {
			size += calculateSingleFileInTarSize(l.Size)
		}
	}
	// add the manifest
	size += calculateSingleFileInTarSize(int64(len(mBytes)))

	// add the two padding blocks that indicate end of a tar file
	size += 1024
	return size, nil
}
|
||||
|
||||
// dedupRefToImage inverts the ref->image map into image->tags, collapsing
// multiple references to the same image. Non-tag references contribute a nil
// tag list (unless the image already has tags from another reference).
func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string {
	imageToTags := make(map[v1.Image][]string)

	for ref, img := range refToImage {
		if tag, ok := ref.(name.Tag); ok {
			if tags, ok := imageToTags[img]; !ok || tags == nil {
				imageToTags[img] = []string{}
			}
			// Docker cannot load tarballs without an explicit tag:
			// https://github.com/google/go-containerregistry/issues/890
			//
			// We can't use the fully qualified tag.Name() because of rules_docker:
			// https://github.com/google/go-containerregistry/issues/527
			//
			// If the tag is "latest", but tag.String() doesn't end in ":latest",
			// just append it. Kind of gross, but should work for now.
			ts := tag.String()
			if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) {
				ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag)
			}
			imageToTags[img] = append(imageToTags[img], ts)
		} else if _, ok := imageToTags[img]; !ok {
			imageToTags[img] = nil
		}
	}

	return imageToTags
}
|
||||
|
||||
// writeTarEntry writes a file to the provided writer with a corresponding tar header
// (regular file, mode 0644, declared size), then streams r as the entry body.
func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error {
	hdr := &tar.Header{
		Mode:     0644,
		Typeflag: tar.TypeReg,
		Size:     size,
		Name:     path,
	}
	if err := tf.WriteHeader(hdr); err != nil {
		return err
	}
	_, err := io.Copy(tf, r)
	return err
}
|
||||
|
||||
// ComputeManifest get the manifest.json that will be written to the tarball
// for multiple references
func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) {
	return calculateManifest(refToImage)
}

// WriteOption a function option to pass to Write()
type WriteOption func(*writeOptions) error

// writeOptions holds configuration accumulated from WriteOptions.
type writeOptions struct {
	updates chan<- v1.Update // optional progress channel; nil disables reporting
}
|
||||
|
||||
// WithProgress create a WriteOption for passing to Write() that enables
// a channel to receive updates as they are downloaded and written to disk.
func WithProgress(updates chan<- v1.Update) WriteOption {
	return func(o *writeOptions) error {
		o.updates = updates
		return nil
	}
}
|
||||
|
||||
// progressWriter is a writer which will send the download progress
// on the updates channel after every successful Write. Error and Close
// emit terminal updates (Close always reports io.EOF as the sentinel).
type progressWriter struct {
	w              io.Writer
	updates        chan<- v1.Update
	size, complete int64 // expected total bytes / bytes written so far
}

// Write forwards to the wrapped writer, then publishes cumulative progress.
// NOTE: the channel send blocks until a receiver is ready.
func (pw *progressWriter) Write(p []byte) (int, error) {
	n, err := pw.w.Write(p)
	if err != nil {
		return n, err
	}

	pw.complete += int64(n)

	pw.updates <- v1.Update{
		Total:    pw.size,
		Complete: pw.complete,
	}

	return n, err
}

// Error publishes err (with current progress) on the channel and returns it.
func (pw *progressWriter) Error(err error) error {
	pw.updates <- v1.Update{
		Total:    pw.size,
		Complete: pw.complete,
		Error:    err,
	}
	return err
}

// Close publishes io.EOF as the completion sentinel and returns io.EOF.
func (pw *progressWriter) Close() error {
	pw.updates <- v1.Update{
		Total:    pw.size,
		Complete: pw.complete,
		Error:    io.EOF,
	}
	return io.EOF
}
|
||||
|
||||
// calculateSingleFileInTarSize calculate the size a file will take up in a tar archive,
// given the input data. Provided by rounding up to nearest whole block (512)
// and adding header 512
func calculateSingleFileInTarSize(in int64) (out int64) {
	const blockSize = 512
	// Payload blocks, rounded up; integer math avoids float rounding.
	blocks := (in + blockSize - 1) / blockSize
	// One extra block for the tar header.
	return (blocks + 1) * blockSize
}
|
71
vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
generated
vendored
Normal file
71
vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
// MediaType is an enumeration of the supported mime types that an element of an image might have.
type MediaType string

// The collection of known MediaType values.
const (
	OCIContentDescriptor           MediaType = "application/vnd.oci.descriptor.v1+json"
	OCIImageIndex                  MediaType = "application/vnd.oci.image.index.v1+json"
	OCIManifestSchema1             MediaType = "application/vnd.oci.image.manifest.v1+json"
	OCIConfigJSON                  MediaType = "application/vnd.oci.image.config.v1+json"
	OCILayer                       MediaType = "application/vnd.oci.image.layer.v1.tar+gzip"
	OCIRestrictedLayer             MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
	OCIUncompressedLayer           MediaType = "application/vnd.oci.image.layer.v1.tar"
	OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar"

	DockerManifestSchema1       MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	DockerManifestSchema2       MediaType = "application/vnd.docker.distribution.manifest.v2+json"
	DockerManifestList          MediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
	DockerLayer                 MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	DockerConfigJSON            MediaType = "application/vnd.docker.container.image.v1+json"
	DockerPluginConfig          MediaType = "application/vnd.docker.plugin.v1+json"
	DockerForeignLayer          MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
	DockerUncompressedLayer     MediaType = "application/vnd.docker.image.rootfs.diff.tar"

	OCIVendorPrefix    = "vnd.oci"
	DockerVendorPrefix = "vnd.docker"
)

// IsDistributable returns true if a layer is distributable, see:
// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
func (m MediaType) IsDistributable() bool {
	// Only the foreign/restricted layer types are non-distributable.
	return m != DockerForeignLayer && m != OCIRestrictedLayer && m != OCIUncompressedRestrictedLayer
}

// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index.
func (m MediaType) IsImage() bool {
	return m == OCIManifestSchema1 || m == DockerManifestSchema2
}

// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image.
func (m MediaType) IsIndex() bool {
	return m == OCIImageIndex || m == DockerManifestList
}
|
324
vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
generated
vendored
Normal file
324
vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Config) DeepCopyInto(out *Config) {
|
||||
*out = *in
|
||||
if in.Cmd != nil {
|
||||
in, out := &in.Cmd, &out.Cmd
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Healthcheck != nil {
|
||||
in, out := &in.Healthcheck, &out.Healthcheck
|
||||
*out = new(HealthConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Entrypoint != nil {
|
||||
in, out := &in.Entrypoint, &out.Entrypoint
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Env != nil {
|
||||
in, out := &in.Env, &out.Env
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Labels != nil {
|
||||
in, out := &in.Labels, &out.Labels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.OnBuild != nil {
|
||||
in, out := &in.OnBuild, &out.OnBuild
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Volumes != nil {
|
||||
in, out := &in.Volumes, &out.Volumes
|
||||
*out = make(map[string]struct{}, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ExposedPorts != nil {
|
||||
in, out := &in.ExposedPorts, &out.ExposedPorts
|
||||
*out = make(map[string]struct{}, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Shell != nil {
|
||||
in, out := &in.Shell, &out.Shell
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
|
||||
func (in *Config) DeepCopy() *Config {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Config)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ConfigFile) DeepCopyInto(out *ConfigFile) {
|
||||
*out = *in
|
||||
in.Created.DeepCopyInto(&out.Created)
|
||||
if in.History != nil {
|
||||
in, out := &in.History, &out.History
|
||||
*out = make([]History, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
in.RootFS.DeepCopyInto(&out.RootFS)
|
||||
in.Config.DeepCopyInto(&out.Config)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile.
|
||||
func (in *ConfigFile) DeepCopy() *ConfigFile {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ConfigFile)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Descriptor) DeepCopyInto(out *Descriptor) {
|
||||
*out = *in
|
||||
out.Digest = in.Digest
|
||||
if in.Data != nil {
|
||||
in, out := &in.Data, &out.Data
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.URLs != nil {
|
||||
in, out := &in.URLs, &out.URLs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Platform != nil {
|
||||
in, out := &in.Platform, &out.Platform
|
||||
*out = new(Platform)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor.
|
||||
func (in *Descriptor) DeepCopy() *Descriptor {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Descriptor)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Hash) DeepCopyInto(out *Hash) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash.
|
||||
func (in *Hash) DeepCopy() *Hash {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Hash)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HealthConfig) DeepCopyInto(out *HealthConfig) {
|
||||
*out = *in
|
||||
if in.Test != nil {
|
||||
in, out := &in.Test, &out.Test
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig.
|
||||
func (in *HealthConfig) DeepCopy() *HealthConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HealthConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *History) DeepCopyInto(out *History) {
|
||||
*out = *in
|
||||
in.Created.DeepCopyInto(&out.Created)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History.
|
||||
func (in *History) DeepCopy() *History {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(History)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IndexManifest) DeepCopyInto(out *IndexManifest) {
|
||||
*out = *in
|
||||
if in.Manifests != nil {
|
||||
in, out := &in.Manifests, &out.Manifests
|
||||
*out = make([]Descriptor, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest.
|
||||
func (in *IndexManifest) DeepCopy() *IndexManifest {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IndexManifest)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Manifest) DeepCopyInto(out *Manifest) {
|
||||
*out = *in
|
||||
in.Config.DeepCopyInto(&out.Config)
|
||||
if in.Layers != nil {
|
||||
in, out := &in.Layers, &out.Layers
|
||||
*out = make([]Descriptor, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest.
|
||||
func (in *Manifest) DeepCopy() *Manifest {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Manifest)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Platform) DeepCopyInto(out *Platform) {
|
||||
*out = *in
|
||||
if in.OSFeatures != nil {
|
||||
in, out := &in.OSFeatures, &out.OSFeatures
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Features != nil {
|
||||
in, out := &in.Features, &out.Features
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
|
||||
func (in *Platform) DeepCopy() *Platform {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Platform)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RootFS) DeepCopyInto(out *RootFS) {
|
||||
*out = *in
|
||||
if in.DiffIDs != nil {
|
||||
in, out := &in.DiffIDs, &out.DiffIDs
|
||||
*out = make([]Hash, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS.
|
||||
func (in *RootFS) DeepCopy() *RootFS {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RootFS)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
|
||||
func (in *Time) DeepCopy() *Time {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Time)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
12
vendor/github.com/klauspost/compress/LICENSE
generated
vendored
12
vendor/github.com/klauspost/compress/LICENSE
generated
vendored
@ -290,3 +290,15 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
-----------------
|
||||
|
||||
Files: s2/cmd/internal/filepathx/*
|
||||
|
||||
Copyright 2016 The filepathx Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
12
vendor/github.com/klauspost/compress/README.md
generated
vendored
12
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -17,12 +17,17 @@ This package provides various compression algorithms.
|
||||
|
||||
# changelog
|
||||
|
||||
* Aug 30, 2021 (v1.13.5)
|
||||
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
|
||||
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
|
||||
* zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
|
||||
* Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
|
||||
|
||||
* Aug 12, 2021 (v1.13.4)
|
||||
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
|
||||
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
|
||||
|
||||
* Aug 3, 2021 (v1.13.3)
|
||||
|
||||
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
|
||||
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
|
||||
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
|
||||
@ -31,7 +36,6 @@ This package provides various compression algorithms.
|
||||
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
|
||||
|
||||
* Jun 14, 2021 (v1.13.1)
|
||||
|
||||
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
|
||||
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
|
||||
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
|
||||
@ -64,6 +68,9 @@ This package provides various compression algorithms.
|
||||
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
|
||||
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.12.1</summary>
|
||||
|
||||
* Mar 26, 2021 (v1.11.13)
|
||||
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
|
||||
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
|
||||
@ -118,6 +125,7 @@ This package provides various compression algorithms.
|
||||
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
|
||||
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
|
||||
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes prior to v1.11.0</summary>
|
||||
|
267
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
267
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
@ -344,35 +344,241 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
|
||||
var buf [256]byte
|
||||
var off uint8
|
||||
|
||||
shift := (8 - d.actualTableLog) & 7
|
||||
switch d.actualTableLog {
|
||||
case 8:
|
||||
const shift = 8 - 8
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[br.peekByteFast()>>shift]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
case 7:
|
||||
const shift = 8 - 7
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 6:
|
||||
const shift = 8 - 6
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 5:
|
||||
const shift = 8 - 5
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
const shift = 8 - 4
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
const shift = 8 - 3
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
const shift = 8 - 2
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
const shift = 8 - 1
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[uint8(br.value>>(56+shift))]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
off += 4
|
||||
if off == 0 {
|
||||
if len(dst)+256 > maxDecodedSize {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
dst = append(dst, buf[:]...)
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
|
||||
}
|
||||
|
||||
if len(dst)+int(off) > maxDecodedSize {
|
||||
@ -383,6 +589,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
|
||||
|
||||
// br < 4, so uint8 is fine
|
||||
bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
|
||||
shift := (8 - d.actualTableLog) & 7
|
||||
|
||||
for bitsLeft > 0 {
|
||||
if br.bitsRead >= 64-8 {
|
||||
for br.off > 0 {
|
||||
@ -423,24 +631,24 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
|
||||
var buf [256]byte
|
||||
var off uint8
|
||||
|
||||
const shift = 0
|
||||
const shift = 56
|
||||
|
||||
//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
|
||||
for br.off >= 4 {
|
||||
br.fillFast()
|
||||
v := dt[br.peekByteFast()>>shift]
|
||||
v := dt[uint8(br.value>>shift)]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+0] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
v = dt[uint8(br.value>>shift)]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+1] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
v = dt[uint8(br.value>>shift)]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+2] = uint8(v.entry >> 8)
|
||||
|
||||
v = dt[br.peekByteFast()>>shift]
|
||||
v = dt[uint8(br.value>>shift)]
|
||||
br.advance(uint8(v.entry))
|
||||
buf[off+3] = uint8(v.entry >> 8)
|
||||
|
||||
@ -474,7 +682,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
|
||||
br.close()
|
||||
return nil, ErrMaxDecodedSizeExceeded
|
||||
}
|
||||
v := dt[br.peekByteFast()>>shift]
|
||||
v := dt[br.peekByteFast()]
|
||||
nBits := uint8(v.entry)
|
||||
br.advance(nBits)
|
||||
bitsLeft -= int8(nBits)
|
||||
@ -709,7 +917,6 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
shift := (8 - d.actualTableLog) & 7
|
||||
|
||||
const tlSize = 1 << 8
|
||||
const tlMask = tlSize - 1
|
||||
single := d.dt.single[:tlSize]
|
||||
|
||||
// Use temp table to avoid bound checks/append penalty.
|
||||
|
1
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
1
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
2
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
generated
vendored
@ -9,4 +9,4 @@ package xxhash
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(*Digest, []byte) int
|
||||
func writeBlocks(d *Digest, b []byte) int
|
||||
|
66
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
66
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
@ -6,7 +6,7 @@
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
@ -16,39 +16,39 @@
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
// DI prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
ADDQ DI, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
MOVQ ·prime4v(SB), DI
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
@ -100,16 +100,16 @@ noBlocks:
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
@ -117,18 +117,18 @@ wordLoop:
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
ADDQ DI, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
@ -138,19 +138,19 @@ fourByte:
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ arg1_base+8(FP), CX
|
||||
MOVQ arg1_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ arg+0(FP), AX
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
@ -199,7 +199,7 @@ blockLoop:
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
@ -208,8 +208,8 @@ blockLoop:
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ arg1_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
8
vendor/github.com/mitchellh/mapstructure/.travis.yml
generated
vendored
8
vendor/github.com/mitchellh/mapstructure/.travis.yml
generated
vendored
@ -1,8 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
- tip
|
||||
|
||||
script:
|
||||
- go test
|
52
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
generated
vendored
52
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
generated
vendored
@ -1,3 +1,55 @@
|
||||
## unreleased
|
||||
|
||||
* Fix regression where `*time.Time` value would be set to empty and not be sent
|
||||
to decode hooks properly [GH-232]
|
||||
|
||||
## 1.4.0
|
||||
|
||||
* A new decode hook type `DecodeHookFuncValue` has been added that has
|
||||
access to the full values. [GH-183]
|
||||
* Squash is now supported with embedded fields that are struct pointers [GH-205]
|
||||
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
|
||||
|
||||
## 1.3.3
|
||||
|
||||
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
|
||||
|
||||
## 1.3.2
|
||||
|
||||
* Decode into interface type with a struct value is supported [GH-187]
|
||||
|
||||
## 1.3.1
|
||||
|
||||
* Squash should only squash embedded structs. [GH-194]
|
||||
|
||||
## 1.3.0
|
||||
|
||||
* Added `",omitempty"` support. This will ignore zero values in the source
|
||||
structure when encoding. [GH-145]
|
||||
|
||||
## 1.2.3
|
||||
|
||||
* Fix duplicate entries in Keys list with pointer values. [GH-185]
|
||||
|
||||
## 1.2.2
|
||||
|
||||
* Do not add unsettable (unexported) values to the unused metadata key
|
||||
or "remain" value. [GH-150]
|
||||
|
||||
## 1.2.1
|
||||
|
||||
* Go modules checksum mismatch fix
|
||||
|
||||
## 1.2.0
|
||||
|
||||
* Added support to capture unused values in a field using the `",remain"` value
|
||||
in the mapstructure tag. There is an example to showcase usage.
|
||||
* Added `DecoderConfig` option to always squash embedded structs
|
||||
* `json.Number` can decode into `uint` types
|
||||
* Empty slices are preserved and not replaced with nil slices
|
||||
* Fix panic that can occur in when decoding a map into a nil slice of structs
|
||||
* Improved package documentation for godoc
|
||||
|
||||
## 1.1.2
|
||||
|
||||
* Fix error when decode hook decodes interface implementation into interface
|
||||
|
71
vendor/github.com/mitchellh/mapstructure/decode_hooks.go
generated
vendored
71
vendor/github.com/mitchellh/mapstructure/decode_hooks.go
generated
vendored
@ -1,6 +1,7 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
var f3 DecodeHookFuncValue
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2}
|
||||
potential := []interface{}{f1, f2, f3}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Type, to reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
from reflect.Value, to reflect.Value) (interface{}, error) {
|
||||
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from, to, data)
|
||||
return f(from.Type(), to.Type(), from.Interface())
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), data)
|
||||
return f(from.Kind(), to.Kind(), from.Interface())
|
||||
case DecodeHookFuncValue:
|
||||
return f(from, to)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
@ -56,22 +60,16 @@ func DecodeHookExec(
|
||||
// The composed funcs are called in order, with the result of the
|
||||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
|
||||
var err error
|
||||
var data interface{}
|
||||
newFrom := f
|
||||
for _, f1 := range fs {
|
||||
data, err = DecodeHookExec(f1, f, t, data)
|
||||
data, err = DecodeHookExec(f1, newFrom, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify the from kind to be correct with the new data
|
||||
f = nil
|
||||
if val := reflect.ValueOf(data); val.IsValid() {
|
||||
f = val.Type()
|
||||
}
|
||||
newFrom = reflect.ValueOf(data)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
@ -215,3 +213,44 @@ func WeaklyTypedHook(
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func RecursiveStructToMapHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
|
||||
if f.Kind() != reflect.Struct {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
var i interface{} = struct{}{}
|
||||
if t.Type() != reflect.TypeOf(&i).Elem() {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
m := make(map[string]interface{})
|
||||
t.Set(reflect.ValueOf(m))
|
||||
|
||||
return f.Interface(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
|
||||
// strings to the UnmarshalText function, when the target type
|
||||
// implements the encoding.TextUnmarshaler interface
|
||||
func TextUnmarshallerHookFunc() DecodeHookFuncType {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
result := reflect.New(t).Interface()
|
||||
unmarshaller, ok := result.(encoding.TextUnmarshaler)
|
||||
if !ok {
|
||||
return data, nil
|
||||
}
|
||||
if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user