Update vendor

Erik Wilson 2019-09-30 16:25:17 -07:00
parent fb7c8fe069
commit ca70e11d54
220 changed files with 6517 additions and 1819 deletions

View File

@ -5,5 +5,5 @@ go 1.12
require (
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)

View File

@ -14,3 +14,5 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

53 vendor/github.com/Microsoft/hcsshim/Protobuild.toml generated vendored Normal file
View File

@ -0,0 +1,53 @@
version = "unstable"
generator = "gogoctrd"
plugins = ["grpc", "fieldpath"]
# Control protoc include paths. Below are usually some good defaults, but feel
# free to try it without them if it works for your project.
[includes]
# Include paths that will be added before all others. Typically, you want to
# treat the root of the project as an include, but this may not be necessary.
before = ["./protobuf"]
# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.
packages = ["github.com/gogo/protobuf"]
# Paths that will be added untouched to the end of the includes. We use
# `/usr/local/include` to pickup the common install location of protobuf.
# This is the default.
after = ["/usr/local/include"]
# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]
# Lock down runhcs config
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
target = "cmd/containerd-shim-runhcs-v1/stats/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]

View File

@ -8,22 +8,34 @@ environment:
GOPATH: c:\gopath
PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH%
stack: go 1.11
stack: go 1.12.9
build_script:
- appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
- 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL
- gometalinter.exe --config .gometalinter.json ./...
- go build ./cmd/wclayer
- go build ./cmd/containerd-shim-runhcs-v1
- go build ./cmd/runhcs
- go build ./cmd/tar2ext4
- go build ./cmd/wclayer
- go build ./internal/tools/grantvmgroupaccess
- go build ./internal/tools/uvmboot
- go build ./internal/tools/zapdir
- go test -v ./... -tags admin
- go test -c ./test/containerd-shim-runhcs-v1/ -tags functional
- go test -c ./test/cri-containerd/ -tags functional
- go test -c ./test/functional/ -tags functional
- go test -c ./test/runhcs/ -tags integration
- go test -c ./test/runhcs/ -tags functional
artifacts:
- path: 'wclayer.exe'
- path: 'containerd-shim-runhcs-v1.exe'
- path: 'runhcs.exe'
- path: 'tar2ext4.exe'
- path: 'wclayer.exe'
- path: 'grantvmgroupaccess.exe'
- path: 'uvmboot.exe'
- path: 'zapdir.exe'
- path: 'containerd-shim-runhcs-v1.test.exe'
- path: 'cri-containerd.test.exe'
- path: 'functional.test.exe'
- path: 'runhcs.test.exe'

View File

@ -0,0 +1 @@
package options

File diff suppressed because it is too large

View File

@ -0,0 +1,63 @@
syntax = "proto3";
package containerd.runhcs.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options;options";
// Options are the set of customizations that can be passed at Create time.
message Options {
// enable debug tracing
bool debug = 1;
enum DebugType {
NPIPE = 0;
FILE = 1;
ETW = 2;
}
// debug tracing output type
DebugType debug_type = 2;
// registry key root for storage of the runhcs container state
string registry_root = 3;
// sandbox_image is the image to use for the sandbox that matches the
// sandbox_platform.
string sandbox_image = 4;
// sandbox_platform is a CRI setting that specifies the platform
// architecture for all sandboxes in this runtime. Values are
// 'windows/amd64' and 'linux/amd64'.
string sandbox_platform = 5;
enum SandboxIsolation {
PROCESS = 0;
HYPERVISOR = 1;
}
// sandbox_isolation is a CRI setting that specifies the isolation level of
// the sandbox. For Windows runtime PROCESS and HYPERVISOR are valid. For
// LCOW only HYPERVISOR is valid and default if omitted.
SandboxIsolation sandbox_isolation = 6;
// boot_files_root_path is the path to the directory containing the LCOW
// kernel and root FS files.
string boot_files_root_path = 7;
}
// ProcessDetails contains additional information about a process. This is the additional
// info returned in the Pids query.
message ProcessDetails {
string image_name = 1;
google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
uint64 kernel_time_100_ns = 3;
uint64 memory_commit_bytes = 4;
uint64 memory_working_set_private_bytes = 5;
uint64 memory_working_set_shared_bytes = 6;
uint32 process_id = 7;
uint64 user_time_100_ns = 8;
string exec_id = 9;
}
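
The Options message above is what the runhcs shim receives at Create time. As a rough sketch only — the Go import path comes from the go_package option above, but the field and enum names (for example Options_HYPERVISOR) are assumed from standard gogo/protobuf code generation rather than taken from this diff — populating it from Go might look like:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
)

func main() {
	// Field and enum names below assume the usual gogo/protobuf naming for
	// the Options message and its nested enums shown above.
	opts := &options.Options{
		Debug:            true,
		DebugType:        options.Options_FILE,
		SandboxPlatform:  "windows/amd64",
		SandboxIsolation: options.Options_HYPERVISOR,
	}
	fmt.Println(opts.SandboxPlatform, opts.SandboxIsolation)
}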

View File

@ -1,8 +1,10 @@
package hcsshim
import (
"context"
"fmt"
"os"
"sync"
"time"
"github.com/Microsoft/hcsshim/internal/hcs"
@ -52,7 +54,10 @@ const (
type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse
type container struct {
system *hcs.System
system *hcs.System
waitOnce sync.Once
waitErr error
waitCh chan struct{}
}
// createComputeSystemAdditionalJSON is read from the environment at initialisation
@ -71,61 +76,87 @@ func CreateContainer(id string, c *ContainerConfig) (Container, error) {
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
}
system, err := hcs.CreateComputeSystem(id, fullConfig)
system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
if err != nil {
return nil, err
}
return &container{system}, err
return &container{system: system}, err
}
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
system, err := hcs.OpenComputeSystem(id)
system, err := hcs.OpenComputeSystem(context.Background(), id)
if err != nil {
return nil, err
}
return &container{system}, err
return &container{system: system}, err
}
// GetContainers gets a list of the containers on the system that match the query
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
return hcs.GetComputeSystems(q)
return hcs.GetComputeSystems(context.Background(), q)
}
// Start synchronously starts the container.
func (container *container) Start() error {
return convertSystemError(container.system.Start(), container)
return convertSystemError(container.system.Start(context.Background()), container)
}
// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
func (container *container) Shutdown() error {
return convertSystemError(container.system.Shutdown(), container)
err := container.system.Shutdown(context.Background())
if err != nil {
return convertSystemError(err, container)
}
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"}
}
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
return convertSystemError(container.system.Terminate(), container)
err := container.system.Terminate(context.Background())
if err != nil {
return convertSystemError(err, container)
}
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"}
}
// Wait synchronously waits for the container to shut down or terminate.
func (container *container) Wait() error {
return convertSystemError(container.system.Wait(), container)
err := container.system.Wait()
if err == nil {
err = container.system.ExitError()
}
return convertSystemError(err, container)
}
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns ErrTimeout if the timeout elapses before the container exits.
func (container *container) WaitTimeout(t time.Duration) error {
return convertSystemError(container.system.WaitTimeout(t), container)
func (container *container) WaitTimeout(timeout time.Duration) error {
container.waitOnce.Do(func() {
container.waitCh = make(chan struct{})
go func() {
container.waitErr = container.Wait()
close(container.waitCh)
}()
})
t := time.NewTimer(timeout)
defer t.Stop()
select {
case <-t.C:
return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"}
case <-container.waitCh:
return container.waitErr
}
}
// Pause pauses the execution of a container.
func (container *container) Pause() error {
return convertSystemError(container.system.Pause(), container)
return convertSystemError(container.system.Pause(context.Background()), container)
}
// Resume resumes the execution of a container.
func (container *container) Resume() error {
return convertSystemError(container.system.Resume(), container)
return convertSystemError(container.system.Resume(context.Background()), container)
}
// HasPendingUpdates returns true if the container has updates pending to install
@ -135,7 +166,7 @@ func (container *container) HasPendingUpdates() (bool, error) {
// Statistics returns statistics for the container. This is a legacy v1 call
func (container *container) Statistics() (Statistics, error) {
properties, err := container.system.Properties(schema1.PropertyTypeStatistics)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)
if err != nil {
return Statistics{}, convertSystemError(err, container)
}
@ -145,7 +176,7 @@ func (container *container) Statistics() (Statistics, error) {
// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
func (container *container) ProcessList() ([]ProcessListItem, error) {
properties, err := container.system.Properties(schema1.PropertyTypeProcessList)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList)
if err != nil {
return nil, convertSystemError(err, container)
}
@ -155,7 +186,7 @@ func (container *container) ProcessList() ([]ProcessListItem, error) {
// This is a legacy v1 call
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk)
if err != nil {
return nil, convertSystemError(err, container)
}
@ -165,20 +196,20 @@ func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskContr
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
p, err := container.system.CreateProcess(c)
p, err := container.system.CreateProcessNoStdio(c)
if err != nil {
return nil, convertSystemError(err, container)
}
return &process{p}, nil
return &process{p: p.(*hcs.Process)}, nil
}
// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
p, err := container.system.OpenProcess(pid)
p, err := container.system.OpenProcess(context.Background(), pid)
if err != nil {
return nil, convertSystemError(err, container)
}
return &process{p}, nil
return &process{p: p}, nil
}
// Close cleans up any state associated with the container but does not terminate or wait for it.
@ -188,5 +219,5 @@ func (container *container) Close() error {
// Modify the System
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
return convertSystemError(container.system.Modify(config), container)
return convertSystemError(container.system.Modify(context.Background(), config), container)
}
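
The WaitTimeout rewrite above moves the timeout out of the hcs layer: the wrapper starts a single background Wait (guarded by sync.Once) and races its completion against a timer. A minimal standalone sketch of the same pattern, using an illustrative waiter type that is not part of hcsshim:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errTimeout = errors.New("timeout waiting for completion")

// waiter mirrors the shape of the container wrapper above: Wait is started
// at most once, and WaitTimeout multiplexes its result against a timer.
type waiter struct {
	wait     func() error
	waitOnce sync.Once
	waitErr  error
	waitCh   chan struct{}
}

func (w *waiter) WaitTimeout(timeout time.Duration) error {
	w.waitOnce.Do(func() {
		w.waitCh = make(chan struct{})
		go func() {
			w.waitErr = w.wait()
			close(w.waitCh)
		}()
	})
	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case <-t.C:
		return errTimeout
	case <-w.waitCh:
		return w.waitErr
	}
}

func main() {
	w := &waiter{wait: func() error { time.Sleep(50 * time.Millisecond); return nil }}
	fmt.Println(w.WaitTimeout(10 * time.Millisecond)) // timeout waiting for completion
	fmt.Println(w.WaitTimeout(time.Second))           // <nil>
}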

39 vendor/github.com/Microsoft/hcsshim/go.mod generated vendored Normal file
View File

@ -0,0 +1,39 @@
module github.com/Microsoft/hcsshim
go 1.12
require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
github.com/blang/semver v3.1.0+incompatible // indirect
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/gogo/googleapis v1.2.0 // indirect
github.com/gogo/protobuf v1.2.1
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 // indirect
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
github.com/opencontainers/runtime-spec v0.0.0-20190207185410-29686dbc5559
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 // indirect
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
go.opencensus.io v0.22.0
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
google.golang.org/grpc v1.20.1
gotest.tools v2.2.0+incompatible // indirect
k8s.io/kubernetes v1.13.0
)

142 vendor/github.com/Microsoft/hcsshim/go.sum generated vendored Normal file
View File

@ -0,0 +1,142 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v0.0.0-20190214164719-faec567304bb h1:TeJqRxMMwB7ex9yxtnc18AV+vVnjMePVQEhT6cQFhUU=
github.com/containerd/containerd v0.0.0-20190214164719-faec567304bb/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.2.8 h1:oM84oDW6+A0FQ4aWW5wnnexazvrQA5Hw6iXAi4rczWw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190826204247-d618c80077fe h1:rqBP1w6ViOtCCAFKMerm0U9e/hEmTrJXStmQph9YbOQ=
github.com/containerd/containerd v1.3.0-beta.2.0.20190826204247-d618c80077fe/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 h1:rG1clvJbgsUcmb50J82YUJhUMopWNtZvyMZjb+4fqGw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20180920185216-2a805f718635 h1:Hh9KYLzbpTyhtCnW4p0Iy+bJNO4fGKFZp1ylELZw6TI=
github.com/containerd/ttrpc v0.0.0-20180920185216-2a805f718635/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190613183316-1fb3814edf44 h1:vG5QXCUakUhR2CRI44aD3joCWcvb5mfZRxcwVqBVGeU=
github.com/containerd/ttrpc v0.0.0-20190613183316-1fb3814edf44/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190826154248-f969a7f076a2 h1:uR0Zz83OrfOhXWwDdwVYirFZI/LMdZXMzCHzfnQFO9w=
github.com/containerd/ttrpc v0.0.0-20190826154248-f969a7f076a2/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 h1:cAv7ZbSmyb1wjn6T4TIiyFCkpcfgpbcNNC3bM2srLaI=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f h1:a969LJ4IQFwRHYqonHtUDMSh9i54WcKggeEkQ3fZMl4=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.0.0-20190207185410-29686dbc5559 h1:pVIiB5BBYCSqbku9gTus5uZ+dmmZiWtmHAaI8Y1hpb4=
github.com/opencontainers/runtime-spec v0.0.0-20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=

View File

@ -7,7 +7,7 @@ import (
"fmt"
"syscall"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
)
//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go

View File

@ -2,8 +2,9 @@ package hcn
import (
"encoding/json"
"errors"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
@ -121,7 +122,10 @@ func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) {
}
func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) {
networkGuid := guid.FromString(networkId)
networkGuid, err := guid.FromString(networkId)
if err != nil {
return nil, errInvalidNetworkID
}
// Open network.
var networkHandle hcnNetwork
var resultBuffer *uint16
@ -167,7 +171,10 @@ func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndp
}
func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) {
endpointGuid := guid.FromString(endpointId)
endpointGuid, err := guid.FromString(endpointId)
if err != nil {
return nil, errInvalidEndpointID
}
// Open endpoint
var (
endpointHandle hcnEndpoint
@ -208,7 +215,10 @@ func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, e
}
func deleteEndpoint(endpointId string) error {
endpointGuid := guid.FromString(endpointId)
endpointGuid, err := guid.FromString(endpointId)
if err != nil {
return errInvalidEndpointID
}
var resultBuffer *uint16
hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer)
if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil {
@ -299,6 +309,10 @@ func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) {
func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) {
logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id)
if endpoint.HostComputeNamespace != "" {
return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set")
}
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
@ -339,7 +353,7 @@ func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingReq
}
// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint.
func (endpoint *HostComputeEndpoint) ApplyPolicy(endpointPolicy PolicyEndpointRequest) error {
func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error {
logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id)
settingsJson, err := json.Marshal(endpointPolicy)
@ -348,7 +362,7 @@ func (endpoint *HostComputeEndpoint) ApplyPolicy(endpointPolicy PolicyEndpointRe
}
requestMessage := &ModifyEndpointSettingRequest{
ResourceType: EndpointResourceTypePolicy,
RequestType: RequestTypeUpdate,
RequestType: requestType,
Settings: settingsJson,
}
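
ApplyPolicy above now takes an explicit RequestType instead of always issuing an update. A hedged sketch of a migrated call site (the endpoint name is illustrative, and a real request would carry actual endpoint policies rather than an empty PolicyEndpointRequest):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	endpoint, err := hcn.GetEndpointByName("demo-endpoint")
	if err != nil {
		panic(err)
	}
	// Callers must now pick the request type explicitly; RequestTypeUpdate
	// reproduces the behaviour of the old parameterless signature.
	if err := endpoint.ApplyPolicy(hcn.RequestTypeUpdate, hcn.PolicyEndpointRequest{}); err != nil {
		panic(err)
	}
	fmt.Println("policy applied to", endpoint.Id)
}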

View File

@ -3,13 +3,22 @@
package hcn
import (
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
var (
errInvalidNetworkID = errors.New("invalid network ID")
errInvalidEndpointID = errors.New("invalid endpoint ID")
errInvalidNamespaceID = errors.New("invalid namespace ID")
errInvalidLoadBalancerID = errors.New("invalid load balancer ID")
)
func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
errorFound := false
@ -81,7 +90,7 @@ func (e LoadBalancerNotFoundError) Error() string {
// IsNotFoundError returns a boolean indicating whether the error was caused by
// a resource not being found.
func IsNotFoundError(err error) bool {
switch err.(type) {
switch pe := err.(type) {
case NetworkNotFoundError:
return true
case EndpointNotFoundError:
@ -90,6 +99,8 @@ func IsNotFoundError(err error) bool {
return true
case LoadBalancerNotFoundError:
return true
case *hcserror.HcsError:
return pe.Err == hcs.ErrElementNotFound
}
return false
}
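
With the extra case above, IsNotFoundError also recognizes an HCS "element not found" error surfaced as *hcserror.HcsError, so callers can keep a single not-found check. A small usage sketch (the network name is illustrative):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	network, err := hcn.GetNetworkByName("no-such-network")
	if err != nil {
		// Covers the typed *NotFoundError values and, after this change,
		// HcsError values wrapping hcs.ErrElementNotFound.
		if hcn.IsNotFoundError(err) {
			fmt.Println("network does not exist")
			return
		}
		panic(err)
	}
	fmt.Println(network.Id)
}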

View File

@ -24,7 +24,7 @@ var (
// HNSVersion1803 added ACL functionality.
HNSVersion1803 = Version{Major: 7, Minor: 2}
// V2ApiSupport allows the use of V2 Api calls and V2 Schema.
V2ApiSupport = Version{Major: 9, Minor: 1}
V2ApiSupport = Version{Major: 9, Minor: 2}
// Remote Subnet allows for Remote Subnet policies on Overlay networks
RemoteSubnetVersion = Version{Major: 9, Minor: 2}
// A Host Route policy allows for local container to local host communication Overlay networks

View File

@ -3,7 +3,7 @@ package hcn
import (
"encoding/json"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
@ -148,7 +148,10 @@ func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) {
}
func modifyLoadBalancer(loadBalancerId string, settings string) (*HostComputeLoadBalancer, error) {
loadBalancerGuid := guid.FromString(loadBalancerId)
loadBalancerGuid, err := guid.FromString(loadBalancerId)
if err != nil {
return nil, errInvalidLoadBalancerID
}
// Open loadBalancer.
var (
loadBalancerHandle hcnLoadBalancer
@ -189,7 +192,10 @@ func modifyLoadBalancer(loadBalancerId string, settings string) (*HostComputeLoa
}
func deleteLoadBalancer(loadBalancerId string) error {
loadBalancerGuid := guid.FromString(loadBalancerId)
loadBalancerGuid, err := guid.FromString(loadBalancerId)
if err != nil {
return errInvalidLoadBalancerID
}
var resultBuffer *uint16
hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer)
if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil {

View File

@ -5,8 +5,8 @@ import (
"os"
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
icni "github.com/Microsoft/hcsshim/internal/cni"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/Microsoft/hcsshim/internal/runhcs"
@ -165,7 +165,10 @@ func createNamespace(settings string) (*HostComputeNamespace, error) {
}
func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) {
namespaceGuid := guid.FromString(namespaceId)
namespaceGuid, err := guid.FromString(namespaceId)
if err != nil {
return nil, errInvalidNamespaceID
}
// Open namespace.
var (
namespaceHandle hcnNamespace
@ -206,7 +209,10 @@ func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace
}
func deleteNamespace(namespaceId string) error {
namespaceGuid := guid.FromString(namespaceId)
namespaceGuid, err := guid.FromString(namespaceId)
if err != nil {
return errInvalidNamespaceID
}
var resultBuffer *uint16
hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer)
if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil {
@ -241,7 +247,11 @@ func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error)
// GetNamespaceByID returns the Namespace specified by Id.
func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) {
return getNamespace(guid.FromString(namespaceId), defaultQueryJson())
g, err := guid.FromString(namespaceId)
if err != nil {
return nil, errInvalidNamespaceID
}
return getNamespace(g, defaultQueryJson())
}
// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id.

View File

@ -2,8 +2,9 @@ package hcn
import (
"encoding/json"
"errors"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
@ -132,6 +133,12 @@ func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error
}
// Convert output to HostComputeNetwork
var outputNetwork HostComputeNetwork
// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
// unmarshaling the JSON blob.
outputNetwork.Type = NAT
if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
return nil, err
}
@ -196,6 +203,12 @@ func createNetwork(settings string) (*HostComputeNetwork, error) {
}
// Convert output to HostComputeNetwork
var outputNetwork HostComputeNetwork
// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
// unmarshaling the JSON blob.
outputNetwork.Type = NAT
if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
return nil, err
}
@ -203,7 +216,10 @@ func createNetwork(settings string) (*HostComputeNetwork, error) {
}
func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) {
networkGuid := guid.FromString(networkId)
networkGuid, err := guid.FromString(networkId)
if err != nil {
return nil, errInvalidNetworkID
}
// Open Network
var (
networkHandle hcnNetwork
@ -237,6 +253,12 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro
}
// Convert output to HostComputeNetwork
var outputNetwork HostComputeNetwork
// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
// unmarshaling the JSON blob.
outputNetwork.Type = NAT
if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
return nil, err
}
@ -244,7 +266,10 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro
}
func deleteNetwork(networkId string) error {
networkGuid := guid.FromString(networkId)
networkGuid, err := guid.FromString(networkId)
if err != nil {
return errInvalidNetworkID
}
var resultBuffer *uint16
hr := hcnDeleteNetwork(&networkGuid, &resultBuffer)
if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil {
@ -320,6 +345,24 @@ func GetNetworkByName(networkName string) (*HostComputeNetwork, error) {
// Create Network.
func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) {
logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id)
for _, ipam := range network.Ipams {
for _, subnet := range ipam.Subnets {
if subnet.IpAddressPrefix != "" {
hasDefault := false
for _, route := range subnet.Routes {
if route.NextHop == "" {
return nil, errors.New("network create error, subnet has address prefix but no gateway specified")
}
if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" {
hasDefault = true
}
}
if !hasDefault {
return nil, errors.New("network create error, no default gateway")
}
}
}
}
jsonString, err := json.Marshal(network)
if err != nil {
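
The (truncated) Create() hunk above adds a validation pass: any subnet with an address prefix must carry routes that each name a gateway (NextHop), and at least one of them must be a default route. A sketch of an IPAM layout that satisfies the check — the field names Ipams, Subnets, Routes, IpAddressPrefix, NextHop, and DestinationPrefix come from the validation above, while the element type names (Ipam, Subnet, Route) are assumed from the hcn schema types:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// One subnet with an address prefix, one default route with a gateway,
	// which is the minimum the new Create() validation accepts.
	network := &hcn.HostComputeNetwork{
		Name: "demo-nat",
		Type: hcn.NAT,
		Ipams: []hcn.Ipam{{
			Type: "Static",
			Subnets: []hcn.Subnet{{
				IpAddressPrefix: "192.168.100.0/24",
				Routes: []hcn.Route{{
					NextHop:           "192.168.100.1",
					DestinationPrefix: "0.0.0.0/0",
				}},
			}},
		}},
	}
	fmt.Printf("%+v\n", network)
}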

View File

@ -1,6 +1,8 @@
package hcn
import "encoding/json"
import (
"encoding/json"
)
// EndpointPolicyType are the potential Policies that apply to Endpoints.
type EndpointPolicyType string
@ -64,14 +66,18 @@ type SubnetPolicy struct {
Settings json.RawMessage `json:",omitempty"`
}
// NatFlags are flags for portmappings.
type NatFlags uint32
/// Endpoint Policy objects
// PortMappingPolicySetting defines Port Mapping (NAT)
type PortMappingPolicySetting struct {
Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17
InternalPort uint16 `json:",omitempty"`
ExternalPort uint16 `json:",omitempty"`
VIP string `json:",omitempty"`
Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17
InternalPort uint16 `json:",omitempty"`
ExternalPort uint16 `json:",omitempty"`
VIP string `json:",omitempty"`
Flags NatFlags `json:",omitempty"`
}
// ActionType associated with ACLs. Value is either Allow or Block.
@ -131,6 +137,26 @@ type SDNRoutePolicySetting struct {
NeedEncap bool `json:",omitempty"`
}
// A ProxyType is a type of proxy used by the L4 proxy policy.
type ProxyType int
const (
// ProxyTypeVFP specifies a Virtual Filtering Protocol proxy.
ProxyTypeVFP ProxyType = iota
// ProxyTypeWFP specifies a Windows Filtering Platform proxy.
ProxyTypeWFP
)
// FiveTuple is nested in L4ProxyPolicySetting for WFP support.
type FiveTuple struct {
Protocols string `json:",omitempty"`
LocalAddresses string `json:",omitempty"`
RemoteAddresses string `json:",omitempty"`
LocalPorts string `json:",omitempty"`
RemotePorts string `json:",omitempty"`
Priority uint16 `json:",omitempty"`
}
// L4ProxyPolicySetting sets Layer-4 Proxy on an endpoint.
type L4ProxyPolicySetting struct {
IP string `json:",omitempty"`
@ -139,6 +165,12 @@ type L4ProxyPolicySetting struct {
ExceptionList []string `json:",omitempty"`
Destination string `json:","`
OutboundNat bool `json:",omitempty"`
// For the WFP proxy
FilterTuple FiveTuple `json:",omitempty"`
ProxyType ProxyType `json:",omitempty"`
UserSID string `json:",omitempty"`
CompartmentID uint32 `json:",omitempty"`
}
// PortnameEndpointPolicySetting sets the port name for an endpoint.
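
The FiveTuple and the new WFP-related fields above extend L4ProxyPolicySetting so a proxy policy can be scoped by traffic tuple and proxy type. A sketch of populating the setting with illustrative values, using only fields visible in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// ProxyTypeWFP selects the Windows Filtering Platform proxy; the
	// FiveTuple narrows which traffic is redirected to it.
	setting := hcn.L4ProxyPolicySetting{
		IP:          "10.0.0.4",
		Destination: "0.0.0.0/0",
		ProxyType:   hcn.ProxyTypeWFP,
		FilterTuple: hcn.FiveTuple{
			Protocols:   "6", // TCP
			RemotePorts: "443",
			Priority:    8,
		},
	}
	raw, err := json.Marshal(setting)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}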

View File

@ -39,11 +39,21 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) {
// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
func HotAttachEndpoint(containerID string, endpointID string) error {
endpoint, err := GetHNSEndpointByID(endpointID)
isAttached, err := endpoint.IsAttached(containerID)
if isAttached {
return err
}
return modifyNetworkEndpoint(containerID, endpointID, Add)
}
// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container
func HotDetachEndpoint(containerID string, endpointID string) error {
endpoint, err := GetHNSEndpointByID(endpointID)
isAttached, err := endpoint.IsAttached(containerID)
if !isAttached {
return err
}
return modifyNetworkEndpoint(containerID, endpointID, Remove)
}

View File

@ -3,7 +3,7 @@ package cni
import (
"errors"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/Microsoft/hcsshim/internal/regstate"
)

View File

@ -0,0 +1,80 @@
package cow
import (
"context"
"io"
"github.com/Microsoft/hcsshim/internal/schema1"
)
// Process is the interface for an OS process running in a container or utility VM.
type Process interface {
// Close releases resources associated with the process and closes the
// writer and readers returned by Stdio. Depending on the implementation,
// this may also terminate the process.
Close() error
// CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever
// is appropriate to indicate that no more data is available.
CloseStdin(ctx context.Context) error
// Pid returns the process ID.
Pid() int
// Stdio returns the stdio streams for a process. These may be nil if a stream
// was not requested during CreateProcess.
Stdio() (_ io.Writer, _ io.Reader, _ io.Reader)
// ResizeConsole resizes the virtual terminal associated with the process.
ResizeConsole(ctx context.Context, width, height uint16) error
// Kill sends a SIGKILL or equivalent signal to the process and returns whether
// the signal was delivered. It does not wait for the process to terminate.
Kill(ctx context.Context) (bool, error)
// Signal sends a signal to the process and returns whether the signal was
// delivered. The input is OS specific (either
// guestrequest.SignalProcessOptionsWCOW or
// guestrequest.SignalProcessOptionsLCOW). It does not wait for the process
// to terminate.
Signal(ctx context.Context, options interface{}) (bool, error)
// Wait waits for the process to complete, or for a connection to the process to be
// terminated by some error condition (including calling Close).
Wait() error
// ExitCode returns the exit code of the process. Returns an error if the process is
// not running.
ExitCode() (int, error)
}
// ProcessHost is the interface for creating processes.
type ProcessHost interface {
// CreateProcess creates a process. The configuration is host specific
// (either hcsschema.ProcessParameters or lcow.ProcessParameters).
CreateProcess(ctx context.Context, config interface{}) (Process, error)
// OS returns the host's operating system, "linux" or "windows".
OS() string
// IsOCI specifies whether this is an OCI-compliant process host. If true,
// then the configuration passed to CreateProcess should have an OCI process
// spec (or nil if this is the initial process in an OCI container).
// Otherwise, it should have the HCS-specific process parameters.
IsOCI() bool
}
// Container is the interface for container objects, either running on the host or
// in a utility VM.
type Container interface {
ProcessHost
// Close releases the resources associated with the container. Depending on
// the implementation, this may also terminate the container.
Close() error
// ID returns the container ID.
ID() string
// Properties returns the requested container properties.
Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error)
// Start starts a container.
Start(ctx context.Context) error
// Shutdown sends a shutdown request to the container (but does not wait for
// the shutdown to complete).
Shutdown(ctx context.Context) error
// Terminate sends a terminate request to the container (but does not wait
// for the terminate to complete).
Terminate(ctx context.Context) error
// Wait waits for the container to terminate, or for the connection to the
// container to be terminated by some error condition (including calling
// Close).
Wait() error
}
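
The cow interfaces above let the same code drive a container whether it runs on the host or inside a utility VM. As a sketch of code written against that abstraction (cow is an internal package, so this would only compile inside the hcsshim module; the helper name is illustrative):

// Package example sketches a caller of the new cow.Container abstraction.
package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/cow"
)

// runAndWait starts a container and blocks until it exits or the connection
// to it is lost, requesting a shutdown if the context is cancelled first.
func runAndWait(ctx context.Context, c cow.Container) error {
	if err := c.Start(ctx); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- c.Wait() }()
	select {
	case <-ctx.Done():
		_ = c.Shutdown(context.Background())
		return ctx.Err()
	case err := <-done:
		return err
	}
}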

View File

@ -1,100 +0,0 @@
package guestrequest
import (
"github.com/Microsoft/hcsshim/internal/schema2"
)
// Arguably, many of these (at least CombinedLayers) should have been generated
// by swagger.
//
// This will also change package name due to an inbound breaking change.
// This class is used by a modify request to add or remove a combined layers
// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath
// using the specified layers as the parent content. Ignores property ScratchPath
// since the container path is already the scratch path. For linux, the GCS unions
// the specified layers and ScratchPath together, placing the resulting union
// filesystem at ContainerRootPath.
type CombinedLayers struct {
ContainerRootPath string `json:"ContainerRootPath,omitempty"`
Layers []hcsschema.Layer `json:"Layers,omitempty"`
ScratchPath string `json:"ScratchPath,omitempty"`
}
// Defines the schema for hosted settings passed to GCS and/or OpenGCS
// SCSI. Scratch space for remote file-system commands, or R/W layer for containers
type LCOWMappedVirtualDisk struct {
MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM
Lun uint8 `json:"Lun,omitempty"`
Controller uint8 `json:"Controller,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
}
type WCOWMappedVirtualDisk struct {
ContainerPath string `json:"ContainerPath,omitempty"`
Lun int32 `json:"Lun,omitempty"`
}
type LCOWMappedDirectory struct {
MountPath string `json:"MountPath,omitempty"`
Port int32 `json:"Port,omitempty"`
ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported)
ReadOnly bool `json:"ReadOnly,omitempty"`
}
// Read-only layers over VPMem
type LCOWMappedVPMemDevice struct {
DeviceNumber uint32 `json:"DeviceNumber,omitempty"`
MountPath string `json:"MountPath,omitempty"` // /tmp/pN
}
type LCOWNetworkAdapter struct {
NamespaceID string `json:",omitempty"`
ID string `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress string `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
EnableLowMetric bool `json:",omitempty"`
EncapOverhead uint16 `json:",omitempty"`
}
type ResourceType string
const (
// These are constants for v2 schema modify guest requests.
ResourceTypeMappedDirectory ResourceType = "MappedDirectory"
ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk"
ResourceTypeNetwork ResourceType = "Network"
ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace"
ResourceTypeCombinedLayers ResourceType = "CombinedLayers"
ResourceTypeVPMemDevice ResourceType = "VPMemDevice"
)
// GuestRequest is for modify commands passed to the guest.
type GuestRequest struct {
RequestType string `json:"RequestType,omitempty"`
ResourceType ResourceType `json:"ResourceType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
type NetworkModifyRequest struct {
AdapterId string `json:"AdapterId,omitempty"`
RequestType string `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
type RS4NetworkModifyRequest struct {
AdapterInstanceId string `json:"AdapterInstanceId,omitempty"`
RequestType string `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
// SignalProcessOptions is the options passed to either WCOW or LCOW
// to signal a given process.
type SignalProcessOptions struct {
Signal int `json:,omitempty`
}

View File

@ -1,69 +0,0 @@
package guid
import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
)
var _ = (json.Marshaler)(&GUID{})
var _ = (json.Unmarshaler)(&GUID{})
type GUID [16]byte
func New() GUID {
g := GUID{}
_, err := io.ReadFull(rand.Reader, g[:])
if err != nil {
panic(err)
}
return g
}
func (g GUID) String() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
func FromString(s string) GUID {
if len(s) != 36 {
panic(fmt.Sprintf("invalid GUID length: %d", len(s)))
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
panic("invalid GUID format")
}
indexOrder := [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
}
byteOrder := [16]int{
3, 2, 1, 0,
5, 4,
7, 6,
8, 9,
10, 11, 12, 13, 14, 15,
}
var g GUID
for i, x := range indexOrder {
b, err := strconv.ParseInt(s[x:x+2], 16, 16)
if err != nil {
panic(err)
}
g[byteOrder[i]] = byte(b)
}
return g
}
func (g GUID) MarshalJSON() ([]byte, error) {
return json.Marshal(g.String())
}
func (g *GUID) UnmarshalJSON(data []byte) error {
*g = FromString(strings.Trim(string(data), "\""))
return nil
}
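
The panic-based internal guid package deleted above is replaced throughout this commit by github.com/Microsoft/go-winio/pkg/guid, whose FromString reports malformed input as an error instead of panicking — which is why the hcn call sites earlier in the diff all gained error checks. A minimal sketch of the new call pattern:

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// FromString now returns (GUID, error); malformed input is an ordinary
	// error to handle rather than a panic to recover from.
	g, err := guid.FromString("30dd879c-ee2f-11db-8314-0800200c9a66")
	if err != nil {
		fmt.Println("invalid GUID:", err)
		return
	}
	fmt.Println(g.String())
}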

View File

@ -1,10 +1,13 @@
package hcs
import (
"fmt"
"sync"
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"github.com/sirupsen/logrus"
)
@ -40,35 +43,83 @@ var (
)
type hcsNotification uint32
func (hn hcsNotification) String() string {
switch hn {
case hcsNotificationSystemExited:
return "SystemExited"
case hcsNotificationSystemCreateCompleted:
return "SystemCreateCompleted"
case hcsNotificationSystemStartCompleted:
return "SystemStartCompleted"
case hcsNotificationSystemPauseCompleted:
return "SystemPauseCompleted"
case hcsNotificationSystemResumeCompleted:
return "SystemResumeCompleted"
case hcsNotificationSystemCrashReport:
return "SystemCrashReport"
case hcsNotificationSystemSiloJobCreated:
return "SystemSiloJobCreated"
case hcsNotificationSystemSaveCompleted:
return "SystemSaveCompleted"
case hcsNotificationSystemRdpEnhancedModeStateChanged:
return "SystemRdpEnhancedModeStateChanged"
case hcsNotificationSystemShutdownFailed:
return "SystemShutdownFailed"
case hcsNotificationSystemGetPropertiesCompleted:
return "SystemGetPropertiesCompleted"
case hcsNotificationSystemModifyCompleted:
return "SystemModifyCompleted"
case hcsNotificationSystemCrashInitiated:
return "SystemCrashInitiated"
case hcsNotificationSystemGuestConnectionClosed:
return "SystemGuestConnectionClosed"
case hcsNotificationProcessExited:
return "ProcessExited"
case hcsNotificationInvalid:
return "Invalid"
case hcsNotificationServiceDisconnect:
return "ServiceDisconnect"
default:
return fmt.Sprintf("Unknown: %d", hn)
}
}
type notificationChannel chan error
type notifcationWatcherContext struct {
channels notificationChannels
handle hcsCallback
handle vmcompute.HcsCallback
systemID string
processID int
}
type notificationChannels map[hcsNotification]notificationChannel
func newChannels() notificationChannels {
func newSystemChannels() notificationChannels {
channels := make(notificationChannels)
for _, notif := range []hcsNotification{
hcsNotificationServiceDisconnect,
hcsNotificationSystemExited,
hcsNotificationSystemCreateCompleted,
hcsNotificationSystemStartCompleted,
hcsNotificationSystemPauseCompleted,
hcsNotificationSystemResumeCompleted,
} {
channels[notif] = make(notificationChannel, 1)
}
return channels
}
channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1)
channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1)
channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1)
channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1)
channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1)
channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1)
func newProcessChannels() notificationChannels {
channels := make(notificationChannels)
for _, notif := range []hcsNotification{
hcsNotificationServiceDisconnect,
hcsNotificationProcessExited,
} {
channels[notif] = make(notificationChannel, 1)
}
return channels
}
@ -92,12 +143,17 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt
return 0
}
log := logrus.WithFields(logrus.Fields{
"notification-type": notificationType.String(),
"system-id": context.systemID,
})
if context.processID != 0 {
log.Data[logfields.ProcessID] = context.processID
}
log.Debug("HCS notification")
if channel, ok := context.channels[notificationType]; ok {
channel <- result
} else {
logrus.WithFields(logrus.Fields{
"notification-type": notificationType,
}).Warn("Received a callback of an unsupported type")
}
return 0

View File

@ -1,14 +1,14 @@
package hcs
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
)
var (
@ -117,17 +117,11 @@ func (ev *ErrorEvent) String() string {
return evs
}
func processHcsResult(resultp *uint16) []ErrorEvent {
if resultp != nil {
resultj := interop.ConvertAndFreeCoTaskMemString(resultp)
logrus.WithField(logfields.JSON, resultj).
Debug("HCS Result")
func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent {
if resultJSON != "" {
result := &hcsResult{}
if err := json.Unmarshal([]byte(resultj), result); err != nil {
logrus.WithFields(logrus.Fields{
logfields.JSON: resultj,
logrus.ErrorKey: err,
}).Warning("Could not unmarshal HCS result")
if err := json.Unmarshal([]byte(resultJSON), result); err != nil {
log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result")
return nil
}
return result.ErrorEvents
@ -141,6 +135,8 @@ type HcsError struct {
Events []ErrorEvent
}
var _ net.Error = &HcsError{}
func (e *HcsError) Error() string {
s := e.Op + ": " + e.Err.Error()
for _, ev := range e.Events {
@ -149,6 +145,16 @@ func (e *HcsError) Error() string {
return s
}
func (e *HcsError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}
func (e *HcsError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
SystemID string
@ -158,6 +164,8 @@ type ProcessError struct {
Events []ErrorEvent
}
var _ net.Error = &ProcessError{}
// SystemError is an error encountered in HCS during an operation on a Container object
type SystemError struct {
ID string
@ -167,6 +175,8 @@ type SystemError struct {
Events []ErrorEvent
}
var _ net.Error = &SystemError{}
func (e *SystemError) Error() string {
s := e.Op + " " + e.ID + ": " + e.Err.Error()
for _, ev := range e.Events {
@ -178,6 +188,16 @@ func (e *SystemError) Error() string {
return s
}
func (e *SystemError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}
func (e *SystemError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}
func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*SystemError); ok {
@ -200,6 +220,16 @@ func (e *ProcessError) Error() string {
return s
}
func (e *ProcessError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}
func (e *ProcessError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}
func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {
@ -242,6 +272,9 @@ func IsPending(err error) bool {
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
if err, ok := err.(net.Error); ok && err.Timeout() {
return true
}
err = getInnerError(err)
return err == ErrTimeout
}
@ -272,6 +305,13 @@ func IsNotSupported(err error) bool {
err == ErrVmcomputeUnknownMessage
}
// IsOperationInvalidState returns true when err is caused by
// `ErrVmcomputeOperationInvalidState`.
func IsOperationInvalidState(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationInvalidState
}
func getInnerError(err error) error {
switch pe := err.(type) {
case nil:
@ -285,3 +325,12 @@ func getInnerError(err error) error {
}
return err
}
func getOperationLogResult(err error) (string, error) {
switch err {
case nil:
return "Success", nil
default:
return "Error", err
}
}
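As an aside (illustration only): implementing net.Error on HcsError, SystemError, and ProcessError is what lets the IsTimeout check above treat a wrapped timeout uniformly. A standalone sketch of that check with a stand-in error type, not the vendored types themselves:

package main

import (
    "fmt"
    "net"
)

// fakeTimeout stands in for an error value whose inner error timed out.
type fakeTimeout struct{}

func (fakeTimeout) Error() string   { return "operation timed out" }
func (fakeTimeout) Timeout() bool   { return true }
func (fakeTimeout) Temporary() bool { return false }

// isTimeout mirrors the shape of the IsTimeout check above: any error that
// implements net.Error and reports Timeout() is treated as a timeout.
func isTimeout(err error) bool {
    nerr, ok := err.(net.Error)
    return ok && nerr.Timeout()
}

func main() {
    fmt.Println(isTimeout(fakeTimeout{})) // true
}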

View File

@ -1,48 +0,0 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcs
import (
"syscall"
)
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hcs.go
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}

View File

@ -1,20 +0,0 @@
package hcs
import "github.com/sirupsen/logrus"
func logOperationBegin(ctx logrus.Fields, msg string) {
logrus.WithFields(ctx).Debug(msg)
}
func logOperationEnd(ctx logrus.Fields, msg string, err error) {
// Copy the log and fields first.
log := logrus.WithFields(ctx)
if err == nil {
log.Debug(msg)
} else {
// Edit only the copied field data to avoid race conditions on the
// write.
log.Data[logrus.ErrorKey] = err
log.Error(msg)
}
}

View File

@ -1,48 +1,45 @@
package hcs
import (
"context"
"encoding/json"
"io"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"go.opencensus.io/trace"
)
// Process represents a process in a compute system.
type Process struct {
handleLock sync.RWMutex
handle hcsProcess
handle vmcompute.HcsProcess
processID int
system *System
cachedPipes *cachedPipes
stdin io.WriteCloser
stdout io.ReadCloser
stderr io.ReadCloser
callbackNumber uintptr
logctx logrus.Fields
closedWaitOnce sync.Once
waitBlock chan struct{}
exitCode int
waitError error
}
func newProcess(process hcsProcess, processID int, computeSystem *System) *Process {
func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
return &Process{
handle: process,
processID: processID,
system: computeSystem,
logctx: logrus.Fields{
logfields.ContainerID: computeSystem.ID(),
logfields.ProcessID: processID,
},
waitBlock: make(chan struct{}),
}
}
type cachedPipes struct {
stdIn syscall.Handle
stdOut syscall.Handle
stdErr syscall.Handle
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
@ -58,7 +55,7 @@ type closeHandle struct {
Handle string
}
type ProcessStatus struct {
type processStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
@ -86,120 +83,153 @@ func (process *Process) SystemID() string {
return process.system.ID()
}
func (process *Process) logOperationBegin(operation string) {
logOperationBegin(
process.logctx,
operation+" - Begin Operation")
}
func (process *Process) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
switch err {
case nil:
return true, nil
case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
select {
case <-process.waitBlock:
// The process exit notification has already arrived.
default:
// The process should be gone, but we have not received the notification.
// After a second, force unblock the process wait to work around a possible
// deadlock in the HCS.
go func() {
time.Sleep(time.Second)
process.closedWaitOnce.Do(func() {
log.G(ctx).WithError(err).Warn("force unblocking process waits")
process.exitCode = -1
process.waitError = err
close(process.waitBlock)
})
}()
}
return false, nil
default:
return false, err
}
logOperationEnd(
process.logctx,
operation+" - End Operation - "+result,
err)
}
// Signal signals the process with `options`.
func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) {
//
// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
//
// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::Signal"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
optionsb, err := json.Marshal(options)
if err != nil {
return err
return false, err
}
optionsStr := string(optionsb)
var resultp *uint16
syscallWatcher(process.logctx, func() {
err = hcsSignalProcess(process.handle, optionsStr, &resultp)
})
events := processHcsResult(resultp)
resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb))
events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err)
if err != nil {
return makeProcessError(process, operation, err, events)
err = makeProcessError(process, operation, err, events)
}
return nil
return delivered, err
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill() (err error) {
func (process *Process) Kill(ctx context.Context) (bool, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::Kill"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var resultp *uint16
syscallWatcher(process.logctx, func() {
err = hcsTerminateProcess(process.handle, &resultp)
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err)
if err != nil {
err = makeProcessError(process, operation, err, events)
}
return delivered, err
}
// waitBackground waits for the process exit notification. Once received, it sets
// `process.waitError` (if any) and unblocks all `Wait` calls.
//
// This MUST be called exactly once per `process.handle` but `Wait` is safe to
// call multiple times.
func (process *Process) waitBackground() {
operation := "hcsshim::Process::waitBackground"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
span.AddAttributes(
trace.StringAttribute("cid", process.SystemID()),
trace.Int64Attribute("pid", int64(process.processID)))
var (
err error
exitCode = -1
)
err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
err = makeProcessError(process, operation, err, nil)
log.G(ctx).WithError(err).Error("failed wait")
} else {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
// Make sure we didn't race with Close() here
if process.handle != 0 {
propertiesJSON, resultJSON, err := vmcompute.HcsGetProcessProperties(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
if err != nil {
err = makeProcessError(process, operation, err, events)
} else {
properties := &processStatus{}
err = json.Unmarshal([]byte(propertiesJSON), properties)
if err != nil {
err = makeProcessError(process, operation, err, nil)
} else {
if properties.LastWaitResult != 0 {
log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
} else {
exitCode = int(properties.ExitCode)
}
}
}
}
}
log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")
process.closedWaitOnce.Do(func() {
process.exitCode = exitCode
process.waitError = err
close(process.waitBlock)
})
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
return nil
oc.SetSpanStatus(span, err)
}
// Wait waits for the process to exit.
func (process *Process) Wait() (err error) {
operation := "hcsshim::Process::Wait"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
return nil
}
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// false if timeout occurs.
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcssshim::Process::WaitTimeout"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
return nil
// Wait waits for the process to exit. If the process has already exited, it
// returns the previous error (if any).
func (process *Process) Wait() error {
<-process.waitBlock
return process.waitError
}
// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(width, height uint16) (err error) {
func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::ResizeConsole"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -218,11 +248,8 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
events := processHcsResult(resultp)
resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return makeProcessError(process, operation, err, events)
}
@ -230,104 +257,46 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
return nil
}
func (process *Process) Properties() (_ *ProcessStatus, err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::Properties"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var (
resultp *uint16
propertiesp *uint16
)
syscallWatcher(process.logctx, func() {
err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, makeProcessError(process, operation, err, events)
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
properties := &ProcessStatus{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, makeProcessError(process, operation, err, nil)
}
return properties, nil
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (_ int, err error) {
operation := "hcsshim::Process::ExitCode"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
properties, err := process.Properties()
if err != nil {
return 0, makeProcessError(process, operation, err, nil)
func (process *Process) ExitCode() (int, error) {
select {
case <-process.waitBlock:
if process.waitError != nil {
return -1, process.waitError
}
return process.exitCode, nil
default:
return -1, makeProcessError(process, "hcsshim::Process::ExitCode", ErrInvalidProcessState, nil)
}
if properties.Exited == false {
return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil)
}
if properties.LastWaitResult != 0 {
return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil)
}
return int(properties.ExitCode), nil
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; but this function can only
// be called once on each Process.
func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
operation := "hcsshim::Process::StdioLegacy"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
trace.StringAttribute("cid", process.SystemID()),
trace.Int64Attribute("pid", int64(process.processID)))
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::Stdio"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var stdIn, stdOut, stdErr syscall.Handle
if process.cachedPipes == nil {
var (
processInfo hcsProcessInformation
resultp *uint16
)
err = hcsGetProcessInfo(process.handle, &processInfo, &resultp)
events := processHcsResult(resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, events)
}
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
} else {
// Use cached pipes
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
// Invalidate the cache
process.cachedPipes = nil
processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, events)
}
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, nil)
}
@ -335,15 +304,19 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo
return pipes[0], pipes[1], pipes[2], nil
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively.
// To close them, close the process handle.
func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
return process.stdin, process.stdout, process.stderr
}
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *Process) CloseStdin() (err error) {
func (process *Process) CloseStdin(ctx context.Context) error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::CloseStdin"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -361,96 +334,116 @@ func (process *Process) CloseStdin() (err error) {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
events := processHcsResult(resultp)
resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return makeProcessError(process, operation, err, events)
}
if process.stdin != nil {
process.stdin.Close()
}
return nil
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *Process) Close() (err error) {
operation := "hcsshim::Process::Close"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
trace.StringAttribute("cid", process.SystemID()),
trace.Int64Attribute("pid", int64(process.processID)))
process.handleLock.Lock()
defer process.handleLock.Unlock()
operation := "hcsshim::Process::Close"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(operation, err) }()
// Don't double free this
if process.handle == 0 {
return nil
}
if err = process.unregisterCallback(); err != nil {
if process.stdin != nil {
process.stdin.Close()
}
if process.stdout != nil {
process.stdout.Close()
}
if process.stderr != nil {
process.stderr.Close()
}
if err = process.unregisterCallback(ctx); err != nil {
return makeProcessError(process, operation, err, nil)
}
if err = hcsCloseProcess(process.handle); err != nil {
if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil {
return makeProcessError(process, operation, err, nil)
}
process.handle = 0
process.closedWaitOnce.Do(func() {
process.exitCode = -1
process.waitError = ErrAlreadyClosed
close(process.waitBlock)
})
return nil
}
func (process *Process) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
func (process *Process) registerCallback(ctx context.Context) error {
callbackContext := &notifcationWatcherContext{
channels: newProcessChannels(),
systemID: process.SystemID(),
processID: process.processID,
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMap[callbackNumber] = callbackContext
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber)
if err != nil {
return err
}
context.handle = callbackHandle
callbackContext.handle = callbackHandle
process.callbackNumber = callbackNumber
return nil
}
func (process *Process) unregisterCallback() error {
func (process *Process) unregisterCallback(ctx context.Context) error {
callbackNumber := process.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackContext := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
if callbackContext == nil {
return nil
}
handle := context.handle
handle := callbackContext.handle
if handle == 0 {
return nil
}
// hcsUnregisterProcessCallback has its own syncronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterProcessCallback(handle)
// vmcompute.HcsUnregisterProcessCallback has its own synchronization to
// wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := vmcompute.HcsUnregisterProcessCallback(ctx, handle)
if err != nil {
return err
}
closeChannels(context.channels)
closeChannels(callbackContext.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
delete(callbackMap, callbackNumber)
callbackMapLock.Unlock()
handle = 0
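As an aside (illustration only): Process and System above both adopt a closedWaitOnce/waitBlock pattern so that the exit-notification path and Close can each complete the wait exactly once, while Wait and ExitCode stay safe to call repeatedly. A standalone sketch of that pattern with hypothetical names, not the vendored structs:

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

// waiter mirrors the waitBlock/closedWaitOnce fields used by Process and System.
type waiter struct {
    closedWaitOnce sync.Once
    waitBlock      chan struct{}
    waitError      error
    exitCode       int
}

func newWaiter() *waiter {
    return &waiter{waitBlock: make(chan struct{}), exitCode: -1}
}

// complete unblocks every Wait call exactly once, whether it is reached from
// the exit-notification path or from Close.
func (w *waiter) complete(code int, err error) {
    w.closedWaitOnce.Do(func() {
        w.exitCode = code
        w.waitError = err
        close(w.waitBlock)
    })
}

// Wait is safe to call any number of times; it blocks until complete has run.
func (w *waiter) Wait() error {
    <-w.waitBlock
    return w.waitError
}

// ExitCode, like Process.ExitCode above, only succeeds once the wait block is closed.
func (w *waiter) ExitCode() (int, error) {
    select {
    case <-w.waitBlock:
        return w.exitCode, w.waitError
    default:
        return -1, errors.New("process has not exited")
    }
}

func main() {
    w := newWaiter()
    go func() {
        time.Sleep(10 * time.Millisecond)
        w.complete(0, nil)                           // exit notification arrived
        w.complete(-1, errors.New("already closed")) // later call is a no-op
    }()
    fmt.Println(w.Wait())     // <nil>
    fmt.Println(w.ExitCode()) // 0 <nil>
}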

View File

@ -1,18 +1,23 @@
package hcs
import (
"context"
"encoding/json"
"errors"
"os"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"go.opencensus.io/trace"
)
// currentContainerStarts is used to limit the number of concurrent container
@ -38,49 +43,37 @@ func init() {
type System struct {
handleLock sync.RWMutex
handle hcsSystem
handle vmcompute.HcsSystem
id string
callbackNumber uintptr
logctx logrus.Fields
closedWaitOnce sync.Once
waitBlock chan struct{}
waitError error
exitError error
os, typ string
}
func newSystem(id string) *System {
return &System{
id: id,
logctx: logrus.Fields{
logfields.ContainerID: id,
},
id: id,
waitBlock: make(chan struct{}),
}
}
func (computeSystem *System) logOperationBegin(operation string) {
logOperationBegin(
computeSystem.logctx,
operation+" - Begin Operation")
}
func (computeSystem *System) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
}
logOperationEnd(
computeSystem.logctx,
operation+" - End Operation - "+result,
err)
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) {
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcsshim::CreateComputeSystem"
// hcsCreateComputeSystemContext is an async operation. Start the outer span
// here to measure the full create time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", id))
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
@ -89,126 +82,114 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
hcsDocument := string(hcsDocumentB)
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, hcsDocument).
Debug("HCS ComputeSystem Document")
var (
resultp *uint16
identity syscall.Handle
resultJSON string
createError error
)
syscallWatcher(computeSystem.logctx, func() {
createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
})
computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity)
if createError == nil || IsPending(createError) {
if err = computeSystem.registerCallback(); err != nil {
defer func() {
if err != nil {
computeSystem.Close()
}
}()
if err = computeSystem.registerCallback(ctx); err != nil {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate()
computeSystem.Terminate(ctx)
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
}
events, err := processAsyncHcsResult(createError, resultp, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
if err != nil {
if err == ErrTimeout {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate()
computeSystem.Terminate(ctx)
}
return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
}
go computeSystem.waitBackground()
if err = computeSystem.getCachedProperties(ctx); err != nil {
return nil, err
}
return computeSystem, nil
}
// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(id string) (_ *System, err error) {
func OpenComputeSystem(ctx context.Context, id string) (*System, error) {
operation := "hcsshim::OpenComputeSystem"
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
defer func() {
if IsNotExist(err) {
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(operation, err)
}
}()
var (
handle hcsSystem
resultp *uint16
)
err = hcsOpenComputeSystem(id, &handle, &resultp)
events := processHcsResult(resultp)
handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
computeSystem.handle = handle
if err = computeSystem.registerCallback(); err != nil {
defer func() {
if err != nil {
computeSystem.Close()
}
}()
if err = computeSystem.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go computeSystem.waitBackground()
if err = computeSystem.getCachedProperties(ctx); err != nil {
return nil, err
}
return computeSystem, nil
}
func (computeSystem *System) getCachedProperties(ctx context.Context) error {
props, err := computeSystem.Properties(ctx)
if err != nil {
return err
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
computeSystem.os = "windows"
}
return nil
}
// OS returns the operating system of the compute system, "linux" or "windows".
func (computeSystem *System) OS() string {
return computeSystem.os
}
// IsOCI returns whether processes in the compute system should be created via
// OCI.
func (computeSystem *System) IsOCI() bool {
return computeSystem.os == "linux" && computeSystem.typ == "container"
}
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
operation := "hcsshim::GetComputeSystems"
fields := logrus.Fields{}
logOperationBegin(
fields,
operation+" - Begin Operation")
defer func() {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
}
logOperationEnd(
fields,
operation+" - End Operation - "+result,
err)
}()
queryb, err := json.Marshal(q)
if err != nil {
return nil, err
}
query := string(queryb)
logrus.WithFields(fields).
WithField(logfields.JSON, query).
Debug("HCS ComputeSystem Query")
var (
resultp *uint16
computeSystemsp *uint16
)
syscallWatcher(fields, func() {
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
})
events := processHcsResult(resultp)
computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, &HcsError{Op: operation, Err: err, Events: events}
}
if computeSystemsp == nil {
if computeSystemsJSON == "" {
return nil, ErrUnexpectedValue
}
computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp)
computeSystems := []schema1.ContainerProperties{}
if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil {
return nil, err
}
@ -216,16 +197,21 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
}
// Start synchronously starts the computeSystem.
func (computeSystem *System) Start() (err error) {
func (computeSystem *System) Start(ctx context.Context) (err error) {
operation := "hcsshim::System::Start"
// hcsStartComputeSystemContext is an async operation. Start the outer span
// here to measure the full start time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Start"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
// This is a very simple backoff-retry loop to limit the number
@ -254,13 +240,10 @@ func (computeSystem *System) Start() (err error) {
}()
}
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, "Start", "", err, events)
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
@ -271,360 +254,358 @@ func (computeSystem *System) ID() string {
return computeSystem.id
}
// Shutdown requests a compute system shutdown, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Shutdown() (err error) {
// Shutdown requests a compute system shutdown.
func (computeSystem *System) Shutdown(ctx context.Context) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Shutdown"
computeSystem.logOperationBegin(operation)
defer func() {
if IsAlreadyStopped(err) {
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(operation, err)
}
}()
operation := "hcsshim::System::Shutdown"
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
return nil
}
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Shutdown", "", err, events)
resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err {
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// Terminate requests a compute system terminate, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Terminate() (err error) {
// Terminate requests a compute system terminate.
func (computeSystem *System) Terminate(ctx context.Context) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Terminate"
computeSystem.logOperationBegin(operation)
defer func() {
if IsPending(err) {
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(operation, err)
}
}()
operation := "hcsshim::System::Terminate"
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
return nil
}
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err {
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// waitBackground waits for the compute system exit notification. Once received,
// it sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls.
//
// This MUST be called exactly once per `computeSystem.handle` but `Wait` is
// safe to call multiple times.
func (computeSystem *System) waitBackground() {
operation := "hcsshim::System::waitBackground"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
switch err {
case nil:
log.G(ctx).Debug("system exited")
case ErrVmcomputeUnexpectedExit:
log.G(ctx).Debug("unexpected system exit")
computeSystem.exitError = makeSystemError(computeSystem, operation, "", err, nil)
err = nil
default:
err = makeSystemError(computeSystem, operation, "", err, nil)
}
computeSystem.closedWaitOnce.Do(func() {
computeSystem.waitError = err
close(computeSystem.waitBlock)
})
events := processHcsResult(resultp)
if err != nil && err != ErrVmcomputeAlreadyStopped {
return makeSystemError(computeSystem, "Terminate", "", err, events)
}
return nil
oc.SetSpanStatus(span, err)
}
// Wait synchronously waits for the compute system to shutdown or terminate.
func (computeSystem *System) Wait() (err error) {
operation := "hcsshim::ComputeSystem::Wait"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeSystemError(computeSystem, "Wait", "", err, nil)
}
return nil
// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited, it returns the previous error (if any).
func (computeSystem *System) Wait() error {
<-computeSystem.waitBlock
return computeSystem.waitError
}
// WaitExpectedError synchronously waits for the compute system to shutdown or
// terminate, and ignores the passed error if it occurs.
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
operation := "hcsshim::ComputeSystem::WaitExpectedError"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil && getInnerError(err) != expected {
return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
// ExitError returns an error describing the reason the compute system terminated.
func (computeSystem *System) ExitError() error {
select {
case <-computeSystem.waitBlock:
if computeSystem.waitError != nil {
return computeSystem.waitError
}
return computeSystem.exitError
default:
return errors.New("container not exited")
}
return nil
}
// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcsshim::ComputeSystem::WaitTimeout"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeSystemError(computeSystem, "WaitTimeout", "", err, nil)
}
return nil
}
func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) {
func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Properties"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
operation := "hcsshim::System::Properties"
queryj, err := json.Marshal(schema1.PropertyQuery{types})
queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, queryj).
Debug("HCS ComputeSystem Properties Query")
var resultp, propertiesp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
})
events := processHcsResult(resultp)
propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, events)
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
if propertiesp == nil {
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
properties := &schema1.ContainerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
return properties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause() (err error) {
func (computeSystem *System) Pause(ctx context.Context) (err error) {
operation := "hcsshim::System::Pause"
// hcsPauseComputeSystemContext is an async operation. Start the outer span
// here to measure the full pause time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Pause"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, "Pause", "", err, events)
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume() (err error) {
func (computeSystem *System) Resume(ctx context.Context) (err error) {
operation := "hcsshim::System::Resume"
// hcsResumeComputeSystemContext is an async operation. Start the outer span
// here to measure the full restore time.
ctx, span := trace.StartSpan(ctx, operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Resume"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, "Resume", "", err, events)
return makeSystemError(computeSystem, operation, "", err, events)
}
return nil
}
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) {
func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::CreateProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
if computeSystem.handle == 0 {
return nil, makeSystemError(computeSystem, "CreateProcess", "", ErrAlreadyClosed, nil)
return nil, nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
return nil, nil, makeSystemError(computeSystem, operation, "", err, nil)
}
configuration := string(configurationb)
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, configuration).
Debug("HCS ComputeSystem Process Document")
syscallWatcher(computeSystem.logctx, func() {
err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
})
events := processHcsResult(resultp)
processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
return nil, nil, makeSystemError(computeSystem, operation, configuration, err, events)
}
logrus.WithFields(computeSystem.logctx).
WithField(logfields.ProcessID, processInfo.ProcessId).
Debug("HCS ComputeSystem CreateProcess PID")
log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid")
return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
}
process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem)
process.cachedPipes = &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
}
// CreateProcessNoStdio launches a new process within the computeSystem. The
// Stdio handles are not cached on the process struct.
func (computeSystem *System) CreateProcessNoStdio(c interface{}) (_ cow.Process, err error) {
operation := "hcsshim::System::CreateProcessNoStdio"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
if err = process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
process.Close()
}
}()
// We don't do anything with these handles. Close them so they don't leak.
syscall.Close(processInfo.StdInput)
syscall.Close(processInfo.StdOutput)
syscall.Close(processInfo.StdError)
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) {
operation := "hcsshim::System::CreateProcess"
process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
process.Close()
}
}()
pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
process.stdin = pipes[0]
process.stdout = pipes[1]
process.stderr = pipes[2]
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
// Add PID for the context of this operation
computeSystem.logctx[logfields.ProcessID] = pid
defer delete(computeSystem.logctx, logfields.ProcessID)
operation := "hcsshim::ComputeSystem::OpenProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processHandle hcsProcess
resultp *uint16
)
operation := "hcsshim::System::OpenProcess"
if computeSystem.handle == 0 {
return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
return nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
syscallWatcher(computeSystem.logctx, func() {
err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
})
events := processHcsResult(resultp)
processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid))
events := processHcsResult(ctx, resultJSON)
if err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
process := newProcess(processHandle, pid, computeSystem)
if err = process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
if err = process.registerCallback(ctx); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
go process.waitBackground()
return process, nil
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() (err error) {
operation := "hcsshim::System::Close"
ctx, span := trace.StartSpan(context.Background(), operation)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
computeSystem.handleLock.Lock()
defer computeSystem.handleLock.Unlock()
operation := "hcsshim::ComputeSystem::Close"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
// Don't double free this
if computeSystem.handle == 0 {
return nil
}
if err = computeSystem.unregisterCallback(); err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
if err = computeSystem.unregisterCallback(ctx); err != nil {
return makeSystemError(computeSystem, operation, "", err, nil)
}
syscallWatcher(computeSystem.logctx, func() {
err = hcsCloseComputeSystem(computeSystem.handle)
})
err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle)
if err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
return makeSystemError(computeSystem, operation, "", err, nil)
}
computeSystem.handle = 0
computeSystem.closedWaitOnce.Do(func() {
computeSystem.waitError = ErrAlreadyClosed
close(computeSystem.waitBlock)
})
return nil
}
func (computeSystem *System) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
func (computeSystem *System) registerCallback(ctx context.Context) error {
callbackContext := &notifcationWatcherContext{
channels: newSystemChannels(),
systemID: computeSystem.id,
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMap[callbackNumber] = callbackContext
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(computeSystem.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
if err != nil {
return err
}
context.handle = callbackHandle
callbackContext.handle = callbackHandle
computeSystem.callbackNumber = callbackNumber
return nil
}
func (computeSystem *System) unregisterCallback() error {
func (computeSystem *System) unregisterCallback(ctx context.Context) error {
callbackNumber := computeSystem.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackContext := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
if callbackContext == nil {
return nil
}
handle := context.handle
handle := callbackContext.handle
if handle == 0 {
return nil
@ -632,15 +613,15 @@ func (computeSystem *System) unregisterCallback() error {
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
if err != nil {
return err
}
closeChannels(context.channels)
closeChannels(callbackContext.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
delete(callbackMap, callbackNumber)
callbackMapLock.Unlock()
handle = 0
@ -649,36 +630,26 @@ func (computeSystem *System) unregisterCallback() error {
}
// Modify the System by sending a request to HCS
func (computeSystem *System) Modify(config interface{}) (err error) {
func (computeSystem *System) Modify(ctx context.Context, config interface{}) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Modify"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(operation, err) }()
operation := "hcsshim::System::Modify"
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
}
requestJSON, err := json.Marshal(config)
requestBytes, err := json.Marshal(config)
if err != nil {
return err
}
requestString := string(requestJSON)
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, requestString).
Debug("HCS ComputeSystem Modify Document")
var resultp *uint16
syscallWatcher(computeSystem.logctx, func() {
err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
})
events := processHcsResult(resultp)
requestJSON := string(requestBytes)
resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON)
events := processHcsResult(ctx, resultJSON)
if err != nil {
return makeSystemError(computeSystem, "Modify", requestString, err, events)
return makeSystemError(computeSystem, operation, requestJSON, err, events)
}
return nil
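For illustration, a minimal sketch of how a caller inside hcsshim might use the new context-aware Modify. The request shape and field values below are hypothetical assumptions, not taken from this diff, and the hcs package is internal, so only code within this module can import it.

package hcsexample

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// addMappedDirectory sends a hypothetical modification document to HCS.
// Modify accepts any JSON-marshalable value as the config; the field
// names used here are illustrative only.
func addMappedDirectory(ctx context.Context, system *hcs.System) error {
	request := map[string]interface{}{
		"ResourcePath": "Container/MappedDirectories",
		"RequestType":  "Add",
		"Settings": map[string]string{
			"HostPath":      `C:\host\data`,
			"ContainerPath": `C:\data`,
		},
	}
	return system.Modify(ctx, request)
}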

View File

@ -1,28 +1,34 @@
package hcs
import (
"context"
"time"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
)
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
events := processHcsResult(resultp)
func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
events := processHcsResult(ctx, resultJSON)
if IsPending(err) {
return nil, waitForNotification(callbackNumber, expectedNotification, timeout)
return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout)
}
return events, err
}
func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
callbackMapLock.RLock()
if _, ok := callbackMap[callbackNumber]; !ok {
callbackMapLock.RUnlock()
log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap")
return ErrHandleClose
}
channels := callbackMap[callbackNumber].channels
callbackMapLock.RUnlock()
expectedChannel := channels[expectedNotification]
if expectedChannel == nil {
logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification")
return ErrInvalidNotificationType
}

View File

@ -1,41 +0,0 @@
package hcs
import (
"context"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
)
// syscallWatcher is used as a very simple goroutine around calls into
// the platform. In some cases, we have seen HCS APIs not returning due to
// various bugs, and the goroutine making the syscall ends up not returning,
// prior to its async callback. By spinning up a syscallWatcher, it allows
// us to at least log a warning if a syscall doesn't complete in a reasonable
// amount of time.
//
// Usage is:
//
// syscallWatcher(logContext, func() {
// err = <syscall>(args...)
// })
//
func syscallWatcher(logContext logrus.Fields, syscallLambda func()) {
ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
defer cancel()
go watchFunc(ctx, logContext)
syscallLambda()
}
func watchFunc(ctx context.Context, logContext logrus.Fields) {
select {
case <-ctx.Done():
if ctx.Err() != context.Canceled {
logrus.WithFields(logContext).
WithField(logfields.Timeout, timeout.SyscallWatcher).
Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
}
}
}
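The removed syscallWatcher is now redundant because the vmcompute wrappers carry their own context, but the pattern its comment describes is easy to reproduce. A minimal, self-contained sketch of the same idea follows; the names and timeout are generic placeholders, not hcsshim API.

package main

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"
)

// watchCall runs fn and logs a warning if it has not returned within d,
// mirroring what the deleted syscallWatcher did for platform calls.
func watchCall(name string, d time.Duration, fn func()) {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	go func() {
		<-ctx.Done()
		if ctx.Err() == context.DeadlineExceeded {
			logrus.WithField("call", name).Warning("call did not complete within the expected time")
		}
	}()
	fn()
}

func main() {
	watchCall("slowOperation", 2*time.Second, func() {
		time.Sleep(3 * time.Second) // stand-in for a blocking platform call
	})
}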

View File

@ -3,6 +3,7 @@ package hns
import (
"encoding/json"
"net"
"strings"
"github.com/sirupsen/logrus"
)
@ -94,6 +95,27 @@ func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
return nil, EndpointNotFoundError{EndpointName: endpointName}
}
type endpointAttachInfo struct {
SharedContainers json.RawMessage `json:",omitempty"`
}
func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) {
attachInfo := endpointAttachInfo{}
err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo)
// Returning false here lets us simply return the err
if err != nil {
return false, err
}
if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) {
return true, nil
}
return false, nil
}
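A short usage sketch for the new IsAttached helper, assuming an existing endpoint name and container/VM ID. The hns package is internal and Windows-only, so this is illustrative rather than something external callers can import.

package hcsexample

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hns"
)

// checkAttachment looks an endpoint up by name and reports whether the
// given container or VM ID appears in its SharedContainers list.
func checkAttachment(endpointName, containerID string) (bool, error) {
	endpoint, err := hns.GetHNSEndpointByName(endpointName)
	if err != nil {
		return false, err
	}
	attached, err := endpoint.IsAttached(containerID)
	if err != nil {
		return false, err
	}
	fmt.Printf("endpoint %s attached to %s: %v\n", endpoint.Id, containerID, attached)
	return attached, nil
}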
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
operation := "Create"

View File

@ -9,23 +9,30 @@ import (
"github.com/sirupsen/logrus"
)
func hnsCall(method, path, request string, returnResponse interface{}) error {
func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) {
var responseBuffer *uint16
logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
err := _hnsCall(method, path, request, &responseBuffer)
if err != nil {
return hcserror.New(err, "hnsCall ", "")
return nil, hcserror.New(err, "hnsCall ", "")
}
response := interop.ConvertAndFreeCoTaskMemString(responseBuffer)
hnsresponse := &hnsResponse{}
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
return err
return nil, err
}
return hnsresponse, nil
}
func hnsCall(method, path, request string, returnResponse interface{}) error {
hnsresponse, err := hnsCallRawResponse(method, path, request)
if err != nil {
return fmt.Errorf("failed during hnsCallRawResponse: %v", err)
}
if !hnsresponse.Success {
return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
return fmt.Errorf("hns failed with error : %s", hnsresponse.Error)
}
if len(hnsresponse.Output) == 0 {

View File

@ -2,9 +2,9 @@ package hns
import (
"encoding/json"
"net"
"errors"
"github.com/sirupsen/logrus"
"net"
)
// Subnet is associated with a network and represents a list
@ -98,6 +98,12 @@ func (network *HNSNetwork) Create() (*HNSNetwork, error) {
title := "hcsshim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
for _, subnet := range network.Subnets {
if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") {
return nil, errors.New("network create error, subnet has address prefix but no gateway specified")
}
}
jsonString, err := json.Marshal(network)
if err != nil {
return nil, err

View File

@ -15,10 +15,6 @@ func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
return str
}
func ConvertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(ConvertAndFreeCoTaskMemString(buffer))
}
func Win32FromHresult(hr uintptr) syscall.Errno {
if hr&0x1fff0000 == 0x00070000 {
return syscall.Errno(hr & 0xffff)

23
vendor/github.com/Microsoft/hcsshim/internal/log/g.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package log
import (
"context"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx`
// contains an OpenCensus `trace.Span`.
func G(ctx context.Context) *logrus.Entry {
span := trace.FromContext(ctx)
if span != nil {
sctx := span.SpanContext()
return logrus.WithFields(logrus.Fields{
"traceID": sctx.TraceID.String(),
"spanID": sctx.SpanID.String(),
// "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this?
})
}
return logrus.NewEntry(logrus.StandardLogger())
}
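A small usage sketch: when the context carries an OpenCensus span, log.G decorates the entry with traceID and spanID; otherwise it falls back to the standard logger. The span name and log field below are placeholders.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/log"
	"go.opencensus.io/trace"
)

func doWork(ctx context.Context) {
	// Picks up traceID/spanID from the span stored in ctx, if any.
	log.G(ctx).WithField("step", "doWork").Info("starting work")
}

func main() {
	ctx, span := trace.StartSpan(context.Background(), "example")
	defer span.End()
	doWork(ctx)
}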

View File

@ -0,0 +1,43 @@
package oc
import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var _ = (trace.Exporter)(&LogrusExporter{})
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
// `trace.SpanData` to logrus output.
type LogrusExporter struct {
}
// ExportSpan exports `s` based on the following rules:
//
// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
// `s.ParentSpanID` for correlation
//
// 2. Any calls to .Annotate will not be supported.
//
// 3. The span itself will be written at `logrus.InfoLevel` unless
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
// providing `s.Status.Message` as the error value.
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
// Combine all span annotations with traceID, spanID, parentSpanID
baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
baseEntry.Data["traceID"] = s.TraceID.String()
baseEntry.Data["spanID"] = s.SpanID.String()
baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
baseEntry.Data["startTime"] = s.StartTime
baseEntry.Data["endTime"] = s.EndTime
baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
baseEntry.Data["name"] = s.Name
baseEntry.Time = s.StartTime
level := logrus.InfoLevel
if s.Status.Code != 0 {
level = logrus.ErrorLevel
baseEntry.Data[logrus.ErrorKey] = s.Status.Message
}
baseEntry.Log(level, "Span")
}
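Wiring the exporter up is a one-liner against the standard OpenCensus API; the sampler configuration and the example span below exist only so the sketch reliably produces output.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

func main() {
	// Send every exported span to logrus via the new exporter.
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	_, span := trace.StartSpan(context.Background(), "example-span")
	span.AddAttributes(trace.StringAttribute("key", "value"))
	span.End() // ExportSpan runs here and logs the span at InfoLevel
}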

View File

@ -0,0 +1,17 @@
package oc
import (
"go.opencensus.io/trace"
)
// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
// `err` is `nil` assumes `trace.StatusCodeOk`.
func SetSpanStatus(span *trace.Span, err error) {
status := trace.Status{}
if err != nil {
// TODO: JTERRY75 - Handle errors in a non-generic way
status.Code = trace.StatusCodeUnknown
status.Message = err.Error()
}
span.SetStatus(status)
}
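A typical call site pairs SetSpanStatus with a named error return and a deferred call, so the span records success or failure automatically just before it ends. The operation below is a placeholder, not hcsshim code.

package hcsexample

import (
	"context"
	"errors"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

// doOperation records its outcome on the span: StatusCodeOk on success,
// StatusCodeUnknown plus the error message on failure.
func doOperation(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "doOperation")
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()

	return errors.New("placeholder failure")
}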

View File

@ -10,7 +10,7 @@ import (
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/go-winio/pkg/guid"
)
// ContainerState represents the platform agnostic pieces relating to a

View File

@ -4,7 +4,8 @@ import (
"encoding/json"
"time"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/go-winio/pkg/guid"
hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)
// ProcessConfig is used as both the input of Container.CreateProcess
@ -62,7 +63,7 @@ type MappedVirtualDisk struct {
CreateInUtilityVM bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
AttachOnly bool `json:",omitempty:`
AttachOnly bool `json:",omitempty"`
}
// AssignedDevice represents a device that has been directly assigned to a container
@ -133,9 +134,10 @@ type ContainerProperties struct {
State string
Name string
SystemType string
RuntimeOSType string `json:"RuntimeOsType,omitempty"`
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
RuntimeID guid.GUID `json:"RuntimeId,omitempty"`
IsRuntimeTemplate bool `json:",omitempty"`
RuntimeImagePath string `json:",omitempty"`
Stopped bool `json:",omitempty"`
@ -214,6 +216,7 @@ type MappedVirtualDiskController struct {
type GuestDefinedCapabilities struct {
NamespaceAddRequestSupported bool `json:",omitempty"`
SignalProcessSupported bool `json:",omitempty"`
DumpStacksSupported bool `json:",omitempty"`
}
// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM

View File

@ -10,7 +10,6 @@
package hcsschema
type Attachment struct {
Type_ string `json:"Type,omitempty"`
Path string `json:"Path,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type CacheQueryStatsResponse struct {
L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"`
L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type CloseHandle struct {
Handle string `json:"Handle,omitempty"`
}

View File

@ -11,7 +11,6 @@ package hcsschema
// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port.
type ComPort struct {
NamedPipe string `json:"NamedPipe,omitempty"`
OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"`

View File

@ -10,14 +10,13 @@
package hcsschema
type ComputeSystem struct {
Owner string `json:"Owner,omitempty"`
SchemaVersion *Version `json:"SchemaVersion,omitempty"`
HostingSystemId string `json:"HostingSystemId,omitempty"`
HostedSystem *HostedSystem `json:"HostedSystem,omitempty"`
HostedSystem interface{} `json:"HostedSystem,omitempty"`
Container *Container `json:"Container,omitempty"`

View File

@ -25,37 +25,37 @@ func (c contextKey) String() string {
var (
// ContextOAuth2 takes a oauth2.TokenSource as authentication for the request.
ContextOAuth2 = contextKey("token")
ContextOAuth2 = contextKey("token")
// ContextBasicAuth takes BasicAuth as authentication for the request.
ContextBasicAuth = contextKey("basic")
ContextBasicAuth = contextKey("basic")
// ContextAccessToken takes a string oauth2 access token as authentication for the request.
ContextAccessToken = contextKey("accesstoken")
ContextAccessToken = contextKey("accesstoken")
// ContextAPIKey takes an APIKey as authentication for the request
ContextAPIKey = contextKey("apikey")
ContextAPIKey = contextKey("apikey")
)
// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth
// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth
type BasicAuth struct {
UserName string `json:"userName,omitempty"`
Password string `json:"password,omitempty"`
UserName string `json:"userName,omitempty"`
Password string `json:"password,omitempty"`
}
// APIKey provides API key based authentication to a request passed via context using ContextAPIKey
type APIKey struct {
Key string
Prefix string
Key string
Prefix string
}
type Configuration struct {
BasePath string `json:"basePath,omitempty"`
Host string `json:"host,omitempty"`
Scheme string `json:"scheme,omitempty"`
DefaultHeader map[string]string `json:"defaultHeader,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
HTTPClient *http.Client
BasePath string `json:"basePath,omitempty"`
Host string `json:"host,omitempty"`
Scheme string `json:"scheme,omitempty"`
DefaultHeader map[string]string `json:"defaultHeader,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
HTTPClient *http.Client
}
func NewConfiguration() *Configuration {
@ -69,4 +69,4 @@ func NewConfiguration() *Configuration {
func (c *Configuration) AddDefaultHeader(key string, value string) {
c.DefaultHeader[key] = value
}
}
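The reformatted Configuration comes from generated swagger client code. A minimal sketch of how it would be constructed and given an extra default header; the header key and value are arbitrary, and the behavior of the generated constructor is assumed rather than shown in this diff.

package hcsexample

import (
	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

// newClientConfig builds the default configuration and attaches one
// extra header that every request will carry.
func newClientConfig() *hcsschema.Configuration {
	cfg := hcsschema.NewConfiguration()
	cfg.AddDefaultHeader("Accept", "application/json")
	return cfg
}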

View File

@ -10,7 +10,6 @@
package hcsschema
type ConsoleSize struct {
Height int32 `json:"Height,omitempty"`
Width int32 `json:"Width,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Container struct {
GuestOs *GuestOs `json:"GuestOs,omitempty"`
Storage *Storage `json:"Storage,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// memory usage as viewed from within the container
type ContainerMemoryInformation struct {
TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"`
TotalUsage int32 `json:"TotalUsage,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Devices struct {
ComPorts map[string]ComPort `json:"ComPorts,omitempty"`
Scsi map[string]Scsi `json:"Scsi,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type EnhancedModeVideo struct {
ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type FlexibleIoDevice struct {
EmulatorId string `json:"EmulatorId,omitempty"`
HostingModel string `json:"HostingModel,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type GuestCrashReporting struct {
WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"`
}

View File

@ -10,6 +10,5 @@
package hcsschema
type GuestOs struct {
HostName string `json:"HostName,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type HostedSystem struct {
SchemaVersion *Version `json:"SchemaVersion,omitempty"`
Container *Container `json:"Container,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type HvSocket struct {
Config *HvSocketSystemConfig `json:"Config,omitempty"`
EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"`

View File

@ -11,6 +11,5 @@ package hcsschema
// HvSocket configuration for a VM
type HvSocket2 struct {
HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type Layer struct {
Id string `json:"Id,omitempty"`
Path string `json:"Path,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type MappedDirectory struct {
HostPath string `json:"HostPath,omitempty"`
HostPathType string `json:"HostPathType,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type MappedPipe struct {
ContainerPipeName string `json:"ContainerPipeName,omitempty"`
HostPath string `json:"HostPath,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type Memory struct {
SizeInMB int32 `json:"SizeInMB,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type MemoryInformationForVm struct {
VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"`
VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// Memory runtime statistics
type MemoryStats struct {
MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"`
MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type NetworkAdapter struct {
EndpointId string `json:"EndpointId,omitempty"`
MacAddress string `json:"MacAddress,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Networking struct {
AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"`
DnsSearchList string `json:"DnsSearchList,omitempty"`

View File

@ -11,6 +11,5 @@ package hcsschema
// Notification data that is indicated to components running in the Virtual Machine.
type PauseNotification struct {
Reason string `json:"Reason,omitempty"`
}

View File

@ -11,7 +11,6 @@ package hcsschema
// Options for HcsPauseComputeSystem
type PauseOptions struct {
SuspensionLevel string `json:"SuspensionLevel,omitempty"`
HostedNotification *PauseNotification `json:"HostedNotification,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type Plan9 struct {
Shares []Plan9Share `json:"Shares,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type Plan9Share struct {
Name string `json:"Name,omitempty"`
// The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol.
@ -30,4 +29,6 @@ type Plan9Share struct {
ReadOnly bool `json:"ReadOnly,omitempty"`
UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"`
AllowedFiles []string `json:"AllowedFiles,omitempty"`
}

View File

@ -15,7 +15,6 @@ import (
// Information about a process running in a container
type ProcessDetails struct {
ProcessId int32 `json:"ProcessId,omitempty"`
ImageName string `json:"ImageName,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// Passed to HcsRpc_ModifyProcess
type ProcessModifyRequest struct {
Operation string `json:"Operation,omitempty"`
ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type ProcessParameters struct {
ApplicationName string `json:"ApplicationName,omitempty"`
CommandLine string `json:"CommandLine,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// Status of a process running in a container
type ProcessStatus struct {
ProcessId int32 `json:"ProcessId,omitempty"`
Exited bool `json:"Exited,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Processor struct {
Count int32 `json:"Count,omitempty"`
Maximum int32 `json:"Maximum,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Processor2 struct {
Count int32 `json:"Count,omitempty"`
Limit int32 `json:"Limit,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// CPU runtime statistics
type ProcessorStats struct {
TotalRuntime100ns int32 `json:"TotalRuntime100ns,omitempty"`
RuntimeUser100ns int32 `json:"RuntimeUser100ns,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Properties struct {
Id string `json:"Id,omitempty"`
SystemType string `json:"SystemType,omitempty"`

View File

@ -9,8 +9,7 @@
package hcsschema
// By default the basic properties will be returned. This query provides a way to request specific properties.
// By default the basic properties will be returned. This query provides a way to request specific properties.
type PropertyQuery struct {
PropertyTypes []string `json:"PropertyTypes,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type RdpConnectionOptions struct {
AccessSids []string `json:"AccessSids,omitempty"`
NamedPipe string `json:"NamedPipe,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type RegistryChanges struct {
AddValues []RegistryValue `json:"AddValues,omitempty"`
DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type RegistryKey struct {
Hive string `json:"Hive,omitempty"`
Name string `json:"Name,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type RegistryValue struct {
Key *RegistryKey `json:"Key,omitempty"`
Name string `json:"Name,omitempty"`

View File

@ -10,6 +10,5 @@
package hcsschema
type SharedMemoryConfiguration struct {
Regions []SharedMemoryRegion `json:"Regions,omitempty"`
}

View File

@ -10,7 +10,6 @@
package hcsschema
type SharedMemoryRegion struct {
SectionName string `json:"SectionName,omitempty"`
StartOffset int32 `json:"StartOffset,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type SharedMemoryRegionInfo struct {
SectionName string `json:"SectionName,omitempty"`
GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// Silo job information
type SiloProperties struct {
Enabled bool `json:"Enabled,omitempty"`
JobName string `json:"JobName,omitempty"`

View File

@ -15,7 +15,6 @@ import (
// Runtime statistics for a container
type Statistics struct {
Timestamp time.Time `json:"Timestamp,omitempty"`
ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type StorageQoS struct {
IopsMaximum int32 `json:"IopsMaximum,omitempty"`
BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"`

View File

@ -11,7 +11,6 @@ package hcsschema
// Storage runtime statistics
type StorageStats struct {
ReadCountNormalized int32 `json:"ReadCountNormalized,omitempty"`
ReadSizeBytes int32 `json:"ReadSizeBytes,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Topology struct {
Memory *Memory2 `json:"Memory,omitempty"`
Processor *Processor2 `json:"Processor,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Uefi struct {
EnableDebugger bool `json:"EnableDebugger,omitempty"`
SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type UefiBootEntry struct {
DeviceType string `json:"DeviceType,omitempty"`
DevicePath string `json:"DevicePath,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type Version struct {
Major int32 `json:"Major,omitempty"`
Minor int32 `json:"Minor,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VideoMonitor struct {
HorizontalResolution int32 `json:"HorizontalResolution,omitempty"`
VerticalResolution int32 `json:"VerticalResolution,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VirtualNodeInfo struct {
VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"`
PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VirtualPMemDevice struct {
HostPath string `json:"HostPath,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VirtualSmb struct {
Shares []VirtualSmbShare `json:"Shares,omitempty"`
DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VirtualSmbShare struct {
Name string `json:"Name,omitempty"`
Path string `json:"Path,omitempty"`

View File

@ -10,7 +10,6 @@
package hcsschema
type VirtualSmbShareOptions struct {
ReadOnly bool `json:"ReadOnly,omitempty"`
// convert exclusive access to shared read access

Some files were not shown because too many files have changed in this diff.