Update dependencies to add service fabric mesh via new azure-sdk-go
14 vendor/github.com/vmware/vic/lib/apiservers/engine/errors/errors.go (generated, vendored)
@@ -34,6 +34,14 @@ func (e InvalidVolumeError) Error() string {
 	return fmt.Sprintf("mounting directories as a data volume is not supported.")
 }
 
+type VolumeExistError struct {
+	Volume string
+}
+
+func (e VolumeExistError) Error() string {
+	return fmt.Sprintf("A volume named %s already exists. Choose a different volume name.", e.Volume)
+}
+
 // InvalidBindError is returned when create/run -v has more params than allowed.
 type InvalidBindError struct {
 	Volume string
@@ -187,6 +195,12 @@ func IsServerNotReady(err error) bool {
 	return ok
 }
 
+func IsVolumeExistError(err error) bool {
+	_, ok := err.(VolumeExistError)
+
+	return ok
+}
+
 type DetachError struct{}
 
 func (DetachError) Error() string {
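The new VolumeExistError/IsVolumeExistError pair added above follows the standard Go typed-error convention: a concrete error type carrying context, plus a predicate that hides the type assertion from callers. A minimal standalone sketch of the same pattern (the package wrapper and main function here are illustrative, not part of the vendored file):

package main

import "fmt"

// VolumeExistError mirrors the type added above: it carries the
// conflicting volume name so callers can report it verbatim.
type VolumeExistError struct {
	Volume string
}

func (e VolumeExistError) Error() string {
	return fmt.Sprintf("A volume named %s already exists. Choose a different volume name.", e.Volume)
}

// IsVolumeExistError reports whether err is a VolumeExistError.
func IsVolumeExistError(err error) bool {
	_, ok := err.(VolumeExistError)
	return ok
}

func main() {
	var err error = VolumeExistError{Volume: "data"}
	if IsVolumeExistError(err) {
		fmt.Println("conflict:", err)
	}
}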
18 vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/archive_proxy.go (generated, vendored)
@@ -33,7 +33,7 @@ import (
 	"github.com/docker/docker/api/types"
 )
 
-type VicArchiveProxy interface {
+type ArchiveProxy interface {
 	ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec archive.FilterSpec) (io.ReadCloser, error)
 	ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error)
 	StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error)
@@ -43,23 +43,23 @@ type VicArchiveProxy interface {
 // ArchiveProxy
 //------------------------------------
 
-type ArchiveProxy struct {
+type VicArchiveProxy struct {
 	client *client.PortLayer
 }
 
-var archiveProxy *ArchiveProxy
+var archiveProxy *VicArchiveProxy
 
-func NewArchiveProxy(client *client.PortLayer) VicArchiveProxy {
-	return &ArchiveProxy{client: client}
+func NewArchiveProxy(client *client.PortLayer) *VicArchiveProxy {
+	return &VicArchiveProxy{client: client}
 }
 
-func GetArchiveProxy() VicArchiveProxy {
+func GetArchiveProxy() ArchiveProxy {
 	return archiveProxy
 }
 
 // ArchiveExportReader streams a tar archive from the portlayer. Once the stream is complete,
 // an io.Reader is returned and the caller can use that reader to parse the data.
-func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec archive.FilterSpec) (io.ReadCloser, error) {
+func (a *VicArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec archive.FilterSpec) (io.ReadCloser, error) {
 	defer trace.End(trace.Begin(deviceID))
 
 	if a.client == nil {
@@ -141,7 +141,7 @@ func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorSt
 
 // ArchiveImportWriter initializes a write stream for a path. This is usually called
 // for getting a writer during docker cp TO container.
-func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) {
+func (a *VicArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) {
 	defer trace.End(trace.Begin(deviceID))
 
 	if a.client == nil {
@@ -232,7 +232,7 @@ func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID s
 
 // StatPath requests the portlayer to stat the filesystem resource at the
 // specified path in the container vc.
-func (a *ArchiveProxy) StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) {
+func (a *VicArchiveProxy) StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) {
 	defer trace.End(trace.Begin(deviceID))
 
 	if a.client == nil {
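Across all of the proxies in this commit the naming convention is inverted: the interface now takes the short exported name (ArchiveProxy) and the concrete implementation takes the Vic prefix (VicArchiveProxy), with the constructor returning the concrete type and the package-level getter returning the interface. A minimal sketch of that shape, trimmed to one method (all names here are illustrative, not from the commit):

package proxy

import "io"

// Exporter plays the role of the renamed interface (e.g. ArchiveProxy).
type Exporter interface {
	Export(id string) (io.ReadCloser, error)
}

// VicExporter plays the role of the concrete type (e.g. VicArchiveProxy).
type VicExporter struct {
	// client would be the portlayer client in the real code.
}

func (e *VicExporter) Export(id string) (io.ReadCloser, error) {
	return nil, nil // elided
}

var exporter *VicExporter

// NewExporter returns the concrete type, as NewArchiveProxy now does.
func NewExporter() *VicExporter { return &VicExporter{} }

// GetExporter returns the interface, as GetArchiveProxy now does.
func GetExporter() Exporter { return exporter }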
247 vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/container_proxy.go (generated, vendored)
@@ -74,7 +74,7 @@ import (
 )
 
 // VicContainerProxy interface
-type VicContainerProxy interface {
+type ContainerProxy interface {
 	CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error)
 	AddImageToContainer(ctx context.Context, handle string, deltaID string, layerID string, imageID string, config types.ContainerCreateConfig) (string, error)
 	CreateContainerTask(ctx context.Context, handle string, id string, layerID string, config types.ContainerCreateConfig) (string, error)
@@ -91,7 +91,7 @@ type VicContainerProxy interface {
 	// TODO: we should not be returning a swagger model here, however we do not have a solid architected return for this yet.
 	InspectTask(op trace.Operation, handle string, eid string, cid string) (*models.TaskInspectResponse, error)
 	BindTask(op trace.Operation, handle string, eid string) (string, error)
-	WaitTask(op trace.Operation, cid string, cname string, eid string) error
+	WaitTask(op trace.Operation, handle string, cid string, eid string) error
 
 	Handle(ctx context.Context, id, name string) (string, error)
 
@@ -108,7 +108,7 @@ type VicContainerProxy interface {
 }
 
 // ContainerProxy struct
-type ContainerProxy struct {
+type VicContainerProxy struct {
 	client        *client.PortLayer
 	portlayerAddr string
 	portlayerName string
@@ -126,20 +126,24 @@ const (
 )
 
 // NewContainerProxy will create a new proxy
-func NewContainerProxy(plClient *client.PortLayer, portlayerAddr string, portlayerName string) *ContainerProxy {
-	return &ContainerProxy{client: plClient, portlayerAddr: portlayerAddr, portlayerName: portlayerName}
+func NewContainerProxy(plClient *client.PortLayer, portlayerAddr string, portlayerName string) *VicContainerProxy {
+	return &VicContainerProxy{client: plClient, portlayerAddr: portlayerAddr, portlayerName: portlayerName}
 }
 
 // Handle retrieves a handle to a VIC container. Handles should be treated as opaque strings.
 //
 // returns:
 //	(handle string, error)
-func (c *ContainerProxy) Handle(ctx context.Context, id, name string) (string, error) {
+func (c *VicContainerProxy) Handle(ctx context.Context, id, name string) (string, error) {
+	op := trace.FromContext(ctx, "Handle: %s", id)
+	defer trace.End(trace.Begin(name, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	resp, err := c.client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(id))
+	resp, err := c.client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithOpID(&opID).WithID(id))
 	if err != nil {
 		switch err := err.(type) {
 		case *containers.GetNotFound:
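The bulk of this diff is mechanical and repeats one pattern in every method: derive a trace.Operation from the incoming context, log through op instead of the package-level logger, and thread op.ID() to the portlayer via the generated WithOpID setter so a single operation can be correlated across components. A condensed sketch of the shape — trace, containers, and errors are the vendored packages shown above; Frobnicate is a placeholder method name and imports are elided:

func (c *VicContainerProxy) Frobnicate(ctx context.Context, id string) error {
	// Derive an operation from the caller's context so the whole
	// call chain shares one operation ID.
	op := trace.FromContext(ctx, "Frobnicate: %s", id)
	defer trace.End(trace.Begin(id, op))
	opID := op.ID()

	if c.client == nil {
		return errors.NillPortlayerClientError("ContainerProxy")
	}

	// The generated swagger params carry the ID to the portlayer.
	params := containers.NewGetParamsWithContext(ctx).
		WithOpID(&opID).
		WithID(id)
	_, err := c.client.Containers.Get(params)
	return err
}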
@@ -158,8 +162,10 @@ func (c *ContainerProxy) Handle(ctx context.Context, id, name string) (string, e
 //
 // returns:
 //	(containerID, containerHandle, error)
-func (c *ContainerProxy) CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) {
-	defer trace.End(trace.Begin(vc.ImageID))
+func (c *VicContainerProxy) CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) {
+	op := trace.FromContext(ctx, "CreateContainerHandle: %s", vc.Name)
+	defer trace.End(trace.Begin(vc.Name, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", "", errors.NillPortlayerClientError("ContainerProxy")
@@ -179,12 +185,12 @@ func (c *ContainerProxy) CreateContainerHandle(ctx context.Context, vc *vicconta
 		return "", "", errors.InternalServerError("ContainerProxy.CreateContainerHandle got unexpected error getting VCH UUID")
 	}
 
-	plCreateParams := dockerContainerCreateParamsToPortlayer(ctx, config, vc, host)
+	plCreateParams := dockerContainerCreateParamsToPortlayer(ctx, config, vc, host).WithOpID(&opID)
 	createResults, err := c.client.Containers.Create(plCreateParams)
 	if err != nil {
 		if _, ok := err.(*containers.CreateNotFound); ok {
 			cerr := fmt.Errorf("No such image: %s", vc.ImageID)
-			log.Errorf("%s (%s)", cerr, err)
+			op.Errorf("%s (%s)", cerr, err)
 			return "", "", errors.NotFoundError(cerr.Error())
 		}
 
@@ -203,7 +209,7 @@ func (c *ContainerProxy) CreateContainerHandle(ctx context.Context, vc *vicconta
 //
 // returns:
 //	modified handle
-func (c *ContainerProxy) AddImageToContainer(ctx context.Context, handle, deltaID, layerID, imageID string, config types.ContainerCreateConfig) (string, error) {
+func (c *VicContainerProxy) AddImageToContainer(ctx context.Context, handle, deltaID, layerID, imageID string, config types.ContainerCreateConfig) (string, error) {
 	defer trace.End(trace.Begin(handle))
 
 	if c.client == nil {
@@ -237,20 +243,23 @@ func (c *ContainerProxy) AddImageToContainer(ctx context.Context, handle, deltaI
 //
 // returns:
 //	(containerHandle, error)
-func (c *ContainerProxy) CreateContainerTask(ctx context.Context, handle, id, layerID string, config types.ContainerCreateConfig) (string, error) {
-	defer trace.End(trace.Begin(""))
+func (c *VicContainerProxy) CreateContainerTask(ctx context.Context, handle, id, layerID string, config types.ContainerCreateConfig) (string, error) {
+	op := trace.FromContext(ctx, "CreateContainerTask: %s", id)
+	defer trace.End(trace.Begin(id, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	plTaskParams := dockerContainerCreateParamsToTask(ctx, id, layerID, config)
+	plTaskParams := dockerContainerCreateParamsToTask(op, id, layerID, config)
 	plTaskParams.Config.Handle = handle
+	plTaskParams.WithOpID(&opID)
 
-	log.Infof("*** CreateContainerTask - params = %#v", *plTaskParams.Config)
+	op.Infof("*** CreateContainerTask - params = %#v", *plTaskParams.Config)
 	responseJoin, err := c.client.Tasks.Join(plTaskParams)
 	if err != nil {
-		log.Errorf("Unable to join primary task to container: %+v", err)
+		op.Errorf("Unable to join primary task to container: %+v", err)
 		return "", errors.InternalServerError(err.Error())
 	}
 
@@ -259,10 +268,13 @@ func (c *ContainerProxy) CreateContainerTask(ctx context.Context, handle, id, la
 		return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed on handle from task join: %#+v", handle))
 	}
 
-	plBindParams := tasks.NewBindParamsWithContext(ctx).WithConfig(&models.TaskBindConfig{Handle: handle, ID: id})
+	plBindParams := tasks.NewBindParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithConfig(&models.TaskBindConfig{Handle: handle, ID: id})
 
 	responseBind, err := c.client.Tasks.Bind(plBindParams)
 	if err != nil {
-		log.Errorf("Unable to bind primary task to container: %+v", err)
+		op.Errorf("Unable to bind primary task to container: %+v", err)
 		return "", errors.InternalServerError(err.Error())
 	}
 
@@ -274,8 +286,10 @@ func (c *ContainerProxy) CreateContainerTask(ctx context.Context, handle, id, la
 	return handle, nil
 }
 
-func (c *ContainerProxy) CreateExecTask(ctx context.Context, handle string, config *types.ExecConfig) (string, string, error) {
-	defer trace.End(trace.Begin(""))
+func (c *VicContainerProxy) CreateExecTask(ctx context.Context, handle string, config *types.ExecConfig) (string, string, error) {
+	op := trace.FromContext(ctx, "CreateExecTask: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", "", errors.NillPortlayerClientError("ContainerProxy")
@@ -293,7 +307,7 @@ func (c *ContainerProxy) CreateExecTask(ctx context.Context, handle string, conf
 	}
 
 	// call Join with JoinParams
-	joinparams := tasks.NewJoinParamsWithContext(ctx).WithConfig(joinconfig)
+	joinparams := tasks.NewJoinParamsWithContext(ctx).WithOpID(&opID).WithConfig(joinconfig)
 	resp, err := c.client.Tasks.Join(joinparams)
 	if err != nil {
 		return "", "", errors.InternalServerError(err.Error())
@@ -313,18 +327,21 @@ func (c *ContainerProxy) CreateExecTask(ctx context.Context, handle string, conf
 //
 // returns:
 //	modified handle
-func (c *ContainerProxy) AddContainerToScope(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) AddContainerToScope(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
+	op := trace.FromContext(ctx, "AddContainerToScope: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	log.Debugf("Network Configuration Section - Container Create")
+	op.Debugf("Network Configuration Section - Container Create")
 	// configure network
 	netConf := toModelsNetworkConfig(config)
 	if netConf != nil {
 		addContRes, err := c.client.Scopes.AddContainer(scopes.NewAddContainerParamsWithContext(ctx).
+			WithOpID(&opID).
 			WithScope(netConf.NetworkName).
 			WithConfig(&models.ScopesAddContainerConfig{
 				Handle: handle,
@@ -332,7 +349,7 @@ func (c *ContainerProxy) AddContainerToScope(ctx context.Context, handle string,
 			}))
 
 		if err != nil {
-			log.Errorf("ContainerProxy.AddContainerToScope: Scopes error: %s", err.Error())
+			op.Errorf("ContainerProxy.AddContainerToScope: Scopes error: %s", err.Error())
 			return handle, errors.InternalServerError(err.Error())
 		}
 
@@ -341,8 +358,11 @@ func (c *ContainerProxy) AddContainerToScope(ctx context.Context, handle string,
 				return
 			}
 			// roll back the AddContainer call
-			if _, err2 := c.client.Scopes.RemoveContainer(scopes.NewRemoveContainerParamsWithContext(ctx).WithHandle(handle).WithScope(netConf.NetworkName)); err2 != nil {
-				log.Warnf("could not roll back container add: %s", err2)
+			if _, err2 := c.client.Scopes.RemoveContainer(scopes.NewRemoveContainerParamsWithContext(ctx).
+				WithHandle(handle).
+				WithScope(netConf.NetworkName).
+				WithOpID(&opID)); err2 != nil {
+				op.Warnf("could not roll back container add: %s", err2)
 			}
 		}()
 
@@ -357,14 +377,17 @@ func (c *ContainerProxy) AddContainerToScope(ctx context.Context, handle string,
 //
 // returns:
 //	modified handle
-func (c *ContainerProxy) AddLoggingToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) AddLoggingToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
+	op := trace.FromContext(ctx, "AddLoggingToContainer: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
 	response, err := c.client.Logging.LoggingJoin(logging.NewLoggingJoinParamsWithContext(ctx).
+		WithOpID(&opID).
 		WithConfig(&models.LoggingJoinConfig{
 			Handle: handle,
 		}))
@@ -384,14 +407,17 @@ func (c *ContainerProxy) AddLoggingToContainer(ctx context.Context, handle strin
 //
 // returns:
 //	modified handle
-func (c *ContainerProxy) AddInteractionToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) AddInteractionToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
+	op := trace.FromContext(ctx, "AddLoggingToContainer: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
 	response, err := c.client.Interaction.InteractionJoin(interaction.NewInteractionJoinParamsWithContext(ctx).
+		WithOpID(&opID).
 		WithConfig(&models.InteractionJoinConfig{
 			Handle: handle,
 		}))
@@ -407,8 +433,10 @@ func (c *ContainerProxy) AddInteractionToContainer(ctx context.Context, handle s
 }
 
 // BindInteraction enables interaction capabilities
-func (c *ContainerProxy) BindInteraction(ctx context.Context, handle string, name string, id string) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) BindInteraction(ctx context.Context, handle string, name string, id string) (string, error) {
+	op := trace.FromContext(ctx, "BindInteraction: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
@@ -416,6 +444,7 @@ func (c *ContainerProxy) BindInteraction(ctx context.Context, handle string, nam
 
 	bind, err := c.client.Interaction.InteractionBind(
 		interaction.NewInteractionBindParamsWithContext(ctx).
+			WithOpID(&opID).
 			WithConfig(&models.InteractionBindConfig{
 				Handle: handle,
 				ID:     id,
@@ -436,8 +465,10 @@ func (c *ContainerProxy) BindInteraction(ctx context.Context, handle string, nam
 }
 
 // UnbindInteraction disables interaction capabilities
-func (c *ContainerProxy) UnbindInteraction(ctx context.Context, handle string, name string, id string) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) UnbindInteraction(ctx context.Context, handle string, name string, id string) (string, error) {
+	op := trace.FromContext(ctx, "UnbindInteraction: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
@@ -445,6 +476,7 @@ func (c *ContainerProxy) UnbindInteraction(ctx context.Context, handle string, n
 
 	unbind, err := c.client.Interaction.InteractionUnbind(
 		interaction.NewInteractionUnbindParamsWithContext(ctx).
+			WithOpID(&opID).
 			WithConfig(&models.InteractionUnbindConfig{
 				Handle: handle,
 				ID:     id,
@@ -464,18 +496,21 @@ func (c *ContainerProxy) UnbindInteraction(ctx context.Context, handle string, n
 //
 // Args:
 //	waitTime <= 0 means no wait time
-func (c *ContainerProxy) CommitContainerHandle(ctx context.Context, handle, containerID string, waitTime int32) error {
-	defer trace.End(trace.Begin(handle))
+func (c *VicContainerProxy) CommitContainerHandle(ctx context.Context, handle, containerID string, waitTime int32) error {
+	op := trace.FromContext(ctx, "CommitContainerHandle: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	var commitParams *containers.CommitParams
+	commitParams := containers.NewCommitParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithHandle(handle)
 
 	if waitTime > 0 {
-		commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle).WithWaitTime(&waitTime)
-	} else {
-		commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle)
+		commitParams.WithWaitTime(&waitTime)
 	}
 
 	_, err := c.client.Containers.Commit(commitParams)
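The CommitContainerHandle change is a small cleanup worth calling out: instead of constructing the params twice in an if/else, the params are built once and the optional wait time is attached conditionally. This works because the generated go-swagger With* setters mutate the receiver and return it for chaining. A toy illustration of that setter shape (this is a sketch, not the generated code):

package sketch

type commitParams struct {
	handle   string
	waitTime *int32
}

// WithHandle and WithWaitTime mimic generated swagger setters: they
// mutate the receiver and return it, so calls can be chained or used
// as standalone statements.
func (p *commitParams) WithHandle(h string) *commitParams {
	p.handle = h
	return p
}

func (p *commitParams) WithWaitTime(w *int32) *commitParams {
	p.waitTime = w
	return p
}

func buildCommit(handle string, waitTime int32) *commitParams {
	p := (&commitParams{}).WithHandle(handle)
	if waitTime > 0 {
		p.WithWaitTime(&waitTime) // receiver is mutated; return value ignored
	}
	return p
}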
@@ -495,8 +530,9 @@ func (c *ContainerProxy) CommitContainerHandle(ctx context.Context, handle, cont
 	return nil
 }
 
-func (c *ContainerProxy) InspectTask(op trace.Operation, handle string, eid string, cid string) (*models.TaskInspectResponse, error) {
-	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s), eid(%s), cid(%s)", handle, eid, cid)))
+func (c *VicContainerProxy) InspectTask(op trace.Operation, handle string, eid string, cid string) (*models.TaskInspectResponse, error) {
+	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s), eid(%s), cid(%s)", handle, eid, cid), op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return nil, errors.NillPortlayerClientError("ContainerProxy")
@@ -510,7 +546,7 @@ func (c *ContainerProxy) InspectTask(op trace.Operation, handle string, eid stri
 
 	// FIXME: right now we are only using this path for exec targets. But later the error messages may need to be changed
 	// to be more accurate.
-	params := tasks.NewInspectParamsWithContext(op).WithConfig(config)
+	params := tasks.NewInspectParamsWithContext(op).WithOpID(&opID).WithConfig(config)
 	resp, err := c.client.Tasks.Inspect(params)
 	if err != nil {
 		switch err := err.(type) {
@@ -529,8 +565,9 @@ func (c *ContainerProxy) InspectTask(op trace.Operation, handle string, eid stri
 	return resp.Payload, nil
 }
 
-func (c *ContainerProxy) BindTask(op trace.Operation, handle string, eid string) (string, error) {
-	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s), eid(%s)", handle, eid)))
+func (c *VicContainerProxy) BindTask(op trace.Operation, handle string, eid string) (string, error) {
+	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s), eid(%s)", handle, eid), op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
@@ -540,7 +577,7 @@ func (c *ContainerProxy) BindTask(op trace.Operation, handle string, eid string)
 		Handle: handle,
 		ID:     eid,
 	}
-	bindparams := tasks.NewBindParamsWithContext(op).WithConfig(bindconfig)
+	bindparams := tasks.NewBindParamsWithContext(op).WithOpID(&opID).WithConfig(bindconfig)
 
 	// call Bind with bindparams
 	resp, err := c.client.Tasks.Bind(bindparams)
@@ -569,28 +606,26 @@ func (c *ContainerProxy) BindTask(op trace.Operation, handle string, eid string)
 	return respHandle, nil
 }
 
-func (c *ContainerProxy) WaitTask(op trace.Operation, cid string, cname string, eid string) error {
+func (c *VicContainerProxy) WaitTask(op trace.Operation, handle string, cid string, eid string) error {
+	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s), cid(%s)", handle, cid), op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	handle, err := c.Handle(op, cid, cname)
-	if err != nil {
-		return err
-	}
-
-	// wait the Task to start
+	// wait for the Task to change in state
 	config := &models.TaskWaitConfig{
 		Handle: handle,
 		ID:     eid,
 	}
 
-	params := tasks.NewWaitParamsWithContext(op).WithConfig(config)
-	_, err = c.client.Tasks.Wait(params)
+	params := tasks.NewWaitParamsWithContext(op).WithOpID(&opID).WithConfig(config)
+	_, err := c.client.Tasks.Wait(params)
 	if err != nil {
 		switch err := err.(type) {
 		case *tasks.WaitNotFound:
-			return errors.InternalServerError(fmt.Sprintf("the Container(%s) has been shutdown during execution of the exec operation", cid))
+			return errors.InternalServerError(fmt.Sprintf("the container(%s) has been shutdown during execution of the exec operation", cid))
 		case *tasks.WaitPreconditionRequired:
 			return errors.InternalServerError(fmt.Sprintf("container(%s) must be powered on in order to perform the desired exec operation", cid))
 		case *tasks.WaitInternalServerError:
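WaitTask is the one behavioral signature change in the interface: it now accepts a pre-resolved handle instead of resolving one itself from cid/cname, so callers that already hold a handle avoid a redundant portlayer Get. A hypothetical call-site adjustment (the wrapper function and its name are illustrative; passing op where a context is expected mirrors the removed c.Handle(op, cid, cname) call above):

func waitForExec(op trace.Operation, proxy ContainerProxy, cid, cname, eid string) error {
	// The caller now resolves the handle once...
	handle, err := proxy.Handle(op, cid, cname)
	if err != nil {
		return err
	}
	// ...and passes it to WaitTask, which no longer does its own lookup.
	return proxy.WaitTask(op, handle, cid, eid)
}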
@@ -608,8 +643,10 @@ func (c *ContainerProxy) WaitTask(op trace.Operation, cid string, cname string,
 //
 // returns
 //	error
-func (c *ContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error {
-	defer trace.End(trace.Begin(vc.ContainerID))
+func (c *VicContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error {
+	op := trace.FromContext(ctx, "Stop: %s", name)
+	defer trace.End(trace.Begin(fmt.Sprintf("Name: %s, container id: %s", name, vc.ContainerID), op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return errors.NillPortlayerClientError("ContainerProxy")
@@ -648,7 +685,11 @@ func (c *ContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer
 	}
 
 	// change the state of the container
-	changeParams := containers.NewStateChangeParamsWithContext(ctx).WithHandle(handle).WithState("STOPPED")
+	changeParams := containers.NewStateChangeParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithHandle(handle).
+		WithState("STOPPED")
 
 	stateChangeResponse, err := c.client.Containers.StateChange(changeParams)
 	if err != nil {
 		switch err := err.(type) {
@@ -682,20 +723,22 @@ func (c *ContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer
 }
 
 // UnbindContainerFromNetwork unbinds a container from the networks that it connects to
-func (c *ContainerProxy) UnbindContainerFromNetwork(ctx context.Context, vc *viccontainer.VicContainer, handle string) (string, error) {
-	defer trace.End(trace.Begin(vc.ContainerID))
+func (c *VicContainerProxy) UnbindContainerFromNetwork(ctx context.Context, vc *viccontainer.VicContainer, handle string) (string, error) {
+	op := trace.FromContext(ctx, "UnbindContainerFromNetwork: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	unbindParams := scopes.NewUnbindContainerParamsWithContext(ctx).WithHandle(handle)
+	unbindParams := scopes.NewUnbindContainerParamsWithContext(ctx).WithOpID(&opID).WithHandle(handle)
 	ub, err := c.client.Scopes.UnbindContainer(unbindParams)
 	if err != nil {
 		switch err := err.(type) {
 		case *scopes.UnbindContainerNotFound:
 			// ignore error
-			log.Warnf("Container %s not found by network unbind", vc.ContainerID)
+			op.Warnf("Container %s not found by network unbind", vc.ContainerID)
 		case *scopes.UnbindContainerInternalServerError:
 			return "", errors.InternalServerError(err.Payload.Message)
 		default:
@@ -707,14 +750,18 @@ func (c *ContainerProxy) UnbindContainerFromNetwork(ctx context.Context, vc *vic
 }
 
 // State returns container state
-func (c *ContainerProxy) State(ctx context.Context, vc *viccontainer.VicContainer) (*types.ContainerState, error) {
-	defer trace.End(trace.Begin(""))
+func (c *VicContainerProxy) State(ctx context.Context, vc *viccontainer.VicContainer) (*types.ContainerState, error) {
+	op := trace.FromContext(ctx, "State: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return nil, errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID))
+	results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithID(vc.ContainerID))
 	if err != nil {
 		switch err := err.(type) {
 		case *containers.GetContainerInfoNotFound:
@@ -734,14 +781,15 @@ func (c *ContainerProxy) State(ctx context.Context, vc *viccontainer.VicContaine
 }
 
 // GetStateFromHandle takes a handle and returns the state of the container based on that handle. Also returns handle that comes back with the response.
-func (c *ContainerProxy) GetStateFromHandle(op trace.Operation, handle string) (string, string, error) {
+func (c *VicContainerProxy) GetStateFromHandle(op trace.Operation, handle string) (string, string, error) {
 	defer trace.End(trace.Begin(fmt.Sprintf("handle(%s)", handle), op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	params := containers.NewGetStateParams().WithHandle(handle)
+	params := containers.NewGetStateParams().WithOpID(&opID).WithHandle(handle)
 	resp, err := c.client.Containers.GetState(params)
 	if err != nil {
 		switch err := err.(type) {
@@ -756,14 +804,19 @@ func (c *ContainerProxy) GetStateFromHandle(op trace.Operation, handle string) (
 }
 
 // ExitCode returns container exitCode
-func (c *ContainerProxy) ExitCode(ctx context.Context, vc *viccontainer.VicContainer) (string, error) {
-	defer trace.End(trace.Begin(""))
+func (c *VicContainerProxy) ExitCode(ctx context.Context, vc *viccontainer.VicContainer) (string, error) {
+	op := trace.FromContext(ctx, "ExitCode: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return "", errors.NillPortlayerClientError("ContainerProxy")
 	}
 
-	results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID))
+	results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithID(vc.ContainerID))
 
 	if err != nil {
 		switch err := err.(type) {
 		case *containers.GetContainerInfoNotFound:
@@ -783,8 +836,11 @@ func (c *ContainerProxy) ExitCode(ctx context.Context, vc *viccontainer.VicConta
 	return strconv.Itoa(dockerState.ExitCode), nil
 }
 
-func (c *ContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer, timeout time.Duration) (
+func (c *VicContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer, timeout time.Duration) (
 	*types.ContainerState, error) {
-	defer trace.End(trace.Begin(vc.ContainerID))
+	op := trace.FromContext(ctx, "Wait: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
@@ -798,8 +854,10 @@ func (c *ContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer
 	}
 
 	params := containers.NewContainerWaitParamsWithContext(ctx).
+		WithOpID(&opID).
 		WithTimeout(int64(timeout.Seconds())).
 		WithID(vc.ContainerID)
 
 	results, err := c.client.Containers.ContainerWait(params)
 	if err != nil {
 		switch err := err.(type) {
@@ -826,8 +884,10 @@ func (c *ContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer
 	return dockerState, nil
 }
 
-func (c *ContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContainer, sig uint64) error {
-	defer trace.End(trace.Begin(vc.ContainerID))
+func (c *VicContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContainer, sig uint64) error {
+	op := trace.FromContext(ctx, "Signal: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	if vc == nil {
 		return errors.InternalServerError("Signal bad arguments")
@@ -845,7 +905,11 @@ func (c *ContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContain
 	if sig == 0 {
 		sig = uint64(syscall.SIGKILL)
 	}
-	params := containers.NewContainerSignalParamsWithContext(ctx).WithID(vc.ContainerID).WithSignal(int64(sig))
+	params := containers.NewContainerSignalParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithID(vc.ContainerID).
+		WithSignal(int64(sig))
 
 	if _, err := c.client.Containers.ContainerSignal(params); err != nil {
 		switch err := err.(type) {
 		case *containers.ContainerSignalNotFound:
@@ -867,7 +931,11 @@ func (c *ContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContain
 	return nil
 }
 
-func (c *ContainerProxy) Resize(ctx context.Context, id string, height, width int32) error {
-	defer trace.End(trace.Begin(id))
+func (c *VicContainerProxy) Resize(ctx context.Context, id string, height, width int32) error {
+	op := trace.FromContext(ctx, "Resize: %s", id)
+	defer trace.End(trace.Begin(id, op))
+	opID := op.ID()
 
 	if c.client == nil {
@@ -875,6 +943,7 @@ func (c *ContainerProxy) Resize(ctx context.Context, id string, height, width in
 	}
 
 	plResizeParam := interaction.NewContainerResizeParamsWithContext(ctx).
+		WithOpID(&opID).
		WithID(id).
 		WithHeight(height).
 		WithWidth(width)
@@ -894,8 +963,10 @@ func (c *ContainerProxy) Resize(ctx context.Context, id string, height, width in
 
 // Rename calls the portlayer's RenameContainerHandler to update the container name in the handle,
 // and then commit the new name to vSphere
-func (c *ContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContainer, newName string) error {
-	defer trace.End(trace.Begin(vc.ContainerID))
+func (c *VicContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContainer, newName string) error {
+	op := trace.FromContext(ctx, "Rename: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	//retrieve client to portlayer
 	handle, err := c.Handle(context.TODO(), vc.ContainerID, vc.Name)
@@ -908,7 +979,11 @@ func (c *ContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContain
 	}
 
 	// Call the rename functionality in the portlayer.
-	renameParams := containers.NewContainerRenameParamsWithContext(ctx).WithName(newName).WithHandle(handle)
+	renameParams := containers.NewContainerRenameParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithName(newName).
+		WithHandle(handle)
 
 	result, err := c.client.Containers.ContainerRename(renameParams)
 	if err != nil {
 		switch err := err.(type) {
@@ -942,15 +1017,19 @@ func (c *ContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContain
 
 // Remove calls the portlayer's ContainerRemove handler to remove the container and its
 // anonymous volumes if the remove flag is set.
-func (c *ContainerProxy) Remove(ctx context.Context, vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error {
-	defer trace.End(trace.Begin(vc.ContainerID))
+func (c *VicContainerProxy) Remove(ctx context.Context, vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error {
+	op := trace.FromContext(ctx, "Remove: %s", vc.ContainerID)
+	defer trace.End(trace.Begin(vc.ContainerID, op))
+	opID := op.ID()
 
 	if c.client == nil {
 		return errors.NillPortlayerClientError("ContainerProxy")
 	}
 
 	id := vc.ContainerID
-	_, err := c.client.Containers.ContainerRemove(containers.NewContainerRemoveParamsWithContext(ctx).WithID(id))
+	_, err := c.client.Containers.ContainerRemove(containers.NewContainerRemoveParamsWithContext(ctx).
+		WithOpID(&opID).
+		WithID(id))
 	if err != nil {
 		switch err := err.(type) {
 		case *containers.ContainerRemoveNotFound:
213 vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/storage_proxy.go (generated, vendored)
@@ -39,20 +39,22 @@ import (
 	"github.com/vmware/vic/pkg/trace"
 )
 
-type VicStorageProxy interface {
+type StorageProxy interface {
 	Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error)
+	VolumeExist(ctx context.Context, name string) (bool, error)
 	VolumeList(ctx context.Context, filter string) ([]*models.VolumeResponse, error)
 	VolumeInfo(ctx context.Context, name string) (*models.VolumeResponse, error)
 	Remove(ctx context.Context, name string) error
 
 	VolumeJoin(ctx context.Context, handle, volName, mountPath string, flags map[string]string) (string, error)
 	AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error)
 }
 
-type StorageProxy struct {
+type VicStorageProxy struct {
 	client *client.PortLayer
 }
 
-type volumeFields struct {
+type VolumeFields struct {
 	ID    string
 	Dest  string
 	Flags string
@@ -96,26 +98,27 @@ var SupportedVolDrivers = map[string]struct{}{
 //Validation pattern for Volume Names
 var volumeNameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$")
 
-func NewStorageProxy(client *client.PortLayer) VicStorageProxy {
+func NewStorageProxy(client *client.PortLayer) *VicStorageProxy {
 	if client == nil {
 		return nil
 	}
 
-	return &StorageProxy{client: client}
+	return &VicStorageProxy{client: client}
 }
 
-func (s *StorageProxy) Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) {
-	defer trace.End(trace.Begin(""))
+func (s *VicStorageProxy) Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) {
+	op := trace.FromContext(ctx, "VolumeCreate: %s", name)
+	defer trace.End(trace.Begin(name, op))
 
 	if s.client == nil {
 		return nil, errors.NillPortlayerClientError("StorageProxy")
 	}
 
-	result, err := s.volumeCreate(ctx, name, driverName, volumeData, labels)
+	result, err := s.volumeCreate(op, name, driverName, volumeData, labels)
 	if err != nil {
 		switch err := err.(type) {
 		case *storage.CreateVolumeConflict:
-			return result, errors.VolumeInternalServerError(fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name))
+			return result, errors.VolumeExistError{Volume: name}
 		case *storage.CreateVolumeNotFound:
 			return result, errors.VolumeInternalServerError(fmt.Errorf("No volume store named (%s) exists", volumeStore(volumeData)))
 		case *storage.CreateVolumeInternalServerError:
@@ -132,8 +135,9 @@ func (s *StorageProxy) Create(ctx context.Context, name, driverName string, volu
 }
 
 // volumeCreate issues a CreateVolume request to the portlayer
-func (s *StorageProxy) volumeCreate(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) {
-	defer trace.End(trace.Begin(""))
+func (s *VicStorageProxy) volumeCreate(op trace.Operation, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) {
+	defer trace.End(trace.Begin(name, op))
+	opID := op.ID()
 	result := &types.Volume{}
 
 	if s.client == nil {
@@ -150,9 +154,9 @@ func (s *StorageProxy) volumeCreate(ctx context.Context, name, driverName string
 	if varErr != nil {
 		return result, varErr
 	}
-	log.Infof("Finalized model for volume create request to portlayer: %#v", req)
+	op.Infof("Finalized model for volume create request to portlayer: %#v", req)
 
-	res, err := s.client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(ctx).WithVolumeRequest(req))
+	res, err := s.client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(op).WithOpID(&opID).WithVolumeRequest(req))
 	if err != nil {
 		return result, err
 	}
@@ -160,14 +164,31 @@ func (s *StorageProxy) volumeCreate(ctx context.Context, name, driverName string
 	return NewVolumeModel(res.Payload, labels), nil
 }
 
-func (s *StorageProxy) VolumeList(ctx context.Context, filter string) ([]*models.VolumeResponse, error) {
-	defer trace.End(trace.Begin(""))
+func (s *VicStorageProxy) VolumeExist(ctx context.Context, name string) (bool, error) {
+	defer trace.End(trace.Begin(name))
+
+	vols, err := s.VolumeList(ctx, "")
+	if err == nil {
+		for _, v := range vols {
+			if name == v.Name {
+				return true, nil
+			}
+		}
+	}
+
+	return false, err
+}
+
+func (s *VicStorageProxy) VolumeList(ctx context.Context, filter string) ([]*models.VolumeResponse, error) {
+	op := trace.FromContext(ctx, "VolumeList")
+	defer trace.End(trace.Begin("", op))
+	opID := op.ID()
 
 	if s.client == nil {
 		return nil, errors.NillPortlayerClientError("StorageProxy")
 	}
 
-	res, err := s.client.Storage.ListVolumes(storage.NewListVolumesParamsWithContext(ctx).WithFilterString(&filter))
+	res, err := s.client.Storage.ListVolumes(storage.NewListVolumesParamsWithContext(op).WithOpID(&opID).WithFilterString(&filter))
 	if err != nil {
 		switch err := err.(type) {
 		case *storage.ListVolumesInternalServerError:
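Note that the new VolumeExist is implemented as a linear scan over VolumeList rather than a dedicated portlayer call, so its cost grows with the number of volumes; it exists to let callers pre-check name conflicts. A hypothetical caller (the wrapper function and names are illustrative):

func ensureNewVolume(ctx context.Context, s StorageProxy, name string) error {
	exists, err := s.VolumeExist(ctx, name)
	if err != nil {
		return err
	}
	if exists {
		// Same typed error the create path now returns on conflict.
		return errors.VolumeExistError{Volume: name}
	}
	return nil // safe to call s.Create(ctx, name, "vsphere", volumeData, nil)
}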
@@ -182,8 +203,10 @@ func (s *StorageProxy) VolumeList(ctx context.Context, filter string) ([]*models
 	return res.Payload, nil
 }
 
-func (s *StorageProxy) VolumeInfo(ctx context.Context, name string) (*models.VolumeResponse, error) {
-	defer trace.End(trace.Begin(name))
+func (s *VicStorageProxy) VolumeInfo(ctx context.Context, name string) (*models.VolumeResponse, error) {
+	op := trace.FromContext(ctx, "VolumeInfo: %s", name)
+	defer trace.End(trace.Begin(name, op))
+	opID := op.ID()
 
 	if name == "" {
 		return nil, nil
@@ -193,7 +216,7 @@ func (s *StorageProxy) VolumeInfo(ctx context.Context, name string) (*models.Vol
 		return nil, errors.NillPortlayerClientError("StorageProxy")
 	}
 
-	param := storage.NewGetVolumeParamsWithContext(ctx).WithName(name)
+	param := storage.NewGetVolumeParamsWithContext(op).WithOpID(&opID).WithName(name)
 	res, err := s.client.Storage.GetVolume(param)
 	if err != nil {
 		switch err := err.(type) {
@@ -207,14 +230,16 @@ func (s *StorageProxy) VolumeInfo(ctx context.Context, name string) (*models.Vol
 	return res.Payload, nil
 }
 
-func (s *StorageProxy) Remove(ctx context.Context, name string) error {
-	defer trace.End(trace.Begin(name))
+func (s *VicStorageProxy) Remove(ctx context.Context, name string) error {
+	op := trace.FromContext(ctx, "VolumeRemove: %s", name)
+	defer trace.End(trace.Begin(name, op))
+	opID := op.ID()
 
 	if s.client == nil {
 		return errors.NillPortlayerClientError("StorageProxy")
 	}
 
-	_, err := s.client.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(name))
+	_, err := s.client.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(op).WithOpID(&opID).WithName(name))
 	if err != nil {
 		switch err := err.(type) {
 		case *storage.RemoveVolumeNotFound:
@@ -236,16 +261,17 @@ func (s *StorageProxy) Remove(ctx context.Context, name string) error {
 //
 // returns:
 //	modified handle
-func (s *StorageProxy) AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
-	defer trace.End(trace.Begin(handle))
+func (s *VicStorageProxy) AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) {
+	op := trace.FromContext(ctx, "AddVolumesToContainer: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
 
 	if s.client == nil {
 		return "", errors.NillPortlayerClientError("StorageProxy")
 	}
 
 	// Volume Attachment Section
-	log.Debugf("ContainerProxy.AddVolumesToContainer - VolumeSection")
-	log.Debugf("Raw volume arguments: binds: %#v, volumes: %#v", config.HostConfig.Binds, config.Config.Volumes)
+	op.Debugf("ContainerProxy.AddVolumesToContainer - VolumeSection")
+	op.Debugf("Raw volume arguments: binds: %#v, volumes: %#v", config.HostConfig.Binds, config.Config.Volumes)
 
 	// Collect all volume mappings. In a docker create/run, they
 	// can be anonymous (-v /dir) or specific (-v vol-name:/dir).
@@ -260,7 +286,7 @@ func (s *StorageProxy) AddVolumesToContainer(ctx context.Context, handle string,
 	if err != nil {
 		return handle, errors.BadRequestError(err.Error())
 	}
-	log.Infof("Finalized volume list: %#v", volList)
+	op.Infof("Finalized volume list: %#v", volList)
 
 	if len(config.Config.Volumes) > 0 {
 		// override anonymous volume list with generated volume id
@@ -269,7 +295,7 @@ func (s *StorageProxy) AddVolumesToContainer(ctx context.Context, handle string,
 			delete(config.Config.Volumes, vol.Dest)
 			mount := getMountString(vol.ID, vol.Dest, vol.Flags)
 			config.Config.Volumes[mount] = struct{}{}
-			log.Debugf("Replace anonymous volume config %s with %s", vol.Dest, mount)
+			op.Debugf("Replace anonymous volume config %s with %s", vol.Dest, mount)
 		}
 	}
 }
@@ -285,59 +311,75 @@ func (s *StorageProxy) AddVolumesToContainer(ctx context.Context, handle string,
 		// NOTE: calling volumeCreate regardless of whether the volume is already
 		// present can be avoided by adding an extra optional param to VolumeJoin,
 		// which would then call volumeCreate if the volume does not exist.
-		_, err := s.volumeCreate(ctx, fields.ID, "vsphere", volumeData, nil)
-		if err != nil {
-			switch err := err.(type) {
-			case *storage.CreateVolumeConflict:
-				// Implicitly ignore the error where a volume with the same name
-				// already exists. We can just join the said volume to the container.
-				log.Infof("a volume with the name %s already exists", fields.ID)
-			case *storage.CreateVolumeNotFound:
-				return handle, errors.VolumeCreateNotFoundError(volumeStore(volumeData))
-			default:
-				return handle, errors.InternalServerError(err.Error())
-			}
-		} else {
-			log.Infof("volumeCreate succeeded. Volume mount section ID: %s", fields.ID)
+		_, err := s.Create(op, fields.ID, "vsphere", volumeData, nil)
+		if err != nil && !errors.IsVolumeExistError(err) {
+			return handle, err
 		}
 
 		flags := make(map[string]string)
 		//NOTE: for now we are passing the flags directly through. This is NOT SAFE and only a stop gap.
 		flags[constants.Mode] = fields.Flags
-		joinParams := storage.NewVolumeJoinParamsWithContext(ctx).WithJoinArgs(&models.VolumeJoinConfig{
-			Flags:     flags,
-			Handle:    handle,
-			MountPath: fields.Dest,
-		}).WithName(fields.ID)
-
-		res, err := s.client.Storage.VolumeJoin(joinParams)
+		h, err := s.VolumeJoin(op, handle, fields.ID, fields.Dest, flags)
 		if err != nil {
-			switch err := err.(type) {
-			case *storage.VolumeJoinInternalServerError:
-				return handle, errors.InternalServerError(err.Payload.Message)
-			case *storage.VolumeJoinDefault:
-				return handle, errors.InternalServerError(err.Payload.Message)
-			case *storage.VolumeJoinNotFound:
-				return handle, errors.VolumeJoinNotFoundError(err.Payload.Message)
-			default:
-				return handle, errors.InternalServerError(err.Error())
-			}
+			return handle, err
 		}
 
-		handle = res.Payload
+		handle = h
 	}
 
 	return handle, nil
 }
 
+// VolumeJoin declares a volume mount for a container. This should be called on container create.
+func (s *VicStorageProxy) VolumeJoin(ctx context.Context, handle, volName, mountPath string, flags map[string]string) (string, error) {
+	op := trace.FromContext(ctx, "VolumeJoin: %s", handle)
+	defer trace.End(trace.Begin(handle, op))
+	opID := op.ID()
+
+	if s.client == nil {
+		return "", errors.NillPortlayerClientError("StorageProxy")
+	}
+
+	joinParams := storage.NewVolumeJoinParamsWithContext(op).
+		WithOpID(&opID).
+		WithJoinArgs(&models.VolumeJoinConfig{
+			Flags:     flags,
+			Handle:    handle,
+			MountPath: mountPath,
+		}).WithName(volName)
+
+	res, err := s.client.Storage.VolumeJoin(joinParams)
+	if err != nil {
+		switch err := err.(type) {
+		case *storage.VolumeJoinInternalServerError:
+			return handle, errors.InternalServerError(err.Payload.Message)
+		case *storage.VolumeJoinDefault:
+			return handle, errors.InternalServerError(err.Payload.Message)
+		case *storage.VolumeJoinNotFound:
+			return handle, errors.VolumeJoinNotFoundError(err.Payload.Message)
+		default:
+			return handle, errors.InternalServerError(err.Error())
+		}
+	}
+
+	handle = res.Payload
+
+	return handle, nil
+}
+
 // allContainers obtains all containers from the portlayer, akin to `docker ps -a`.
-func (s *StorageProxy) allContainers(ctx context.Context) ([]*models.ContainerInfo, error) {
+func (s *VicStorageProxy) allContainers(op trace.Operation) ([]*models.ContainerInfo, error) {
+	defer trace.End(trace.Begin("", op))
+	opID := op.ID()
 
 	if s.client == nil {
 		return nil, errors.NillPortlayerClientError("StorageProxy")
 	}
 
 	all := true
-	cons, err := s.client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all))
+	cons, err := s.client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(op).
+		WithOpID(&opID).
+		WithAll(&all))
	if err != nil {
 		return nil, err
 	}
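Two details worth noting in this hunk. First, AddVolumesToContainer now delegates to the newly exported VolumeJoin and swallows the name conflict via errors.IsVolumeExistError. Second, a trace.Operation is passed where context.Context parameters are declared (s.Create(op, ...), s.VolumeJoin(op, ...)), which only compiles if the vendored trace.Operation satisfies context.Context — that appears to be the case throughout this commit. Condensed, the per-volume loop body is now:

	// Create the volume; an existing volume with the same name is fine to reuse.
	if _, err := s.Create(op, fields.ID, "vsphere", volumeData, nil); err != nil && !errors.IsVolumeExistError(err) {
		return handle, err
	}

	// Join it to the container; the portlayer returns the updated handle.
	h, err := s.VolumeJoin(op, handle, fields.ID, fields.Dest, flags)
	if err != nil {
		return handle, err
	}
	handle = h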
@@ -347,8 +389,9 @@ func (s *StorageProxy) allContainers(ctx context.Context) ([]*models.ContainerIn
|
||||
|
||||
// fetchJoinedVolumes obtains all containers from the portlayer and returns a map with all
|
||||
// volumes that are joined to at least one container.
|
||||
func (s *StorageProxy) fetchJoinedVolumes(ctx context.Context) (map[string]struct{}, error) {
|
||||
conts, err := s.allContainers(ctx)
|
||||
func (s *VicStorageProxy) fetchJoinedVolumes(op trace.Operation) (map[string]struct{}, error) {
|
||||
defer trace.End(trace.Begin("", op))
|
||||
conts, err := s.allContainers(op)
|
||||
if err != nil {
|
||||
return nil, errors.VolumeInternalServerError(err)
|
||||
}
|
||||
@@ -410,6 +453,10 @@ func createVolumeMetadata(req *models.VolumeRequest, driverargs, labels map[stri
|
||||
// to at least one other container, and calls the portlayer to remove this container's
|
||||
// anonymous volumes if they are dangling. Errors, if any, are only logged.
|
||||
func RemoveAnonContainerVols(ctx context.Context, pl *client.PortLayer, cID string, vc *viccontainer.VicContainer) {
|
||||
op := trace.FromContext(ctx, "RemoveAnonContainerVols: %s", cID)
|
||||
defer trace.End(trace.Begin(cID, op))
|
||||
opID := op.ID()
|
||||
|
||||
// NOTE: these strings come in the form of <volume id>:<destination>:<volume options>
|
||||
volumes := vc.Config.Volumes
|
||||
// NOTE: these strings come in the form of <volume id>:<destination path>
|
||||
@@ -420,17 +467,17 @@ func RemoveAnonContainerVols(ctx context.Context, pl *client.PortLayer, cID stri
|
||||
for _, entry := range namedVolumes {
|
||||
fields := strings.SplitN(entry, ":", 2)
|
||||
if len(fields) != 2 {
|
||||
log.Errorf("Invalid entry in the HostConfig.Binds metadata section for container %s: %s", cID, entry)
|
||||
op.Errorf("Invalid entry in the HostConfig.Binds metadata section for container %s: %s", cID, entry)
|
||||
continue
|
||||
}
|
||||
destPath := fields[1]
|
||||
namedMaskList[destPath] = struct{}{}
|
||||
}
|
||||
|
||||
proxy := StorageProxy{client: pl}
|
||||
joinedVols, err := proxy.fetchJoinedVolumes(ctx)
|
||||
proxy := VicStorageProxy{client: pl}
|
||||
joinedVols, err := proxy.fetchJoinedVolumes(op)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to obtain joined volumes from portlayer, skipping removal of anonymous volumes for %s: %s", cID, err.Error())
|
||||
op.Errorf("Unable to obtain joined volumes from portlayer, skipping removal of anonymous volumes for %s: %s", cID, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -440,7 +487,7 @@ func RemoveAnonContainerVols(ctx context.Context, pl *client.PortLayer, cID stri
|
||||
|
||||
// NOTE(mavery): this check will start to fail when we fix our metadata correctness issues
|
||||
if len(volFields) != 3 {
|
||||
log.Debugf("Invalid entry in the volumes metadata section for container %s: %s", cID, vol)
|
||||
op.Debugf("Invalid entry in the volumes metadata section for container %s: %s", cID, vol)
|
||||
continue
|
||||
}
|
||||
volName := volFields[0]
|
||||
@@ -449,27 +496,27 @@ func RemoveAnonContainerVols(ctx context.Context, pl *client.PortLayer, cID stri
|
||||
_, isNamed := namedMaskList[volPath]
|
||||
_, joined := joinedVols[volName]
|
||||
if !joined && !isNamed {
|
||||
_, err := pl.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(volName))
|
||||
_, err := pl.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(op).WithOpID(&opID).WithName(volName))
|
||||
if err != nil {
|
||||
log.Debugf("Unable to remove anonymous volume %s in container %s: %s", volName, cID, err.Error())
|
||||
op.Debugf("Unable to remove anonymous volume %s in container %s: %s", volName, cID, err.Error())
|
||||
continue
|
||||
}
|
||||
log.Debugf("Successfully removed anonymous volume %s during remove operation against container(%s)", volName, cID)
|
||||
op.Debugf("Successfully removed anonymous volume %s during remove operation against container(%s)", volName, cID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processVolumeParam is used to turn any call from docker create -v <stuff> into a volumeFields object.
// processVolumeParam is used to turn any call from docker create -v <stuff> into a VolumeFields object.
// The -v has 3 forms. -v <anonymous mount path>, -v <Volume Name>:<Destination Mount Path> and
// -v <Volume Name>:<Destination Mount Path>:<mount flags>
func processVolumeParam(volString string) (volumeFields, error) {
func processVolumeParam(volString string) (VolumeFields, error) {
volumeStrings := strings.Split(volString, ":")
fields := volumeFields{}
fields := VolumeFields{}

// Error out if the intended volume is a directory on the client filesystem.
numVolParams := len(volumeStrings)
if numVolParams > 1 && strings.HasPrefix(volumeStrings[0], "/") {
return volumeFields{}, errors.InvalidVolumeError{}
return VolumeFields{}, errors.InvalidVolumeError{}
}

// This switch determines which type of -v was invoked.
@@ -492,7 +539,7 @@ func processVolumeParam(volString string) (volumeFields, error) {
fields.Flags = volumeStrings[2]
default:
// NOTE: the docker cli should cover this case. This is here for posterity.
return volumeFields{}, errors.InvalidBindError{Volume: volString}
return VolumeFields{}, errors.InvalidBindError{Volume: volString}
}
return fields, nil
}
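The switch elided by the hunk dispatches on how many colon-separated parts the -v argument has. A self-contained sketch of the three forms; the VolumeFields shape is assumed from the fields visible in the diff (Dest, Flags), and the ID field name is an assumption:

package main

import (
	"fmt"
	"strings"
)

// VolumeFields is assumed from the diff: it carries at least an ID, a Dest
// and a Flags string.
type VolumeFields struct {
	ID    string
	Dest  string
	Flags string
}

func processVolumeParam(volString string) (VolumeFields, error) {
	parts := strings.Split(volString, ":")
	fields := VolumeFields{}
	switch len(parts) {
	case 1: // -v <anonymous mount path>
		fields.Dest = parts[0]
	case 2: // -v <Volume Name>:<Destination Mount Path>
		fields.ID, fields.Dest = parts[0], parts[1]
	case 3: // -v <Volume Name>:<Destination Mount Path>:<mount flags>
		fields.ID, fields.Dest, fields.Flags = parts[0], parts[1], parts[2]
	default:
		return VolumeFields{}, fmt.Errorf("invalid bind: %s", volString)
	}
	return fields, nil
}

func main() {
	f, _ := processVolumeParam("mydata:/mnt/data:ro")
	fmt.Printf("%+v\n", f) // {ID:mydata Dest:/mnt/data Flags:ro}
}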
@@ -500,8 +547,8 @@ func processVolumeParam(volString string) (volumeFields, error) {
// processVolumeFields parses fields for volume mappings specified in a create/run -v.
// It returns a map of unique mountable volumes. This means that it removes dupes favoring
// specified volumes over anonymous volumes.
func processVolumeFields(volumes []string) (map[string]volumeFields, error) {
volumeFields := make(map[string]volumeFields)
func processVolumeFields(volumes []string) (map[string]VolumeFields, error) {
vf := make(map[string]VolumeFields)

for _, v := range volumes {
fields, err := processVolumeParam(v)
@@ -509,12 +556,12 @@ func processVolumeFields(volumes []string) (map[string]volumeFields, error) {
if err != nil {
return nil, err
}
volumeFields[fields.Dest] = fields
vf[fields.Dest] = fields
}
return volumeFields, nil
return vf, nil
}

func finalizeVolumeList(specifiedVolumes, anonymousVolumes []string) ([]volumeFields, error) {
func finalizeVolumeList(specifiedVolumes, anonymousVolumes []string) ([]VolumeFields, error) {
log.Infof("Specified Volumes : %#v", specifiedVolumes)
processedVolumes, err := processVolumeFields(specifiedVolumes)
if err != nil {
@@ -532,7 +579,7 @@ func finalizeVolumeList(specifiedVolumes, anonymousVolumes []string) ([]volumeFi
processedAnonVolumes[k] = v
}

finalizedVolumes := make([]volumeFields, 0, len(processedAnonVolumes))
finalizedVolumes := make([]VolumeFields, 0, len(processedAnonVolumes))
for _, v := range processedAnonVolumes {
finalizedVolumes = append(finalizedVolumes, v)
}

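The rename of the local map from volumeFields to vf also stops the variable from shadowing the newly exported VolumeFields type. Keying the map by destination path is what makes a named mount displace an anonymous one at the same path: both land on the same Dest key and the later write wins. A small self-contained sketch of that dedupe, with Mount standing in for VolumeFields:

package main

import "fmt"

// Mount is an illustrative stand-in for VolumeFields.
type Mount struct {
	ID, Dest string
}

// merge keys mounts by destination; later entries win, which is how a named
// mount ("data") displaces an image-defined anonymous mount at the same path.
func merge(anonymous, specified []Mount) []Mount {
	byDest := make(map[string]Mount)
	for _, m := range append(anonymous, specified...) {
		byDest[m.Dest] = m
	}
	out := make([]Mount, 0, len(byDest))
	for _, m := range byDest {
		out = append(out, m)
	}
	return out
}

func main() {
	anon := []Mount{{ID: "9f3c", Dest: "/var/data"}}
	named := []Mount{{ID: "data", Dest: "/var/data"}}
	fmt.Println(merge(anon, named)) // [{data /var/data}]: the named mount wins
}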
183
vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/stream_proxy.go
generated
vendored
183
vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/stream_proxy.go
generated
vendored
@@ -22,7 +22,6 @@ import (
"sync"
"time"

log "github.com/Sirupsen/logrus"
"github.com/go-openapi/strfmt"

"github.com/docker/docker/api/types/backend"
@@ -33,16 +32,18 @@ import (
"github.com/vmware/vic/lib/apiservers/portlayer/client"
"github.com/vmware/vic/lib/apiservers/portlayer/client/containers"
"github.com/vmware/vic/lib/apiservers/portlayer/client/interaction"
"github.com/vmware/vic/lib/apiservers/portlayer/client/events"
"github.com/vmware/vic/pkg/trace"
)

type VicStreamProxy interface {
AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error
type StreamProxy interface {
AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer, autoclose bool) error
StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error
StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error
StreamEvents(ctx context.Context, out io.Writer) error
}

type StreamProxy struct {
type VicStreamProxy struct {
client *client.PortLayer
}

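This rename swaps the roles of the two identifiers: the plain name (StreamProxy) now denotes the interface and the Vic-prefixed name the concrete struct, and the constructor below returns the concrete *VicStreamProxy rather than the interface. A minimal sketch of the resulting shape, with the method set trimmed to one method and the body stubbed; the compile-time assertion line is an idiom I am adding, not something visible in the diff:

package proxy

import (
	"context"
	"io"

	"github.com/vmware/vic/lib/apiservers/portlayer/client"
)

// StreamProxy is the consumer-facing contract; test doubles implement this.
type StreamProxy interface {
	StreamEvents(ctx context.Context, out io.Writer) error
}

// VicStreamProxy is the portlayer-backed implementation.
type VicStreamProxy struct {
	client *client.PortLayer
}

// Compile-time check that the concrete type satisfies the interface.
var _ StreamProxy = (*VicStreamProxy)(nil)

// StreamEvents is stubbed here; the real body appears later in this diff.
func (s *VicStreamProxy) StreamEvents(ctx context.Context, out io.Writer) error { return nil }

// NewStreamProxy returns the concrete type so callers keep access to methods
// outside the interface; assigning to a StreamProxy variable narrows it.
func NewStreamProxy(c *client.PortLayer) *VicStreamProxy {
	return &VicStreamProxy{client: c}
}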
@@ -70,14 +71,18 @@ type AttachConfig struct {
CloseStdin bool
}

func NewStreamProxy(client *client.PortLayer) VicStreamProxy {
return &StreamProxy{client: client}
func NewStreamProxy(client *client.PortLayer) *VicStreamProxy {
return &VicStreamProxy{client: client}
}

// AttachStreams takes the hijacked connections from the calling client and attaches
// them to the 3 streams from the portlayer's rest server.
// stdin, stdout, stderr are the hijacked connections
func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error {
// autoclose controls whether the underlying client transport will be closed when stdout/stderr
func (s *VicStreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer, autoclose bool) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))

// Cancel will close the child connections.
var wg, outWg sync.WaitGroup

@@ -96,10 +101,11 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin
}
}

ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancel(op)
defer cancel()

if ac.UseStdin {
if ac.UseStdin && autoclose {
// if we're not autoclosing then we don't want to block waiting for copyStdin to exit
wg.Add(1)
}

@@ -125,22 +131,26 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin

if ac.UseStdin {
go func() {
defer wg.Done()
err := copyStdIn(ctx, s.client, ac, stdin, keys)
if autoclose {
defer wg.Done()
}
err := copyStdIn(ctx, s.client, ac, stdin, keys, autoclose)
if err != nil {
log.Errorf("container attach: stdin (%s): %s", ac.ID, err)
op.Errorf("container attach: stdin (%s): %s", ac.ID, err)
} else {
log.Infof("container attach: stdin (%s) done", ac.ID)
op.Infof("container attach: stdin (%s) done", ac.ID)
}

// Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/
// We check this before calling cancel so that we will be sure to return detach errors before stdout/err exits,
// even in the !autoclose case.
if EOForCanceled(err) {
errChan <- err
}

if !ac.CloseStdin || ac.UseTty {
cancel()
}

// Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/
if EOForCanceled(err) {
errChan <- err
}
}()
}

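EOForCanceled itself is not shown in this diff. Going by the comments, it presumably matches the swagger EOF substring or a canceled context; a plausible reconstruction, labeled as an assumption rather than the vendored implementation:

package attachutil

import (
	"context"
	"strings"
)

// eofOrCanceled is a hypothetical reconstruction of the EOForCanceled helper
// referenced above. Swagger only surfaces EOF through the error string, and a
// canceled attach context also ends the stream.
func eofOrCanceled(err error) bool {
	if err == nil {
		return false
	}
	msg := err.Error()
	return strings.Contains(msg, "EOF") || strings.Contains(msg, context.Canceled.Error())
}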
@@ -151,9 +161,9 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin

err := copyStdOut(ctx, s.client, ac, stdout, attachAttemptTimeout)
if err != nil {
log.Errorf("container attach: stdout (%s): %s", ac.ID, err)
op.Errorf("container attach: stdout (%s): %s", ac.ID, err)
} else {
log.Infof("container attach: stdout (%s) done", ac.ID)
op.Infof("container attach: stdout (%s) done", ac.ID)
}

// Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/
@@ -170,9 +180,9 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin

err := copyStdErr(ctx, s.client, ac, stderr)
if err != nil {
log.Errorf("container attach: stderr (%s): %s", ac.ID, err)
op.Errorf("container attach: stderr (%s): %s", ac.ID, err)
} else {
log.Infof("container attach: stderr (%s) done", ac.ID)
op.Infof("container attach: stderr (%s) done", ac.ID)
}

// Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/
@@ -188,12 +198,12 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin
// close the channel so that we don't leak (if there is an error)/or get blocked (if there are no errors)
close(errChan)

log.Infof("cleaned up connections to %s. Checking errors", ac.ID)
op.Infof("cleaned up connections to %s", ac.ID)
for err := range errChan {
if err != nil {
// check if we got DetachError
if _, ok := err.(errors.DetachError); ok {
log.Infof("Detached from container detected")
op.Infof("Detached from container detected")
return err
}

@@ -202,20 +212,21 @@ func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin
// Go-swagger returns untyped errors to us if the error is not one that we define
// in the swagger spec. Even EOF. Therefore, we must scan the error string (if there
// is an error string in the untyped error) for the term EOF.
log.Errorf("container attach error: %s", err)
op.Errorf("container attach error: %s", err)

return err
}
}

log.Infof("No error found. Returning nil...")
return nil
}

// StreamContainerLogs reads the log stream from the portlayer rest server and writes
// it directly to the io.Writer that is passed in.
func (s *StreamProxy) StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error {
defer trace.End(trace.Begin(""))
func (s *VicStreamProxy) StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
opID := op.ID()

if s.client == nil {
return errors.NillPortlayerClientError("StreamProxy")
@@ -223,12 +234,13 @@ func (s *StreamProxy) StreamContainerLogs(ctx context.Context, name string, out

close(started)

params := containers.NewGetContainerLogsParamsWithContext(ctx).
params := containers.NewGetContainerLogsParamsWithContext(op).
WithID(name).
WithFollow(&followLogs).
WithTimestamp(&showTimestamps).
WithSince(&since).
WithTaillines(&tailLines)
WithTaillines(&tailLines).
WithOpID(&opID)
_, err := s.client.Containers.GetContainerLogs(params, out)
if err != nil {
switch err := err.(type) {
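The recurring migration in this diff is from the package-level logger to a trace.Operation pulled from the request context, whose ID is then threaded to the portlayer via WithOpID so both sides log under one correlation ID. A self-contained toy of that pattern; Operation and logsParams are stand-ins, not the VIC or go-swagger types:

package main

import "fmt"

// Operation is a toy stand-in for trace.Operation: an ID that prefixes every
// log line, which is what makes client and portlayer logs joinable.
type Operation struct{ id string }

func (o Operation) ID() string { return o.id }
func (o Operation) Errorf(format string, args ...interface{}) {
	fmt.Printf("op=%s ERROR ", o.id)
	fmt.Printf(format+"\n", args...)
}

// logsParams mimics the go-swagger builder shape used above: each With*
// returns the params so calls chain, and WithOpID carries the correlation ID
// to the server side.
type logsParams struct {
	id   string
	opID *string
}

func (p *logsParams) WithID(id string) *logsParams      { p.id = id; return p }
func (p *logsParams) WithOpID(opID *string) *logsParams { p.opID = opID; return p }

func main() {
	op := Operation{id: "52a1"}
	opID := op.ID()
	params := (&logsParams{}).WithID("mycontainer").WithOpID(&opID)
	op.Errorf("streaming logs failed for %s", params.id)
	// op=52a1 ERROR streaming logs failed for mycontainer
}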
@@ -253,8 +265,10 @@ func (s *StreamProxy) StreamContainerLogs(ctx context.Context, name string, out
// StreamContainerStats will provide a stream of container stats written to the provided
// io.Writer. Prior to writing to the provided io.Writer there will be a transformation
// from the portLayer representation of stats to the docker format
func (s *StreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error {
defer trace.End(trace.Begin(config.ContainerID))
func (s *VicStreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin(config.ContainerID, op))
opID := op.ID()

if s.client == nil {
return errors.NillPortlayerClientError("StreamProxy")
@@ -264,7 +278,7 @@ func (s *StreamProxy) StreamContainerStats(ctx context.Context, config *convert.
ctx, cancel := context.WithCancel(ctx)
defer cancel()

params := containers.NewGetContainerStatsParamsWithContext(ctx)
params := containers.NewGetContainerStatsParamsWithContext(op).WithOpID(&opID)
params.ID = config.ContainerID
params.Stream = config.Stream

@@ -303,11 +317,48 @@ func (s *StreamProxy) StreamContainerStats(ctx context.Context, config *convert.
return nil
}

// StreamEvents() handles all swagger interaction with the Portlayer's event manager
//
// Input:
// context and an io.Writer
func (s *VicStreamProxy) StreamEvents(ctx context.Context, out io.Writer) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
opID := op.ID()

if s.client == nil {
return errors.NillPortlayerClientError("StreamProxy")
}

params := events.NewGetEventsParamsWithContext(ctx).WithOpID(&opID)
if _, err := s.client.Events.GetEvents(params, out); err != nil {
switch err := err.(type) {
case *events.GetEventsInternalServerError:
return errors.InternalServerError("Server error from the events port layer")
default:
// Check for EOF. Since the connection, transport, and data handling are
// encapsulated inside of Swagger, we can only detect EOF by checking the
// error string
if strings.Contains(err.Error(), SwaggerSubstringEOF) {
return nil
}
return errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err))
}
}

return nil
}

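Since GetEvents writes the raw event stream into out until the connection drops, a caller can point StreamEvents at any io.Writer. A hypothetical wiring into an HTTP handler, with the handler itself being illustrative:

package handlers

import (
	"net/http"

	"github.com/vmware/vic/lib/apiservers/engine/proxy"
)

// eventsHandler is a hypothetical relay of portlayer events to an API client;
// http.ResponseWriter is just another io.Writer from the proxy's point of view.
func eventsHandler(sp proxy.StreamProxy) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := sp.StreamEvents(r.Context(), w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}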
//------------------------------------
// ContainerAttach() Utility Functions
//------------------------------------

func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdin io.ReadCloser, keys []byte) error {
func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdin io.ReadCloser, keys []byte, autoclose bool) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
opID := op.ID()

// Pipe for stdin so we can interject and watch the input streams for detach keys.
stdinReader, stdinWriter := io.Pipe()
defer stdinReader.Close()
@@ -315,26 +366,28 @@ func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdi
var detach bool

done := make(chan struct{})
go func() {
// make sure we get out of io.Copy if context is canceled
select {
case <-ctx.Done():
// This will cause the transport to the API client to be shut down, so all output
// streams will get closed as well.
// See the closer in container_routes.go:postContainersAttach
if autoclose {
go func() {
// make sure we get out of io.Copy if context is canceled
select {
case <-ctx.Done():
// This will cause the transport to the API client to be shut down, so all output
// streams will get closed as well.
// See the closer in container_routes.go:postContainersAttach

// We're closing this here to disrupt the io.Copy below
// TODO: seems like we should be providing an io.Copy impl with ctx argument that honors
// cancelation with the amount of code dedicated to working around it
// We're closing this here to disrupt the io.Copy below
// TODO: seems like we should be providing an io.Copy impl with ctx argument that honors
// cancelation with the amount of code dedicated to working around it

// TODO: I think this still leaves a race between closing of the API client transport and
// copying of the output streams, it's just likely the error will be dropped as the transport is
// closed when it occurs.
// We should move away from needing to close transports to interrupt reads.
stdin.Close()
case <-done:
}
}()
// TODO: I think this still leaves a race between closing of the API client transport and
// copying of the output streams, it's just likely the error will be dropped as the transport is
// closed when it occurs.
// We should move away from needing to close transports to interrupt reads.
stdin.Close()
case <-done:
}
}()
}

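The TODO above asks for an io.Copy that takes a context. A minimal sketch of that idea, checking the context between reads; note it cannot interrupt a Read that is already blocked, which is why the production code still closes the transport to break the copy:

package attachutil

import (
	"context"
	"io"
)

// ctxCopy copies src to dst, honoring cancelation at chunk boundaries.
func ctxCopy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 32*1024)
	var written int64
	for {
		select {
		case <-ctx.Done():
			return written, ctx.Err()
		default:
		}
		nr, rerr := src.Read(buf)
		if nr > 0 {
			nw, werr := dst.Write(buf[:nr])
			written += int64(nw)
			if werr != nil {
				return written, werr
			}
		}
		if rerr == io.EOF {
			return written, nil
		}
		if rerr != nil {
			return written, rerr
		}
	}
}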
go func() {
defer close(done)
@@ -347,7 +400,7 @@ func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdi
// Write some init bytes into the pipe to force Swagger to make the initial
// call to the portlayer, prior to any user input in whatever attach client
// he/she is using.
log.Debugf("copyStdIn writing primer bytes")
op.Debugf("copyStdIn writing primer bytes")
stdinWriter.Write([]byte(attachStdinInitString))
if ac.UseTty {
_, err = copyEscapable(stdinWriter, stdin, keys)
@@ -357,10 +410,10 @@ func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdi

if err != nil {
if _, ok := err.(errors.DetachError); ok {
log.Infof("stdin detach detected")
op.Infof("stdin detach detected")
detach = true
} else {
log.Errorf("stdin err: %s", err)
op.Errorf("stdin err: %s", err)
}
}
}()
@@ -369,19 +422,21 @@ func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdi

// Swagger wants an io.Reader so give it the reader pipe. Also, the swagger call
// to set the stdin is synchronous so we need to run in a goroutine
setStdinParams := interaction.NewContainerSetStdinParamsWithContext(ctx).WithID(id)
setStdinParams = setStdinParams.WithRawStream(stdinReader)
setStdinParams := interaction.NewContainerSetStdinParamsWithContext(op).
WithID(id).
WithRawStream(stdinReader).
WithOpID(&opID)

_, err := pl.Interaction.ContainerSetStdin(setStdinParams)
<-done

if ac.CloseStdin && !ac.UseTty {
// Close the stdin connection. Mimicking Docker's behavior.
log.Errorf("Attach stream has stdinOnce set. Closing the stdin.")
op.Errorf("Attach stream has stdinOnce set. Closing the stdin.")
params := interaction.NewContainerCloseStdinParamsWithContext(ctx).WithID(id)
_, err := pl.Interaction.ContainerCloseStdin(params)
if err != nil {
log.Errorf("CloseStdin failed with %s", err)
op.Errorf("CloseStdin failed with %s", err)
}
}

@@ -394,19 +449,21 @@ func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdi
}

func copyStdOut(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdout io.Writer, attemptTimeout time.Duration) error {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
id := ac.ID

//Calculate how much time to let portlayer attempt
plAttemptTimeout := attemptTimeout - attachPLAttemptDiff //assumes personality deadline longer than portlayer's deadline
plAttemptDeadline := time.Now().Add(plAttemptTimeout)
swaggerDeadline := strfmt.DateTime(plAttemptDeadline)
log.Debugf("* stdout portlayer deadline: %s", plAttemptDeadline.Format(time.UnixDate))
log.Debugf("* stdout personality deadline: %s", time.Now().Add(attemptTimeout).Format(time.UnixDate))
op.Debugf("* stdout portlayer deadline: %s", plAttemptDeadline.Format(time.UnixDate))
op.Debugf("* stdout personality deadline: %s", time.Now().Add(attemptTimeout).Format(time.UnixDate))

log.Debugf("* stdout attach start %s", time.Now().Format(time.UnixDate))
op.Debugf("* stdout attach start %s", time.Now().Format(time.UnixDate))
getStdoutParams := interaction.NewContainerGetStdoutParamsWithContext(ctx).WithID(id).WithDeadline(&swaggerDeadline)
_, err := pl.Interaction.ContainerGetStdout(getStdoutParams, stdout)
log.Debugf("* stdout attach end %s", time.Now().Format(time.UnixDate))
op.Debugf("* stdout attach end %s", time.Now().Format(time.UnixDate))
if err != nil {
if _, ok := err.(*interaction.ContainerGetStdoutNotFound); ok {
return errors.ContainerResourceNotFoundError(id, "interaction connection")

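The two deadlines are staggered so the portlayer gives up first and the personality still has time to surface a clean error to the API client. A toy calculation with assumed values; the real attachAttemptTimeout and attachPLAttemptDiff constants are defined elsewhere in stream_proxy.go and are not shown in this hunk:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed values for illustration only.
	attemptTimeout := 5 * time.Minute      // personality-side budget
	attachPLAttemptDiff := 8 * time.Second // safety margin between the two layers

	plAttemptTimeout := attemptTimeout - attachPLAttemptDiff
	now := time.Now()
	fmt.Println("portlayer deadline:  ", now.Add(plAttemptTimeout).Format(time.UnixDate))
	fmt.Println("personality deadline:", now.Add(attemptTimeout).Format(time.UnixDate))
	// The portlayer deadline lands 8s earlier, so its timeout error reaches
	// the personality before the personality's own deadline fires.
}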
35
vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/system_proxy.go
generated
vendored
35
vendor/github.com/vmware/vic/lib/apiservers/engine/proxy/system_proxy.go
generated
vendored
@@ -37,7 +37,6 @@ import (

"golang.org/x/net/context"

log "github.com/Sirupsen/logrus"
derr "github.com/docker/docker/api/errors"

"github.com/vmware/vic/lib/apiservers/engine/errors"
@@ -48,36 +47,37 @@ import (
"github.com/vmware/vic/pkg/trace"
)

type VicSystemProxy interface {
type SystemProxy interface {
PingPortlayer(ctx context.Context) bool
ContainerCount(ctx context.Context) (int, int, int, error)
VCHInfo(ctx context.Context) (*models.VCHInfo, error)
}

type SystemProxy struct {
type VicSystemProxy struct {
client *client.PortLayer
}

func NewSystemProxy(client *client.PortLayer) VicSystemProxy {
func NewSystemProxy(client *client.PortLayer) *VicSystemProxy {
if client == nil {
return nil
}

return &SystemProxy{client: client}
return &VicSystemProxy{client: client}
}

func (s *SystemProxy) PingPortlayer(ctx context.Context) bool {
defer trace.End(trace.Begin(""))
func (s *VicSystemProxy) PingPortlayer(ctx context.Context) bool {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))

if s.client == nil {
log.Errorf("Portlayer client is invalid")
op.Errorf("Portlayer client is invalid")
return false
}

pingParams := misc.NewPingParamsWithContext(ctx)
_, err := s.client.Misc.Ping(pingParams)
if err != nil {
log.Info("Ping to portlayer failed")
op.Info("Ping to portlayer failed")
return false
}
return true
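PingPortlayer returns a bare bool, which makes it natural to use as a startup gate. A hypothetical readiness loop; the function and the one-second poll interval are illustrative:

package handlers

import (
	"context"
	"time"

	"github.com/vmware/vic/lib/apiservers/engine/proxy"
)

// waitForPortlayer is a hypothetical readiness gate built on PingPortlayer.
func waitForPortlayer(ctx context.Context, sp proxy.SystemProxy) error {
	for !sp.PingPortlayer(ctx) {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second): // illustrative poll interval
		}
	}
	return nil
}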
@@ -85,8 +85,10 @@ func (s *SystemProxy) PingPortlayer(ctx context.Context) bool {

// Use the Portlayer's support for docker ps to get the container count
// return order: running, paused, stopped counts
func (s *SystemProxy) ContainerCount(ctx context.Context) (int, int, int, error) {
defer trace.End(trace.Begin(""))
func (s *VicSystemProxy) ContainerCount(ctx context.Context) (int, int, int, error) {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
opID := op.ID()

var running, paused, stopped int

@@ -95,7 +97,8 @@ func (s *SystemProxy) ContainerCount(ctx context.Context) (int, int, int, error)
}

all := true
containList, err := s.client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all))
params := containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all).WithOpID(&opID)
containList, err := s.client.Containers.GetContainerList(params)
if err != nil {
return 0, 0, 0, derr.NewErrorWithStatusCode(fmt.Errorf("Failed to get container list: %s", err), http.StatusInternalServerError)
}
@@ -112,14 +115,16 @@ func (s *SystemProxy) ContainerCount(ctx context.Context) (int, int, int, error)
return running, paused, stopped, nil
}

func (s *SystemProxy) VCHInfo(ctx context.Context) (*models.VCHInfo, error) {
defer trace.End(trace.Begin(""))
func (s *VicSystemProxy) VCHInfo(ctx context.Context) (*models.VCHInfo, error) {
op := trace.FromContext(ctx, "")
defer trace.End(trace.Begin("", op))
opID := op.ID()

if s.client == nil {
return nil, errors.NillPortlayerClientError("SystemProxy")
}

params := misc.NewGetVCHInfoParamsWithContext(ctx)
params := misc.NewGetVCHInfoParamsWithContext(ctx).WithOpID(&opID)
resp, err := s.client.Misc.GetVCHInfo(params)
if err != nil {
// There are no custom errors for this operation. If we get back an error, it's