VMware vSphere Integrated Containers provider (#206)

* Add Virtual Kubelet provider for VIC

Initial Virtual Kubelet provider for VMware VIC.  This provider currently
handles creating and starting a pod VM via the VIC portlayer and persona
server, and handles image store operations via the VIC persona server.  It
currently requires the feature/wolfpack branch of VIC.
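
For orientation, here is a minimal sketch of what the create path can look
like against the virtual-kubelet provider interface of that era.
`VicProvider`, `personaAddr`, `portlayerAddr`, `createPodVM`, and
`startPodVM` are illustrative names and stubs, not the committed VIC client
API:

```go
package vic

import (
	"context"
	"fmt"

	"k8s.io/api/core/v1"
)

// VicProvider is a hypothetical sketch of the provider described above.
// The real provider talks to the VIC persona server (for image store
// operations) and the portlayer (for pod VM lifecycle); these fields are
// assumptions.
type VicProvider struct {
	personaAddr   string
	portlayerAddr string
}

// CreatePod creates and then starts a pod VM.
func (p *VicProvider) CreatePod(pod *v1.Pod) error {
	ctx := context.Background()
	id, err := p.createPodVM(ctx, pod)
	if err != nil {
		return fmt.Errorf("failed to create pod VM for %s: %s", pod.Name, err)
	}
	return p.startPodVM(ctx, id)
}

// createPodVM is a placeholder for the portlayer create call.
func (p *VicProvider) createPodVM(ctx context.Context, pod *v1.Pod) (string, error) {
	return pod.Name, nil
}

// startPodVM is a placeholder for the portlayer start call.
func (p *VicProvider) startPodVM(ctx context.Context, id string) error {
	return nil
}
```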

* Added pod stop and delete.  Also added node capacity.

Added the ability to stop and delete pod VMs via VIC, and to retrieve
node capacity information from the VCH.
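
Continuing the sketch above, node capacity would be surfaced to Kubernetes
through the provider's `Capacity` method.  A real implementation derives
these numbers from the VCH; the values below are illustrative placeholders
only:

```go
package vic

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Capacity reports the resources this virtual node advertises to Kubernetes.
// The quantities here are placeholders; the real provider queries the VCH.
func (p *VicProvider) Capacity() v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("8"),
		v1.ResourceMemory: resource.MustParse("16Gi"),
		v1.ResourcePods:   resource.MustParse("110"),
	}
}
```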

* Cleanup and README file

Some file cleanup, and added a README.md for the VIC provider.

* Cleaned up errors, added function comments, moved operation code

1. Cleaned up error handling and set a standard for creating errors.
2. Added method prototype comments for all interface functions.
3. Moved PodCreator, PodStarter, PodStopper, and PodDeleter to a new folder
   (a sketch of their likely shape follows this list).
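
The interface names above come from the commit; the signatures below are an
assumed shape for the per-operation interfaces, not the committed code:

```go
package operations

import (
	"context"

	"k8s.io/api/core/v1"
)

// PodCreator wraps pod VM creation behind a small, mockable interface.
// The signature is a guess at the shape described in the commit message.
type PodCreator interface {
	CreatePod(ctx context.Context, pod *v1.Pod) error
}

// PodStarter follows the same pattern; PodStopper and PodDeleter
// would look analogous, one small interface per lifecycle operation.
type PodStarter interface {
	StartPod(ctx context.Context, pod *v1.Pod) error
}
```

Splitting each operation into its own interface keeps the units small enough
to mock individually in the tests described next.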

* Add mocking code and unit tests for podcache, podcreator, and podstarter

Adopted the assertion framework used in VIC for the provider's unit
tests.  Mocking code was generated with the OSS project mockery, which is
compatible with the testify assertion framework.
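
A schematic of that pattern, assuming a hypothetical `PodProxy` interface
and the mock that mockery would generate for it (only the relevant piece is
shown):

```go
package vic

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// MockPodProxy stands in for what mockery would generate from a
// hypothetical PodProxy interface.
type MockPodProxy struct {
	mock.Mock
}

// CreatePod records the call and returns whatever the test configured.
func (m *MockPodProxy) CreatePod(name string) error {
	args := m.Called(name)
	return args.Error(0)
}

func TestCreatePodPropagatesError(t *testing.T) {
	proxy := &MockPodProxy{}
	proxy.On("CreatePod", "nginx").Return(errors.New("portlayer unavailable"))

	err := proxy.CreatePod("nginx")

	assert.Error(t, err)
	proxy.AssertExpectations(t)
}
```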

* Vendored packages for the VIC provider

Requires the feature/wolfpack branch of VIC and a few specific commit SHAs
of projects used within VIC.

* Implementation of Pod Stopper and Deleter unit tests (#4)

* Updated files for initial PR
Authored by Loc Nguyen on 2018-06-04 15:41:32 -07:00
Committed by Ria Bhatia
Parent 98a111e8b7, commit 513cebe7b7
6296 changed files with 1,123,685 additions and 8 deletions


@@ -0,0 +1,23 @@
// Copyright 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package plugins

// import all plugin packages here to register plugins
import (
	// imported for the side effect
	_ "github.com/vmware/vic/lib/migration/samples/plugins/plugin1"
	_ "github.com/vmware/vic/lib/migration/samples/plugins/plugin2"
)


@@ -0,0 +1,111 @@
// Copyright 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package plugin1

import (
	"context"
	"fmt"

	log "github.com/Sirupsen/logrus"

	"github.com/vmware/vic/lib/migration/errors"
	"github.com/vmware/vic/lib/migration/manager"
	"github.com/vmware/vic/pkg/trace"
	"github.com/vmware/vic/pkg/vsphere/extraconfig"
	"github.com/vmware/vic/pkg/vsphere/session"
)

// Sample plugin to migrate data in the appliance configuration VirtualContainerHost.
// If only a couple of items changed in the configuration, you don't have to copy the
// whole VirtualContainerHost; defining only the few items used by this upgrade plugin
// simplifies the extraconfig encoding/decoding process.

const (
	version = 1
	target  = manager.ApplianceConfigure
)

func init() {
	defer trace.End(trace.Begin(fmt.Sprintf("Registering plugin %s:%d", target, version)))

	if err := manager.Migrator.Register(version, target, &ApplianceStopSignalRename{}); err != nil {
		log.Errorf("Failed to register plugin %s:%d, %s", target, version, err)
	}
}

// ApplianceStopSignalRename is a plugin for the vic 0.8.0-GA version upgrade
type ApplianceStopSignalRename struct {
}

type OldStopSignal struct {
	ExecutorConfig `vic:"0.1" scope:"read-only" key:"init"`
}

type ExecutorConfig struct {
	Sessions map[string]*SessionConfig `vic:"0.1" scope:"read-only" key:"sessions"`
}

type SessionConfig struct {
	StopSignal string `vic:"0.1" scope:"read-only" key:"stopSignal"`
}

type NewStopSignal struct {
	NewExecutorConfig `vic:"0.1" scope:"read-only" key:"init"`
}

type NewExecutorConfig struct {
	Sessions map[string]*NewSessionConfig `vic:"0.1" scope:"read-only" key:"sessions"`
}

type NewSessionConfig struct {
	StopSignal string `vic:"0.1" scope:"read-only" key:"forceStopSignal"`
}

// Migrate renames the stopSignal extraconfig key to forceStopSignal by decoding
// the old layout, re-encoding the data under the new key, and replacing the old
// keys in the supplied map.
func (p *ApplianceStopSignalRename) Migrate(ctx context.Context, s *session.Session, data interface{}) error {
	defer trace.End(trace.Begin(fmt.Sprintf("%d", version)))

	if data == nil {
		return nil
	}
	mapData := data.(map[string]string)

	oldStruct := &OldStopSignal{}
	result := extraconfig.Decode(extraconfig.MapSource(mapData), oldStruct)
	if result == nil {
		return &errors.DecodeError{}
	}

	keys := extraconfig.CalculateKeys(oldStruct, "ExecutorConfig.Sessions.*.StopSignal", "")
	for _, key := range keys {
		log.Debugf("old %s:%s", key, mapData[key])
	}

	newStruct := &NewStopSignal{}
	if len(oldStruct.ExecutorConfig.Sessions) == 0 {
		return nil
	}

	newStruct.Sessions = make(map[string]*NewSessionConfig)
	for id, sess := range oldStruct.ExecutorConfig.Sessions {
		newSess := &NewSessionConfig{}
		newSess.StopSignal = sess.StopSignal
		newStruct.Sessions[id] = newSess
	}

	cfg := make(map[string]string)
	extraconfig.Encode(extraconfig.MapSink(cfg), newStruct)

	// remove old data
	for _, key := range keys {
		delete(mapData, key)
	}
	for k, v := range cfg {
		log.Debugf("New data: %s:%s", k, v)
		mapData[k] = v
	}

	return nil
}


@@ -0,0 +1,123 @@
// Copyright 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package plugin2

import (
	"context"
	"fmt"
	"os"

	log "github.com/Sirupsen/logrus"

	"github.com/vmware/vic/lib/migration/errors"
	"github.com/vmware/vic/lib/migration/manager"
	"github.com/vmware/vic/lib/migration/samples/config/v2"
	"github.com/vmware/vic/pkg/kvstore"
	"github.com/vmware/vic/pkg/trace"
	"github.com/vmware/vic/pkg/vsphere/datastore"
	"github.com/vmware/vic/pkg/vsphere/extraconfig"
	"github.com/vmware/vic/pkg/vsphere/session"
)

// Sample plugin to migrate data in a keyvalue store.
// If any key/value changes, create a new keyvalue store file with a version
// suffix, such as .v2, to differentiate it from the old keyvalue store file.
// A keyvalue migration plugin should read configuration from the input
// VirtualContainerHost configuration, and then read from the old keyvalue
// store file directly.  After migration, write back to a new datastore file
// with the version suffix.
// The data migration framework is not responsible for rollback.  With a
// versioned datastore file, even if a rollback happens, the old version's
// datastore file is still usable by the old binary.
// Make sure to delete any existing new-version datastore file, which might
// be left over from a previously failed data migration attempt.

const (
	version       = 2
	target        = manager.ApplianceConfigure
	KVStoreFolder = "kvStores"
	APIKV         = "apiKV"
	oldKey        = "image.name"
	newKey        = "image.tag"
)

func init() {
	log.Debugf("Registering plugin %s:%d", target, version)
	if err := manager.Migrator.Register(version, target, &NewImageMeta{}); err != nil {
		log.Errorf("Failed to register plugin %s:%d, %s", target, version, err)
	}
}

// NewImageMeta is a plugin for the vic 0.8.0-GA version upgrade
type NewImageMeta struct {
}

// Migrate copies the old keyvalue store into a new versioned store file and
// rewrites the image.name key as image.tag.
func (p *NewImageMeta) Migrate(ctx context.Context, s *session.Session, data interface{}) error {
	defer trace.End(trace.Begin(fmt.Sprintf("%d", version)))

	if data == nil {
		return nil
	}
	vchConfMap := data.(map[string]string)

	// No plugin queries the keyvalue store yet, so load it from the datastore file.
	// Get a datastore helper for this datastore URL.
	vchConf := &v2.VirtualContainerHostConfigSpec{}
	extraconfig.Decode(extraconfig.MapSource(vchConfMap), vchConf)

	imageURL := vchConf.ImageStores[0]
	// TODO: sample code, should get datastore from imageURL
	dsHelper, err := datastore.NewHelper(trace.NewOperation(ctx, "datastore helper creation"), s,
		s.Datastore, fmt.Sprintf("%s/%s", imageURL.Path, KVStoreFolder))
	if err != nil {
		return &errors.InternalError{
			Message: fmt.Sprintf("unable to get datastore helper for %s store creation: %s", APIKV, err.Error()),
		}
	}

	// restore the modified K/V store
	oldKeyValStore, err := kvstore.NewKeyValueStore(ctx, kvstore.NewDatastoreBackend(dsHelper), APIKV)
	if err != nil && !os.IsExist(err) {
		return &errors.InternalError{
			Message: fmt.Sprintf("unable to create %s datastore backed store: %s", APIKV, err.Error()),
		}
	}

	// create a new k/v store with version suffix v2
	newDsFile := fmt.Sprintf("%s.v%d", APIKV, version)
	// try to remove the new k/v store file in case it was created already
	dsHelper.Rm(ctx, newDsFile)

	newKeyValueStore, err := kvstore.NewKeyValueStore(ctx, kvstore.NewDatastoreBackend(dsHelper), newDsFile)
	if err != nil && !os.IsExist(err) {
		return &errors.InternalError{
			Message: fmt.Sprintf("unable to create %s datastore backed store: %s", newDsFile, err.Error()),
		}
	}

	// copy all key/value pairs from the old k/v store
	allKeyVals, err := oldKeyValStore.List(".*")
	if err != nil {
		return &errors.InternalError{
			Message: fmt.Sprintf("unable to list key/value store %s: %s", APIKV, err.Error()),
		}
	}
	for key, val := range allKeyVals {
		newKeyValueStore.Put(ctx, key, val)
	}

	val, err := newKeyValueStore.Get(oldKey)
	if err != nil && err != kvstore.ErrKeyNotFound {
		return &errors.InternalError{
			Message: fmt.Sprintf("failed to get %s from store %s: %s", oldKey, APIKV, err.Error()),
		}
	}

	// Put the new key/value into the store, and leave the old key/value there,
	// so that if the upgrade fails the old binary still works with the
	// half-migrated store.
	newKeyValueStore.Put(ctx, newKey, []byte(fmt.Sprintf("%s:%s", val, "latest")))

	// persist the new data back to vSphere; the framework does not take care of it
	newKeyValueStore.Save(ctx)

	return nil
}
}