This change ensures that the core can catch certain types of errors returned by providers and handle them accordingly. There is more work to do to improve this, but it resolves an immediate need: knowing why a Delete failed. The vic provider was not updated, since I could not determine where to obtain this information.
345 lines
9.2 KiB
Go
345 lines
9.2 KiB
Go
package mock
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"io/ioutil"
|
|
"log"
|
|
"time"
|
|
|
|
"github.com/cpuguy83/strongerrors"
|
|
"github.com/virtual-kubelet/virtual-kubelet/providers"
|
|
"k8s.io/api/core/v1"
|
|
"k8s.io/apimachinery/pkg/api/resource"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/apimachinery/pkg/types"
|
|
"k8s.io/client-go/tools/remotecommand"
|
|
)
|
|
|
|
const (
	// Provider configuration defaults. Each value must parse as a Kubernetes
	// resource quantity (see loadConfig, which validates them with
	// resource.ParseQuantity).
	defaultCPUCapacity    = "20"
	defaultMemoryCapacity = "100Gi"
	defaultPodCapacity    = "20"
)
|
|
|
|
// MockProvider implements the virtual-kubelet provider interface and stores pods in memory.
type MockProvider struct {
	nodeName           string         // name of the virtual node this provider backs
	operatingSystem    string         // OS reported for the node (see OperatingSystem)
	internalIP         string         // address advertised via NodeAddresses
	daemonEndpointPort int32          // kubelet endpoint port advertised via NodeDaemonEndpoints
	pods               map[string]*v1.Pod // in-memory pod store, keyed by buildKey ("namespace-name")
	config             MockConfig     // capacity configuration loaded at construction
}
|
|
|
|
// MockConfig contains a mock virtual-kubelet's configurable parameters.
// All values are Kubernetes resource-quantity strings (e.g. "20", "100Gi").
type MockConfig struct {
	CPU    string `json:"cpu,omitempty"`    // node CPU capacity
	Memory string `json:"memory,omitempty"` // node memory capacity
	Pods   string `json:"pods,omitempty"`   // maximum number of pods
}
|
|
|
|
// NewMockProvider creates a new MockProvider
|
|
func NewMockProvider(providerConfig, nodeName, operatingSystem string, internalIP string, daemonEndpointPort int32) (*MockProvider, error) {
|
|
config, err := loadConfig(providerConfig, nodeName)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
provider := MockProvider{
|
|
nodeName: nodeName,
|
|
operatingSystem: operatingSystem,
|
|
internalIP: internalIP,
|
|
daemonEndpointPort: daemonEndpointPort,
|
|
pods: make(map[string]*v1.Pod),
|
|
config: config,
|
|
}
|
|
return &provider, nil
|
|
}
|
|
|
|
// loadConfig loads the given json configuration files.
|
|
|
|
func loadConfig(providerConfig, nodeName string) (config MockConfig, err error) {
|
|
data, err := ioutil.ReadFile(providerConfig)
|
|
if err != nil {
|
|
return config, err
|
|
}
|
|
configMap := map[string]MockConfig{}
|
|
err = json.Unmarshal(data, &configMap)
|
|
if err != nil {
|
|
return config, err
|
|
}
|
|
if _, exist := configMap[nodeName]; exist {
|
|
config = configMap[nodeName]
|
|
if config.CPU == "" {
|
|
config.CPU = defaultCPUCapacity
|
|
}
|
|
if config.Memory == "" {
|
|
config.Memory = defaultMemoryCapacity
|
|
}
|
|
if config.Pods == "" {
|
|
config.Pods = defaultPodCapacity
|
|
}
|
|
}
|
|
|
|
if _, err = resource.ParseQuantity(config.CPU); err != nil {
|
|
return config, fmt.Errorf("Invalid CPU value %v", config.CPU)
|
|
}
|
|
if _, err = resource.ParseQuantity(config.Memory); err != nil {
|
|
return config, fmt.Errorf("Invalid memory value %v", config.Memory)
|
|
}
|
|
if _, err = resource.ParseQuantity(config.Pods); err != nil {
|
|
return config, fmt.Errorf("Invalid pods value %v", config.Pods)
|
|
}
|
|
return config, nil
|
|
}
|
|
|
|
// CreatePod accepts a Pod definition and stores it in memory.
|
|
func (p *MockProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
|
|
log.Printf("receive CreatePod %q\n", pod.Name)
|
|
|
|
key, err := buildKey(pod)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
p.pods[key] = pod
|
|
|
|
return nil
|
|
}
|
|
|
|
// UpdatePod accepts a Pod definition and updates its reference.
|
|
func (p *MockProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
|
|
log.Printf("receive UpdatePod %q\n", pod.Name)
|
|
|
|
key, err := buildKey(pod)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
p.pods[key] = pod
|
|
|
|
return nil
|
|
}
|
|
|
|
// DeletePod deletes the specified pod out of memory.
|
|
func (p *MockProvider) DeletePod(ctx context.Context, pod *v1.Pod) (err error) {
|
|
log.Printf("receive DeletePod %q\n", pod.Name)
|
|
|
|
key, err := buildKey(pod)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if _, exists := p.pods[key]; !exists {
|
|
return strongerrors.NotFound(fmt.Errorf("pod not found"))
|
|
}
|
|
|
|
delete(p.pods, key)
|
|
|
|
return nil
|
|
}
|
|
|
|
// GetPod returns a pod by name that is stored in memory.
|
|
func (p *MockProvider) GetPod(ctx context.Context, namespace, name string) (pod *v1.Pod, err error) {
|
|
log.Printf("receive GetPod %q\n", name)
|
|
|
|
key, err := buildKeyFromNames(namespace, name)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
if pod, ok := p.pods[key]; ok {
|
|
return pod, nil
|
|
}
|
|
|
|
return nil, nil
|
|
}
|
|
|
|
// GetContainerLogs retrieves the logs of a container by name from the provider.
|
|
func (p *MockProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error) {
|
|
log.Printf("receive GetContainerLogs %q\n", podName)
|
|
return "", nil
|
|
}
|
|
|
|
// Get full pod name as defined in the provider context
|
|
// TODO: Implementation
|
|
func (p *MockProvider) GetPodFullName(namespace string, pod string) string {
|
|
return ""
|
|
}
|
|
|
|
// ExecInContainer executes a command in a container in the pod, copying data
|
|
// between in/out/err and the container's stdin/stdout/stderr.
|
|
func (p *MockProvider) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
|
|
log.Printf("receive ExecInContainer %q\n", container)
|
|
return nil
|
|
}
|
|
|
|
// GetPodStatus returns the status of a pod by name that is "running".
|
|
// returns nil if a pod by that name is not found.
|
|
func (p *MockProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) {
|
|
log.Printf("receive GetPodStatus %q\n", name)
|
|
|
|
now := metav1.NewTime(time.Now())
|
|
|
|
status := &v1.PodStatus{
|
|
Phase: v1.PodRunning,
|
|
HostIP: "1.2.3.4",
|
|
PodIP: "5.6.7.8",
|
|
StartTime: &now,
|
|
Conditions: []v1.PodCondition{
|
|
{
|
|
Type: v1.PodInitialized,
|
|
Status: v1.ConditionTrue,
|
|
},
|
|
{
|
|
Type: v1.PodReady,
|
|
Status: v1.ConditionTrue,
|
|
},
|
|
{
|
|
Type: v1.PodScheduled,
|
|
Status: v1.ConditionTrue,
|
|
},
|
|
},
|
|
}
|
|
|
|
pod, err := p.GetPod(ctx, namespace, name)
|
|
if err != nil {
|
|
return status, err
|
|
}
|
|
|
|
for _, container := range pod.Spec.Containers {
|
|
status.ContainerStatuses = append(status.ContainerStatuses, v1.ContainerStatus{
|
|
Name: container.Name,
|
|
Image: container.Image,
|
|
Ready: true,
|
|
RestartCount: 0,
|
|
State: v1.ContainerState{
|
|
Running: &v1.ContainerStateRunning{
|
|
StartedAt: now,
|
|
},
|
|
},
|
|
})
|
|
}
|
|
|
|
return status, nil
|
|
}
|
|
|
|
// GetPods returns a list of all pods known to be "running".
|
|
func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
|
|
log.Printf("receive GetPods\n")
|
|
|
|
var pods []*v1.Pod
|
|
|
|
for _, pod := range p.pods {
|
|
pods = append(pods, pod)
|
|
}
|
|
|
|
return pods, nil
|
|
}
|
|
|
|
// Capacity returns a resource list containing the capacity limits.
|
|
func (p *MockProvider) Capacity(ctx context.Context) v1.ResourceList {
|
|
return v1.ResourceList{
|
|
"cpu": resource.MustParse(p.config.CPU),
|
|
"memory": resource.MustParse(p.config.Memory),
|
|
"pods": resource.MustParse(p.config.Pods),
|
|
}
|
|
}
|
|
|
|
// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status
|
|
// within Kubernetes.
|
|
func (p *MockProvider) NodeConditions(ctx context.Context) []v1.NodeCondition {
|
|
// TODO: Make this configurable
|
|
return []v1.NodeCondition{
|
|
{
|
|
Type: "Ready",
|
|
Status: v1.ConditionTrue,
|
|
LastHeartbeatTime: metav1.Now(),
|
|
LastTransitionTime: metav1.Now(),
|
|
Reason: "KubeletReady",
|
|
Message: "kubelet is ready.",
|
|
},
|
|
{
|
|
Type: "OutOfDisk",
|
|
Status: v1.ConditionFalse,
|
|
LastHeartbeatTime: metav1.Now(),
|
|
LastTransitionTime: metav1.Now(),
|
|
Reason: "KubeletHasSufficientDisk",
|
|
Message: "kubelet has sufficient disk space available",
|
|
},
|
|
{
|
|
Type: "MemoryPressure",
|
|
Status: v1.ConditionFalse,
|
|
LastHeartbeatTime: metav1.Now(),
|
|
LastTransitionTime: metav1.Now(),
|
|
Reason: "KubeletHasSufficientMemory",
|
|
Message: "kubelet has sufficient memory available",
|
|
},
|
|
{
|
|
Type: "DiskPressure",
|
|
Status: v1.ConditionFalse,
|
|
LastHeartbeatTime: metav1.Now(),
|
|
LastTransitionTime: metav1.Now(),
|
|
Reason: "KubeletHasNoDiskPressure",
|
|
Message: "kubelet has no disk pressure",
|
|
},
|
|
{
|
|
Type: "NetworkUnavailable",
|
|
Status: v1.ConditionFalse,
|
|
LastHeartbeatTime: metav1.Now(),
|
|
LastTransitionTime: metav1.Now(),
|
|
Reason: "RouteCreated",
|
|
Message: "RouteController created a route",
|
|
},
|
|
}
|
|
|
|
}
|
|
|
|
// NodeAddresses returns a list of addresses for the node status
|
|
// within Kubernetes.
|
|
func (p *MockProvider) NodeAddresses(ctx context.Context) []v1.NodeAddress {
|
|
return []v1.NodeAddress{
|
|
{
|
|
Type: "InternalIP",
|
|
Address: p.internalIP,
|
|
},
|
|
}
|
|
}
|
|
|
|
// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status
|
|
// within Kubernetes.
|
|
func (p *MockProvider) NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints {
|
|
return &v1.NodeDaemonEndpoints{
|
|
KubeletEndpoint: v1.DaemonEndpoint{
|
|
Port: p.daemonEndpointPort,
|
|
},
|
|
}
|
|
}
|
|
|
|
// OperatingSystem returns the operating system for this provider.
|
|
// This is a noop to default to Linux for now.
|
|
func (p *MockProvider) OperatingSystem() string {
|
|
return providers.OperatingSystemLinux
|
|
}
|
|
|
|
// buildKeyFromNames derives the pod-store key from a namespace and a name.
// The error result is always nil; it exists so the signature matches buildKey.
func buildKeyFromNames(namespace string, name string) (string, error) {
	key := fmt.Sprintf("%s-%s", namespace, name)
	return key, nil
}
|
|
|
|
// buildKey is a helper for building the "key" for the providers pod store.
|
|
func buildKey(pod *v1.Pod) (string, error) {
|
|
if pod.ObjectMeta.Namespace == "" {
|
|
return "", fmt.Errorf("pod namespace not found")
|
|
}
|
|
|
|
if pod.ObjectMeta.Name == "" {
|
|
return "", fmt.Errorf("pod name not found")
|
|
}
|
|
|
|
return buildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
|
|
}
|