Use strong type for container ID

Change all references to the container ID in pkg/kubelet/... to the
strong type defined in pkg/kubelet/container: ContainerID

The motivation for this change is to make the format of the ID
unambiguous, specifically whether or not it includes the runtime
prefix (e.g. "docker://").
pull/6/head
Tim St. Clair 2015-10-07 10:58:05 -07:00
parent 092dddd12c
commit 551eff63b8
27 changed files with 228 additions and 211 deletions

View File

@ -28,32 +28,31 @@ import (
// for the caller.
type RefManager struct {
sync.RWMutex
// TODO(yifan): To use strong type.
containerIDToRef map[string]*api.ObjectReference
containerIDToRef map[ContainerID]*api.ObjectReference
}
// NewRefManager creates and returns a container reference manager
// with empty contents.
func NewRefManager() *RefManager {
return &RefManager{containerIDToRef: make(map[string]*api.ObjectReference)}
return &RefManager{containerIDToRef: make(map[ContainerID]*api.ObjectReference)}
}
// SetRef stores a reference to a pod's container, associating it with the given container ID.
func (c *RefManager) SetRef(id string, ref *api.ObjectReference) {
func (c *RefManager) SetRef(id ContainerID, ref *api.ObjectReference) {
c.Lock()
defer c.Unlock()
c.containerIDToRef[id] = ref
}
// ClearRef forgets the given container id and its associated container reference.
func (c *RefManager) ClearRef(id string) {
func (c *RefManager) ClearRef(id ContainerID) {
c.Lock()
defer c.Unlock()
delete(c.containerIDToRef, id)
}
// GetRef returns the container reference of the given ID, or (nil, false) if none is stored.
func (c *RefManager) GetRef(id string) (ref *api.ObjectReference, ok bool) {
func (c *RefManager) GetRef(id ContainerID) (ref *api.ObjectReference, ok bool) {
c.RLock()
defer c.RUnlock()
ref, ok = c.containerIDToRef[id]

View File

@ -217,7 +217,7 @@ func (f *FakeRuntime) GetPodStatus(*api.Pod) (*api.PodStatus, error) {
return &status, f.Err
}
func (f *FakeRuntime) ExecInContainer(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (f *FakeRuntime) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
f.Lock()
defer f.Unlock()
@ -225,7 +225,7 @@ func (f *FakeRuntime) ExecInContainer(containerID string, cmd []string, stdin io
return f.Err
}
func (f *FakeRuntime) AttachContainer(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (f *FakeRuntime) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
f.Lock()
defer f.Unlock()
@ -233,7 +233,7 @@ func (f *FakeRuntime) AttachContainer(containerID string, stdin io.Reader, stdou
return f.Err
}
func (f *FakeRuntime) RunInContainer(containerID string, cmd []string) ([]byte, error) {
func (f *FakeRuntime) RunInContainer(containerID ContainerID, cmd []string) ([]byte, error) {
f.Lock()
defer f.Unlock()
@ -241,7 +241,7 @@ func (f *FakeRuntime) RunInContainer(containerID string, cmd []string) ([]byte,
return []byte{}, f.Err
}
func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
f.Lock()
defer f.Unlock()

View File

@ -18,7 +18,6 @@ package container
import (
"hash/adler32"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util"
@ -29,7 +28,7 @@ import (
// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error
Run(containerID ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) error
}
// RunContainerOptionsGenerator generates the options that are necessary for
@ -38,17 +37,6 @@ type RunContainerOptionsGenerator interface {
GenerateRunContainerOptions(pod *api.Pod, container *api.Container) (*RunContainerOptions, error)
}
// Trims runtime prefix from ID or image name (e.g.: docker://busybox -> busybox).
func TrimRuntimePrefix(fullString string) string {
const prefixSeparator = "://"
idx := strings.Index(fullString, prefixSeparator)
if idx < 0 {
return fullString
}
return fullString[idx+len(prefixSeparator):]
}
// ShouldContainerBeRestarted checks whether a container needs to be restarted.
// TODO(yifan): Think about how to refactor this.
func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *api.PodStatus) bool {

View File

@ -23,6 +23,7 @@ import (
"reflect"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
@ -79,7 +80,7 @@ type Runtime interface {
// default, it returns a snapshot of the container log. Set 'follow' to true to
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
// "100" or "all") to tail the log.
GetContainerLogs(pod *api.Pod, containerID string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error)
GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error)
// ContainerCommandRunner encapsulates the command runner interfaces for testability.
ContainerCommandRunner
// ContainerAttach encapsulates the attaching to containers for testability
@ -87,20 +88,18 @@ type Runtime interface {
}
type ContainerAttacher interface {
AttachContainer(id string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) (err error)
AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) (err error)
}
// ContainerCommandRunner encapsulates the command runner interfaces for testability.
type ContainerCommandRunner interface {
// TODO(vmarmol): Merge RunInContainer and ExecInContainer.
// Runs the command in the container of the specified pod using nsinit.
// TODO(yifan): Use strong type for containerID.
RunInContainer(containerID string, cmd []string) ([]byte, error)
RunInContainer(containerID ContainerID, cmd []string) ([]byte, error)
// Runs the command in the container of the specified pod using nsenter.
// Attaches the process's stdin, stdout, and stderr. Optionally uses a
// tty.
// TODO(yifan): Use strong type for containerID.
ExecInContainer(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error
ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error
// Forward the specified port from the specified pod to the stream.
PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error
}
@ -139,6 +138,15 @@ func BuildContainerID(typ, ID string) ContainerID {
return ContainerID{Type: typ, ID: ID}
}
// Convenience method for creating a ContainerID from an ID string.
func ParseContainerID(containerID string) ContainerID {
var id ContainerID
if err := id.ParseString(containerID); err != nil {
glog.Error(err)
}
return id
}
func (c *ContainerID) ParseString(data string) error {
// Trim the quotes and split the type and ID.
parts := strings.Split(strings.Trim(data, "\""), "://")
@ -153,6 +161,10 @@ func (c *ContainerID) String() string {
return fmt.Sprintf("%s://%s", c.Type, c.ID)
}
func (c *ContainerID) IsEmpty() bool {
return *c == ContainerID{}
}
func (c *ContainerID) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("%q", c.String())), nil
}
@ -166,7 +178,7 @@ func (c *ContainerID) UnmarshalJSON(data []byte) error {
type Container struct {
// The ID of the container, used by the container runtime to identify
// a container.
ID types.UID
ID ContainerID
// The name of the container, which should be the same as specified by
// api.Container.
Name string

View File

@ -21,7 +21,7 @@ import (
docker "github.com/fsouza/go-dockerclient"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
)
// This file contains helper functions to convert docker API types to runtime
@ -38,7 +38,7 @@ func toRuntimeContainer(c *docker.APIContainers) (*kubecontainer.Container, erro
return nil, err
}
return &kubecontainer.Container{
ID: types.UID(c.ID),
ID: kubeletTypes.DockerID(c.ID).ContainerID(),
Name: dockerName.ContainerName,
Image: c.Image,
Hash: hash,

View File

@ -22,7 +22,6 @@ import (
docker "github.com/fsouza/go-dockerclient"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
func TestToRuntimeContainer(t *testing.T) {
@ -33,7 +32,7 @@ func TestToRuntimeContainer(t *testing.T) {
Names: []string{"/k8s_bar.5678_foo_ns_1234_42"},
}
expected := &kubecontainer.Container{
ID: types.UID("ab2cdf"),
ID: kubecontainer.ContainerID{"docker", "ab2cdf"},
Name: "bar",
Image: "bar_image",
Hash: 0x5678,

View File

@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
)
@ -171,10 +172,10 @@ func TestExecSupportNotExists(t *testing.T) {
func TestDockerContainerCommand(t *testing.T) {
runner := &DockerManager{}
containerID := "1234"
containerID := kubeletTypes.DockerID("1234").ContainerID()
command := []string{"ls"}
cmd, _ := runner.getRunInContainerCommand(containerID, command)
if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID {
if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID.ID {
t.Errorf("unexpected command CWD: %s", cmd.Dir)
}
if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
@ -517,7 +518,7 @@ type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
func TestFindContainersByPod(t *testing.T) {
tests := []struct {
@ -560,12 +561,12 @@ func TestFindContainersByPod(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "foobar",
ID: kubeletTypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: "baz",
ID: kubeletTypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
@ -577,7 +578,7 @@ func TestFindContainersByPod(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "barbar",
ID: kubeletTypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
@ -618,17 +619,17 @@ func TestFindContainersByPod(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "foobar",
ID: kubeletTypes.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
},
{
ID: "barfoo",
ID: kubeletTypes.DockerID("barfoo").ContainerID(),
Name: "barfoo",
Hash: 0x1234,
},
{
ID: "baz",
ID: kubeletTypes.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
},
@ -640,7 +641,7 @@ func TestFindContainersByPod(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "barbar",
ID: kubeletTypes.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
},
@ -652,7 +653,7 @@ func TestFindContainersByPod(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "bazbaz",
ID: kubeletTypes.DockerID("bazbaz").ContainerID(),
Name: "bazbaz",
Hash: 0x1234,
},

View File

@ -258,7 +258,7 @@ func (sc *reasonInfoCache) Get(uid types.UID, name string) (reasonInfo, bool) {
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
// "100" or "all") to tail the log.
// TODO: Make 'RawTerminal' option flagable.
func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
var since int64
if logOptions.SinceSeconds != nil {
t := unversioned.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second)
@ -268,7 +268,7 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID string, logO
since = logOptions.SinceTime.Unix()
}
opts := docker.LogsOptions{
Container: containerID,
Container: containerID.ID,
Stdout: true,
Stderr: true,
OutputStream: stdout,
@ -638,7 +638,7 @@ func (dm *DockerManager) runContainer(
netMode string,
ipcMode string,
utsMode string,
pidMode string) (string, error) {
pidMode string) (kubecontainer.ContainerID, error) {
dockerName := KubeletContainerName{
PodFullName: kubecontainer.GetPodFullName(pod),
@ -722,7 +722,7 @@ func (dm *DockerManager) runContainer(
if ref != nil {
dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
}
return "", err
return kubecontainer.ContainerID{}, err
}
if ref != nil {
@ -786,12 +786,12 @@ func (dm *DockerManager) runContainer(
dm.recorder.Eventf(ref, "Failed",
"Failed to start with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
}
return "", err
return kubecontainer.ContainerID{}, err
}
if ref != nil {
dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
}
return dockerContainer.ID, nil
return kubeletTypes.DockerID(dockerContainer.ID).ContainerID(), nil
}
func setEntrypointAndCommand(container *api.Container, opts *kubecontainer.RunContainerOptions, dockerOpts *docker.CreateContainerOptions) {
@ -933,7 +933,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine
networkMode := ""
var ports []api.ContainerPort
dockerPodInfraContainer, err := dm.client.InspectContainer(string(podInfraContainer.ID))
dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainer.ID.ID)
if err != nil {
return false, err
}
@ -1019,14 +1019,14 @@ func (dm *DockerManager) nativeExecSupportExists() (bool, error) {
return false, err
}
func (dm *DockerManager) getRunInContainerCommand(containerID string, cmd []string) (*exec.Cmd, error) {
func (dm *DockerManager) getRunInContainerCommand(containerID kubecontainer.ContainerID, cmd []string) (*exec.Cmd, error) {
args := append([]string{"exec"}, cmd...)
command := exec.Command("/usr/sbin/nsinit", args...)
command.Dir = fmt.Sprintf("/var/lib/docker/execdriver/native/%s", containerID)
command.Dir = fmt.Sprintf("/var/lib/docker/execdriver/native/%s", containerID.ID)
return command, nil
}
func (dm *DockerManager) runInContainerUsingNsinit(containerID string, cmd []string) ([]byte, error) {
func (dm *DockerManager) runInContainerUsingNsinit(containerID kubecontainer.ContainerID, cmd []string) ([]byte, error) {
c, err := dm.getRunInContainerCommand(containerID, cmd)
if err != nil {
return nil, err
@ -1035,8 +1035,7 @@ func (dm *DockerManager) runInContainerUsingNsinit(containerID string, cmd []str
}
// RunInContainer uses nsinit to run the command inside the container identified by containerID
// TODO(yifan): Use strong type for containerID.
func (dm *DockerManager) RunInContainer(containerID string, cmd []string) ([]byte, error) {
func (dm *DockerManager) RunInContainer(containerID kubecontainer.ContainerID, cmd []string) ([]byte, error) {
// If native exec support does not exist in the local docker daemon use nsinit.
useNativeExec, err := dm.nativeExecSupportExists()
if err != nil {
@ -1048,7 +1047,7 @@ func (dm *DockerManager) RunInContainer(containerID string, cmd []string) ([]byt
}
glog.V(2).Infof("Using docker native exec to run cmd %+v inside container %s", cmd, containerID)
createOpts := docker.CreateExecOptions{
Container: containerID,
Container: containerID.ID,
Cmd: cmd,
AttachStdin: false,
AttachStdout: true,
@ -1114,15 +1113,12 @@ func (d *dockerExitError) ExitStatus() int {
}
// ExecInContainer runs the command inside the container identified by containerID.
//
// TODO:
// - use strong type for containerId
func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (dm *DockerManager) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
if dm.execHandler == nil {
return errors.New("unable to exec without an exec handler")
}
container, err := dm.client.InspectContainer(containerId)
container, err := dm.client.InspectContainer(containerID.ID)
if err != nil {
return err
}
@ -1133,9 +1129,9 @@ func (dm *DockerManager) ExecInContainer(containerId string, cmd []string, stdin
return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty)
}
func (dm *DockerManager) AttachContainer(containerId string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (dm *DockerManager) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
opts := docker.AttachToContainerOptions{
Container: containerId,
Container: containerID.ID,
InputStream: stdin,
OutputStream: stdout,
ErrorStream: stderr,
@ -1166,7 +1162,7 @@ func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream
if podInfraContainer == nil {
return noPodInfraContainerError(pod.Name, pod.Namespace)
}
container, err := dm.client.InspectContainer(string(podInfraContainer.ID))
container, err := dm.client.InspectContainer(podInfraContainer.ID.ID)
if err != nil {
return err
}
@ -1283,7 +1279,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
}
wg.Wait()
if networkContainer != nil {
if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubeletTypes.DockerID(networkContainer.ID)); err != nil {
if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubeletTypes.DockerID(networkContainer.ID.ID)); err != nil {
glog.Errorf("Failed tearing down the infra container: %v", err)
errs <- err
}
@ -1305,9 +1301,9 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
// KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod,
// and will attempt to lookup the other information if missing.
func (dm *DockerManager) KillContainerInPod(containerID types.UID, container *api.Container, pod *api.Pod) error {
func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod) error {
switch {
case len(containerID) == 0:
case containerID.IsEmpty():
// Locate the container.
pods, err := dm.GetPods(false)
if err != nil {
@ -1322,7 +1318,7 @@ func (dm *DockerManager) KillContainerInPod(containerID types.UID, container *ap
case container == nil || pod == nil:
// Read information about the container from labels
inspect, err := dm.client.InspectContainer(string(containerID))
inspect, err := dm.client.InspectContainer(containerID.ID)
if err != nil {
return err
}
@ -1342,8 +1338,8 @@ func (dm *DockerManager) KillContainerInPod(containerID types.UID, container *ap
// killContainer accepts a containerID and an optional container or pod containing shutdown policies. Invoke
// KillContainerInPod if information must be retrieved first.
func (dm *DockerManager) killContainer(containerID types.UID, container *api.Container, pod *api.Pod) error {
ID := string(containerID)
func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod) error {
ID := containerID.ID
name := ID
if container != nil {
name = fmt.Sprintf("%s %s", name, container.Name)
@ -1370,7 +1366,7 @@ func (dm *DockerManager) killContainer(containerID types.UID, container *api.Con
go func() {
defer close(done)
defer util.HandleCrash()
if err := dm.runner.Run(ID, pod, container, container.Lifecycle.PreStop); err != nil {
if err := dm.runner.Run(containerID, pod, container, container.Lifecycle.PreStop); err != nil {
glog.Errorf("preStop hook for container %q failed: %v", name, err)
}
}()
@ -1397,13 +1393,13 @@ func (dm *DockerManager) killContainer(containerID types.UID, container *api.Con
} else {
glog.V(2).Infof("Container %q termination failed after %s: %v", name, unversioned.Now().Sub(start.Time), err)
}
ref, ok := dm.containerRefManager.GetRef(ID)
ref, ok := dm.containerRefManager.GetRef(containerID)
if !ok {
glog.Warningf("No ref for pod '%q'", name)
} else {
// TODO: pass reason down here, and state, or move this call up the stack.
dm.recorder.Eventf(ref, "Killing", "Killing with docker id %v", util.ShortenString(ID, 12))
dm.containerRefManager.ClearRef(ID)
dm.containerRefManager.ClearRef(containerID)
}
return err
}
@ -1451,7 +1447,7 @@ func containerAndPodFromLabels(inspect *docker.Container) (pod *api.Pod, contain
}
// Run a single container from a pod. Returns the docker container ID
func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode string) (kubeletTypes.DockerID, error) {
func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode string) (kubecontainer.ContainerID, error) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))
@ -1464,7 +1460,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
opts, err := dm.generator.GenerateRunContainerOptions(pod, container)
if err != nil {
return "", err
return kubecontainer.ContainerID{}, err
}
utsMode := ""
@ -1473,7 +1469,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
}
id, err := dm.runContainer(pod, container, opts, ref, netMode, ipcMode, utsMode, pidMode)
if err != nil {
return "", err
return kubecontainer.ContainerID{}, err
}
// Remember this reference so we can report events about this container
@ -1484,8 +1480,8 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
dm.KillContainerInPod(types.UID(id), container, pod)
return kubeletTypes.DockerID(""), fmt.Errorf("failed to call event handler: %v", handlerErr)
dm.KillContainerInPod(id, container, pod)
return kubecontainer.ContainerID{}, fmt.Errorf("failed to call event handler: %v", handlerErr)
}
}
@ -1494,20 +1490,20 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
// capture these symbolic filenames which can be used for search terms in Elasticsearch or for
// labels for Cloud Logging.
podFullName := kubecontainer.GetPodFullName(pod)
containerLogFile := path.Join(dm.dockerRoot, "containers", id, fmt.Sprintf("%s-json.log", id))
symlinkFile := LogSymlink(dm.containerLogsDir, podFullName, container.Name, id)
containerLogFile := path.Join(dm.dockerRoot, "containers", id.ID, fmt.Sprintf("%s-json.log", id.ID))
symlinkFile := LogSymlink(dm.containerLogsDir, podFullName, container.Name, id.ID)
if err = dm.os.Symlink(containerLogFile, symlinkFile); err != nil {
glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", podFullName, container.Name, err)
}
// Container information is used in adjusting OOM scores and adding ndots.
containerInfo, err := dm.client.InspectContainer(string(id))
containerInfo, err := dm.client.InspectContainer(id.ID)
if err != nil {
return "", err
return kubecontainer.ContainerID{}, err
}
// Ensure the PID actually exists, else we'll move ourselves.
if containerInfo.State.Pid == 0 {
return "", fmt.Errorf("failed to get init PID for Docker container %q", string(id))
return kubecontainer.ContainerID{}, fmt.Errorf("failed to get init PID for Docker container %q", id)
}
// Set OOM score of the container based on the priority of the container.
@ -1522,10 +1518,10 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
}
cgroupName, err := dm.procFs.GetFullContainerName(containerInfo.State.Pid)
if err != nil {
return "", err
return kubecontainer.ContainerID{}, err
}
if err = dm.oomAdjuster.ApplyOOMScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil {
return "", err
return kubecontainer.ContainerID{}, err
}
// currently, Docker does not have a flag by which the ndots option can be passed.
@ -1538,7 +1534,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
err = addNDotsOption(containerInfo.ResolvConfPath)
}
return kubeletTypes.DockerID(id), err
return id, err
}
func addNDotsOption(resolvFilePath string) error {
@ -1612,7 +1608,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.Doc
return "", err
}
return id, nil
return kubeletTypes.DockerID(id.ID), nil
}
// TODO(vmarmol): This will soon be made non-public when its only use is internal.
@ -1668,7 +1664,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
} else {
glog.V(4).Infof("Pod infra container looks good, keep it %q", podFullName)
createPodInfraContainer = false
podInfraContainerID = kubeletTypes.DockerID(podInfraContainer.ID)
podInfraContainerID = kubeletTypes.DockerID(podInfraContainer.ID.ID)
containersToKeep[podInfraContainerID] = -1
}
@ -1687,7 +1683,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
continue
}
containerID := kubeletTypes.DockerID(c.ID)
containerID := kubeletTypes.DockerID(c.ID.ID)
hash := c.Hash
glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID)
@ -1712,7 +1708,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
continue
}
result, err := dm.prober.ProbeLiveness(pod, podStatus, container, string(c.ID), c.Created)
result, err := dm.prober.ProbeLiveness(pod, podStatus, container, c.ID, c.Created)
if err != nil {
// TODO(vmarmol): examine this logic.
glog.V(2).Infof("probe no-error: %q", container.Name)
@ -1789,7 +1785,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
} else {
// Otherwise kill any containers in this pod which are not specified as ones to keep.
for _, container := range runningPod.Containers {
_, keep := containerChanges.ContainersToKeep[kubeletTypes.DockerID(container.ID)]
_, keep := containerChanges.ContainersToKeep[kubeletTypes.DockerID(container.ID.ID)]
if !keep {
glog.V(3).Infof("Killing unwanted container %+v", container)
// attempt to find the appropriate container policy
@ -2001,8 +1997,8 @@ func getIPCMode(pod *api.Pod) string {
}
// GetNetNs returns the network namespace path for the given container
func (dm *DockerManager) GetNetNs(containerID string) (string, error) {
inspectResult, err := dm.client.InspectContainer(string(containerID))
func (dm *DockerManager) GetNetNs(containerID kubecontainer.ContainerID) (string, error) {
inspectResult, err := dm.client.InspectContainer(containerID.ID)
if err != nil {
glog.Errorf("Error inspecting container: '%v'", err)
return "", err

View File

@ -346,7 +346,7 @@ func apiContainerToContainer(c docker.APIContainers) kubecontainer.Container {
return kubecontainer.Container{}
}
return kubecontainer.Container{
ID: types.UID(c.ID),
ID: kubecontainer.ContainerID{"docker", c.ID},
Name: dockerName.ContainerName,
Hash: hash,
}
@ -360,7 +360,7 @@ func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {
continue
}
pod.Containers = append(pod.Containers, &kubecontainer.Container{
ID: types.UID(c.ID),
ID: kubecontainer.ContainerID{"docker", c.ID},
Name: dockerName.ContainerName,
Hash: hash,
Image: c.Image,
@ -399,7 +399,7 @@ func TestKillContainerInPod(t *testing.T) {
containerToSpare := &containers[1]
fakeDocker.ContainerList = containers
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err != nil {
if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err != nil {
t.Errorf("unexpected error: %v", err)
}
// Assert the container has been stopped.
@ -464,7 +464,7 @@ func TestKillContainerInPodWithPreStop(t *testing.T) {
},
}
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err != nil {
if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err != nil {
t.Errorf("unexpected error: %v", err)
}
// Assert the container has been stopped.
@ -501,7 +501,7 @@ func TestKillContainerInPodWithError(t *testing.T) {
fakeDocker.ContainerList = containers
fakeDocker.Errors["stop"] = fmt.Errorf("sample error")
if err := manager.KillContainerInPod("", &pod.Spec.Containers[0], pod); err == nil {
if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err == nil {
t.Errorf("expected error, found nil")
}
}

View File

@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
var zero time.Time
@ -74,7 +73,7 @@ func makeImage(id int, size int64) container.Image {
// Make a container with the specified ID. It will use the image with the same ID.
func makeContainer(id int) *container.Container {
return &container.Container{
ID: types.UID(fmt.Sprintf("container-%d", id)),
ID: container.ContainerID{"test", fmt.Sprintf("container-%d", id)},
Image: imageName(id),
}
}
@ -322,7 +321,7 @@ func TestFreeSpaceImagesAlsoDoesLookupByRepoTags(t *testing.T) {
{
Containers: []*container.Container{
{
ID: "c5678",
ID: container.ContainerID{"test", "c5678"},
Image: "salad",
},
},

View File

@ -2073,25 +2073,25 @@ func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error {
return fmt.Errorf("pod is not in 'Running', 'Succeeded' or 'Failed' state - State: %q", podStatus.Phase)
}
func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerName string, previous bool) (containerID string, err error) {
func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
if !found {
return "", fmt.Errorf("container %q not found", containerName)
return kubecontainer.ContainerID{}, fmt.Errorf("container %q not found", containerName)
}
if previous {
if cStatus.LastTerminationState.Terminated == nil {
return "", fmt.Errorf("previous terminated container %q not found", containerName)
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q not found", containerName)
}
cID = cStatus.LastTerminationState.Terminated.ContainerID
} else {
if cStatus.State.Waiting != nil {
return "", fmt.Errorf("container %q is in waiting state.", containerName)
return kubecontainer.ContainerID{}, fmt.Errorf("container %q is in waiting state.", containerName)
}
cID = cStatus.ContainerID
}
return kubecontainer.TrimRuntimePrefix(cID), nil
return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
@ -2673,7 +2673,7 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return kl.runner.RunInContainer(string(container.ID), cmd)
return kl.runner.RunInContainer(container.ID, cmd)
}
// ExecInContainer executes a command in a container, connecting the supplied
@ -2688,7 +2688,7 @@ func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, contain
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return kl.runner.ExecInContainer(string(container.ID), cmd, stdin, stdout, stderr, tty)
return kl.runner.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty)
}
func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
@ -2701,7 +2701,7 @@ func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, contain
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return kl.containerRuntime.AttachContainer(string(container.ID), stdin, stdout, stderr, tty)
return kl.containerRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty)
}
// PortForward connects to the pod's port and copies data between the port
@ -2749,7 +2749,7 @@ func (kl *Kubelet) GetContainerInfo(podFullName string, podUID types.UID, contai
return nil, ErrContainerNotFound
}
ci, err := kl.cadvisor.DockerContainer(string(container.ID), req)
ci, err := kl.cadvisor.DockerContainer(container.ID.ID, req)
if err != nil {
return nil, err
}
@ -2800,10 +2800,10 @@ func (kl *Kubelet) GetRuntime() kubecontainer.Runtime {
// Proxy prober calls through the Kubelet to break the circular dependency between the runtime &
// prober.
// TODO: Remove this hack once the runtime no longer depends on the prober.
func (kl *Kubelet) ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
func (kl *Kubelet) ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, createdAt int64) (probe.Result, error) {
return kl.prober.ProbeLiveness(pod, status, container, containerID, createdAt)
}
func (kl *Kubelet) ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) {
func (kl *Kubelet) ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
return kl.prober.ProbeReadiness(pod, status, container, containerID)
}

View File

@ -586,7 +586,7 @@ func TestGetContainerInfo(t *testing.T) {
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: types.UID(containerID),
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
@ -668,7 +668,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "foo",
ID: types.UID(containerID),
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
@ -752,7 +752,7 @@ func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: types.UID("fakeID"),
ID: kubecontainer.ContainerID{"test", "fakeID"},
},
}},
}
@ -772,7 +772,7 @@ func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
type fakeContainerCommandRunner struct {
Cmd []string
ID string
ID kubecontainer.ContainerID
PodID types.UID
E error
Stdin io.Reader
@ -783,13 +783,13 @@ type fakeContainerCommandRunner struct {
Stream io.ReadWriteCloser
}
func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]byte, error) {
func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string) ([]byte, error) {
f.Cmd = cmd
f.ID = id
return []byte{}, f.E
}
func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
f.Cmd = cmd
f.ID = id
f.Stdin = in
@ -835,7 +835,7 @@ func TestRunInContainer(t *testing.T) {
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
containerID := "abc1234"
containerID := kubecontainer.ContainerID{"test", "abc1234"}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
@ -843,7 +843,7 @@ func TestRunInContainer(t *testing.T) {
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{Name: "containerFoo",
ID: types.UID(containerID),
ID: containerID,
},
},
},
@ -1865,7 +1865,7 @@ func TestExecInContainerNoSuchPod(t *testing.T) {
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
@ -1887,7 +1887,7 @@ func TestExecInContainerNoSuchContainer(t *testing.T) {
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: "barID"},
ID: kubecontainer.ContainerID{"test", "barID"}},
},
},
}
@ -1909,7 +1909,7 @@ func TestExecInContainerNoSuchContainer(t *testing.T) {
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
@ -1950,7 +1950,7 @@ func TestExecInContainer(t *testing.T) {
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: containerID,
ID: types.UID(containerID),
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
@ -1973,7 +1973,7 @@ func TestExecInContainer(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := containerID, fakeCommandRunner.ID; e != a {
if e, a := containerID, fakeCommandRunner.ID.ID; e != a {
t.Fatalf("container name: expected %q, got %q", e, a)
}
if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
@ -2014,7 +2014,7 @@ func TestPortForwardNoSuchPod(t *testing.T) {
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.PortForward")
}
}
@ -2035,7 +2035,7 @@ func TestPortForward(t *testing.T) {
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: "containerFoo",
ID: kubecontainer.ContainerID{"test", "containerFoo"},
},
},
},
@ -2847,7 +2847,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: types.UID(containerID),
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},

View File

@ -46,8 +46,7 @@ func NewHandlerRunner(httpGetter kubeletTypes.HttpGetter, commandRunner kubecont
}
}
// TODO(yifan): Use a strong type for containerID.
func (hr *HandlerRunner) Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error {
func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) error {
switch {
case handler.Exec != nil:
_, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command)

View File

@ -74,16 +74,16 @@ func TestResolvePortStringUnknown(t *testing.T) {
type fakeContainerCommandRunner struct {
Cmd []string
ID string
ID kubecontainer.ContainerID
}
func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]byte, error) {
func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string) ([]byte, error) {
f.Cmd = cmd
f.ID = id
return []byte{}, nil
}
func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
return nil
}
@ -95,7 +95,7 @@ func TestRunHandlerExec(t *testing.T) {
fakeCommandRunner := fakeContainerCommandRunner{}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil)
containerID := "abc1234"
containerID := kubecontainer.ContainerID{"test", "abc1234"}
containerName := "containerFoo"
container := api.Container{
@ -137,7 +137,7 @@ func TestRunHandlerHttp(t *testing.T) {
fakeHttp := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHttp, &fakeContainerCommandRunner{}, nil)
containerID := "abc1234"
containerID := kubecontainer.ContainerID{"test", "abc1234"}
containerName := "containerFoo"
container := api.Container{
@ -168,7 +168,7 @@ func TestRunHandlerHttp(t *testing.T) {
func TestRunHandlerNil(t *testing.T) {
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil)
containerID := "abc1234"
containerID := kubecontainer.ContainerID{"test", "abc1234"}
podName := "podFoo"
podNamespace := "nsFoo"
containerName := "containerFoo"

View File

@ -18,15 +18,17 @@ package cni
import (
"fmt"
"github.com/appc/cni/libcni"
cniTypes "github.com/appc/cni/pkg/types"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
"net"
"sort"
"strings"
"github.com/appc/cni/libcni"
cniTypes "github.com/appc/cni/pkg/types"
"github.com/golang/glog"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"
kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
@ -105,12 +107,12 @@ func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubel
if !ok {
return fmt.Errorf("CNI execution called on non-docker runtime")
}
netns, err := runtime.GetNetNs(string(id))
netns, err := runtime.GetNetNs(id.ContainerID())
if err != nil {
return err
}
_, err = plugin.defaultNetwork.addToNetwork(name, namespace, string(id), netns)
_, err = plugin.defaultNetwork.addToNetwork(name, namespace, id.ContainerID(), netns)
if err != nil {
glog.Errorf("Error while adding to cni network: %s", err)
return err
@ -124,12 +126,12 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku
if !ok {
return fmt.Errorf("CNI execution called on non-docker runtime")
}
netns, err := runtime.GetNetNs(string(id))
netns, err := runtime.GetNetNs(id.ContainerID())
if err != nil {
return err
}
return plugin.defaultNetwork.deleteFromNetwork(name, namespace, string(id), netns)
return plugin.defaultNetwork.deleteFromNetwork(name, namespace, id.ContainerID(), netns)
}
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
@ -150,7 +152,7 @@ func (plugin *cniNetworkPlugin) Status(namespace string, name string, id kubelet
return &network.PodNetworkStatus{IP: ip}, nil
}
func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID string, podNetnsPath string) (*cniTypes.Result, error) {
func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*cniTypes.Result, error) {
rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath)
if err != nil {
glog.Errorf("Error adding network: %v", err)
@ -168,7 +170,7 @@ func (network *cniNetwork) addToNetwork(podName string, podNamespace string, pod
return res, nil
}
func (network *cniNetwork) deleteFromNetwork(podName string, podNamespace string, podInfraContainerID string, podNetnsPath string) error {
func (network *cniNetwork) deleteFromNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) error {
rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath)
if err != nil {
glog.Errorf("Error deleting network: %v", err)
@ -185,18 +187,18 @@ func (network *cniNetwork) deleteFromNetwork(podName string, podNamespace string
return nil
}
func buildCNIRuntimeConf(podName string, podNs string, podInfraContainerID string, podNetnsPath string) (*libcni.RuntimeConf, error) {
func buildCNIRuntimeConf(podName string, podNs string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*libcni.RuntimeConf, error) {
glog.V(4).Infof("Got netns path %v", podNetnsPath)
glog.V(4).Infof("Using netns path %v", podNs)
rt := &libcni.RuntimeConf{
ContainerID: podInfraContainerID,
ContainerID: podInfraContainerID.ID,
NetNS: podNetnsPath,
IfName: DefaultInterfaceName,
Args: [][2]string{
{"K8S_POD_NAMESPACE", podNs},
{"K8S_POD_NAME", podName},
{"K8S_POD_INFRA_CONTAINER_ID", podInfraContainerID},
{"K8S_POD_INFRA_CONTAINER_ID", podInfraContainerID.ID},
},
}

View File

@ -18,6 +18,7 @@ package prober
import (
"k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/probe"
)
@ -29,14 +30,14 @@ type FakeProber struct {
Error error
}
func (f FakeProber) ProbeLiveness(_ *api.Pod, _ api.PodStatus, c api.Container, _ string, _ int64) (probe.Result, error) {
func (f FakeProber) ProbeLiveness(_ *api.Pod, _ api.PodStatus, c api.Container, _ kubecontainer.ContainerID, _ int64) (probe.Result, error) {
if c.LivenessProbe == nil {
return probe.Success, nil
}
return f.Liveness, f.Error
}
func (f FakeProber) ProbeReadiness(_ *api.Pod, _ api.PodStatus, c api.Container, _ string) (probe.Result, error) {
func (f FakeProber) ProbeReadiness(_ *api.Pod, _ api.PodStatus, c api.Container, _ kubecontainer.ContainerID) (probe.Result, error) {
if c.ReadinessProbe == nil {
return probe.Success, nil
}

View File

@ -141,7 +141,8 @@ func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) {
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessCache.getReadiness(kubecontainer.TrimRuntimePrefix(c.ContainerID)); ok {
} else if result, ok := m.readinessCache.getReadiness(
kubecontainer.ParseContainerID(c.ContainerID)); ok {
ready = result
} else {
// Then check whether there is a probe which hasn't run yet.

View File

@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/util/wait"
@ -151,35 +152,35 @@ func TestUpdatePodStatus(t *testing.T) {
const podUID = "pod_uid"
unprobed := api.ContainerStatus{
Name: "unprobed_container",
ContainerID: "unprobed_container_id",
ContainerID: "test://unprobed_container_id",
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
probedReady := api.ContainerStatus{
Name: "probed_container_ready",
ContainerID: "probed_container_ready_id",
ContainerID: "test://probed_container_ready_id",
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
probedPending := api.ContainerStatus{
Name: "probed_container_pending",
ContainerID: "probed_container_pending_id",
ContainerID: "test://probed_container_pending_id",
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
probedUnready := api.ContainerStatus{
Name: "probed_container_unready",
ContainerID: "probed_container_unready_id",
ContainerID: "test://probed_container_unready_id",
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
terminated := api.ContainerStatus{
Name: "terminated_container",
ContainerID: "terminated_container_id",
ContainerID: "test://terminated_container_id",
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
@ -199,9 +200,10 @@ func TestUpdatePodStatus(t *testing.T) {
containerPath{podUID, probedUnready.Name}: {},
containerPath{podUID, terminated.Name}: {},
}
m.readinessCache.setReadiness(probedReady.ContainerID, true)
m.readinessCache.setReadiness(probedUnready.ContainerID, false)
m.readinessCache.setReadiness(terminated.ContainerID, true)
m.readinessCache.setReadiness(kubecontainer.ParseContainerID(probedReady.ContainerID), true)
m.readinessCache.setReadiness(kubecontainer.ParseContainerID(probedUnready.ContainerID), false)
m.readinessCache.setReadiness(kubecontainer.ParseContainerID(terminated.ContainerID), true)
m.UpdatePodStatus(podUID, &podStatus)

View File

@ -41,8 +41,8 @@ const maxProbeRetries = 3
// Prober checks the healthiness of a container.
type Prober interface {
ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error)
ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error)
ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, createdAt int64) (probe.Result, error)
ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error)
}
// Prober helps to check the liveness/readiness of a container.
@ -75,7 +75,7 @@ func New(
// ProbeLiveness probes the liveness of a container.
// If the initial delay since container creation on the liveness probe has not passed, the probe will return probe.Success.
func (pb *prober) ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
func (pb *prober) ProbeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, createdAt int64) (probe.Result, error) {
var live probe.Result
var output string
var err error
@ -114,7 +114,7 @@ func (pb *prober) ProbeLiveness(pod *api.Pod, status api.PodStatus, container ap
}
// ProbeReadiness probes and sets the readiness of a container.
func (pb *prober) ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) {
func (pb *prober) ProbeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, error) {
var ready probe.Result
var output string
var err error
@ -151,7 +151,7 @@ func (pb *prober) ProbeReadiness(pod *api.Pod, status api.PodStatus, container a
// runProbeWithRetries tries to probe the container in a finite loop, it returns the last result
// if it never succeeds.
func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retries int) (probe.Result, string, error) {
func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
var err error
var result probe.Result
var output string
@ -164,7 +164,7 @@ func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Pod
return result, output, err
}
func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, string, error) {
func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil {
glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command)
@ -242,7 +242,7 @@ type execInContainer struct {
run func() ([]byte, error)
}
func (p *prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string, cmd []string) exec.Cmd {
func (p *prober) newExecInContainer(pod *api.Pod, container api.Container, containerID kubecontainer.ContainerID, cmd []string) exec.Cmd {
return execInContainer{func() ([]byte, error) {
return p.runner.RunInContainer(containerID, cmd)
}}

View File

@ -183,7 +183,7 @@ func TestProbeContainer(t *testing.T) {
refManager: kubecontainer.NewRefManager(),
recorder: &record.FakeRecorder{},
}
containerID := "foobar"
containerID := kubecontainer.ContainerID{"test", "foobar"}
createdAt := time.Now().Unix()
tests := []struct {

View File

@ -16,7 +16,11 @@ limitations under the License.
package prober
import "sync"
import (
"sync"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// readinessManager maintains the readiness information(probe results) of
// containers over time to allow for implementation of health thresholds.
@ -24,20 +28,19 @@ import "sync"
type readinessManager struct {
// guards states
sync.RWMutex
// TODO(yifan): To use strong type.
states map[string]bool
states map[kubecontainer.ContainerID]bool
}
// newReadinessManager creates and returns a readiness manager with empty
// contents.
func newReadinessManager() *readinessManager {
return &readinessManager{states: make(map[string]bool)}
return &readinessManager{states: make(map[kubecontainer.ContainerID]bool)}
}
// getReadiness returns the readiness value for the container with the given ID.
// If the readiness value is found, returns it.
// If the readiness is not found, returns false.
func (r *readinessManager) getReadiness(id string) (ready bool, found bool) {
func (r *readinessManager) getReadiness(id kubecontainer.ContainerID) (ready bool, found bool) {
r.RLock()
defer r.RUnlock()
state, found := r.states[id]
@ -45,14 +48,14 @@ func (r *readinessManager) getReadiness(id string) (ready bool, found bool) {
}
// setReadiness sets the readiness value for the container with the given ID.
func (r *readinessManager) setReadiness(id string, value bool) {
func (r *readinessManager) setReadiness(id kubecontainer.ContainerID, value bool) {
r.Lock()
defer r.Unlock()
r.states[id] = value
}
// removeReadiness clears the readiness value for the container with the given ID.
func (r *readinessManager) removeReadiness(id string) {
func (r *readinessManager) removeReadiness(id kubecontainer.ContainerID) {
r.Lock()
defer r.Unlock()
delete(r.states, id)

View File

@ -24,7 +24,6 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
)
@ -47,7 +46,7 @@ type worker struct {
spec *api.Probe
// The last known container ID for this worker.
containerID types.UID
containerID kubecontainer.ContainerID
}
// Creates and starts a new probe worker.
@ -75,8 +74,8 @@ func run(m *manager, w *worker) {
defer func() {
// Clean up.
probeTicker.Stop()
if w.containerID != "" {
m.readinessCache.removeReadiness(string(w.containerID))
if !w.containerID.IsEmpty() {
m.readinessCache.removeReadiness(w.containerID)
}
m.removeReadinessProbe(w.pod.UID, w.container.Name)
@ -121,17 +120,17 @@ func doProbe(m *manager, w *worker) (keepGoing bool) {
return true // Wait for more information.
}
if w.containerID != types.UID(c.ContainerID) {
if w.containerID != "" {
m.readinessCache.removeReadiness(string(w.containerID))
if w.containerID.String() != c.ContainerID {
if !w.containerID.IsEmpty() {
m.readinessCache.removeReadiness(w.containerID)
}
w.containerID = types.UID(kubecontainer.TrimRuntimePrefix(c.ContainerID))
w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
}
if c.State.Running == nil {
glog.V(3).Infof("Non-running container probed: %v - %v",
kubeutil.FormatPodName(w.pod), w.container.Name)
m.readinessCache.setReadiness(string(w.containerID), false)
m.readinessCache.setReadiness(w.containerID, false)
// Abort if the container will not be restarted.
return c.State.Terminated == nil ||
w.pod.Spec.RestartPolicy != api.RestartPolicyNever
@ -139,14 +138,14 @@ func doProbe(m *manager, w *worker) (keepGoing bool) {
if int64(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
// Readiness defaults to false during the initial delay.
m.readinessCache.setReadiness(string(w.containerID), false)
m.readinessCache.setReadiness(w.containerID, false)
return true
}
// TODO: Move error handling out of prober.
result, _ := m.prober.ProbeReadiness(w.pod, status, w.container, string(w.containerID))
result, _ := m.prober.ProbeReadiness(w.pod, status, w.container, w.containerID)
if result != probe.Unknown {
m.readinessCache.setReadiness(string(w.containerID), result != probe.Failure)
m.readinessCache.setReadiness(w.containerID, result != probe.Failure)
}
return true

View File

@ -22,15 +22,17 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/probe"
)
const (
containerID = "cOnTaInEr_Id"
containerName = "cOnTaInEr_NaMe"
podUID = "pOd_UiD"
)
var containerID = kubecontainer.ContainerID{"test", "cOnTaInEr_Id"}
func TestDoProbe(t *testing.T) {
m := newTestManager()
@ -204,7 +206,7 @@ func newTestWorker(probeSpec api.Probe) *worker {
func getRunningStatus() api.PodStatus {
containerStatus := api.ContainerStatus{
Name: containerName,
ContainerID: containerID,
ContainerID: containerID.String(),
}
containerStatus.State.Running = &api.ContainerStateRunning{unversioned.Now()}
podStatus := api.PodStatus{
@ -231,10 +233,10 @@ func getTestPod(probeSpec api.Probe) api.Pod {
type CrashingProber struct{}
func (f CrashingProber) ProbeLiveness(_ *api.Pod, _ api.PodStatus, c api.Container, _ string, _ int64) (probe.Result, error) {
func (f CrashingProber) ProbeLiveness(_ *api.Pod, _ api.PodStatus, c api.Container, _ kubecontainer.ContainerID, _ int64) (probe.Result, error) {
panic("Intentional ProbeLiveness crash.")
}
func (f CrashingProber) ProbeReadiness(_ *api.Pod, _ api.PodStatus, c api.Container, _ string) (probe.Result, error) {
func (f CrashingProber) ProbeReadiness(_ *api.Pod, _ api.PodStatus, c api.Container, _ kubecontainer.ContainerID) (probe.Result, error) {
panic("Intentional ProbeReadiness crash.")
}

View File

@ -19,6 +19,8 @@ package rkt
import (
"fmt"
"strings"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// containerID defines the ID of rkt containers, it will
@ -29,17 +31,22 @@ type containerID struct {
appName string // Name of the app in that pod.
}
const RktType = "rkt"
// buildContainerID constructs the container's ID using containerID,
// which consists of the pod uuid and the container name.
// The result can be used to uniquely identify a container.
func buildContainerID(c *containerID) string {
return fmt.Sprintf("%s:%s", c.uuid, c.appName)
func buildContainerID(c *containerID) kubecontainer.ContainerID {
return kubecontainer.ContainerID{
Type: RktType,
ID: fmt.Sprintf("%s:%s", c.uuid, c.appName),
}
}
// parseContainerID parses the containerID into pod uuid and the container name. The
// results can be used to get more information of the container.
func parseContainerID(id string) (*containerID, error) {
tuples := strings.Split(id, ":")
func parseContainerID(id kubecontainer.ContainerID) (*containerID, error) {
tuples := strings.Split(id.ID, ":")
if len(tuples) != 2 {
return nil, fmt.Errorf("rkt: cannot parse container ID for: %v", id)
}

View File

@ -143,7 +143,7 @@ func makeContainerStatus(container *kubecontainer.Container, podInfo *podInfo) a
var status api.ContainerStatus
status.Name = container.Name
status.Image = container.Image
status.ContainerID = string(container.ID)
status.ContainerID = container.ID.String()
// TODO(yifan): Add image ID info.
switch podInfo.state {

View File

@ -513,7 +513,7 @@ func apiPodToruntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod {
for i := range pod.Spec.Containers {
c := &pod.Spec.Containers[i]
p.Containers = append(p.Containers, &kubecontainer.Container{
ID: types.UID(buildContainerID(&containerID{uuid, c.Name})),
ID: buildContainerID(&containerID{uuid, c.Name}),
Name: c.Name,
Image: c.Image,
Hash: kubecontainer.HashContainer(c),
@ -646,7 +646,7 @@ func (r *Runtime) preparePod(pod *api.Pod, pullSecrets []api.Secret) (string, *k
func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, failure error) {
// Set up container references.
for _, c := range runtimePod.Containers {
containerID := string(c.ID)
containerID := c.ID
id, err := parseContainerID(containerID)
if err != nil {
glog.Warningf("Invalid container ID %q", containerID)
@ -697,7 +697,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
r.recorder.Eventf(ref, "Failed", "Failed to create rkt container with error: %v", prepareErr)
continue
}
containerID := string(runtimePod.Containers[i].ID)
containerID := runtimePod.Containers[i].ID
r.containerRefManager.SetRef(containerID, ref)
}
@ -802,8 +802,7 @@ func (r *Runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
serviceName := makePodServiceFileName(runningPod.ID)
r.generateEvents(&runningPod, "Killing", nil)
for _, c := range runningPod.Containers {
id := string(c.ID)
r.containerRefManager.ClearRef(id)
r.containerRefManager.ClearRef(c.ID)
}
// Since all service file have 'KillMode=mixed', the processes in
@ -989,7 +988,7 @@ func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
podFullName := kubeletUtil.FormatPodName(pod)
// Add references to all containers.
unidentifiedContainers := make(map[types.UID]*kubecontainer.Container)
unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
for _, c := range runningPod.Containers {
unidentifiedContainers[c.ID] = c
}
@ -1020,7 +1019,7 @@ func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
break
}
result, err := r.prober.ProbeLiveness(pod, podStatus, container, string(c.ID), c.Created)
result, err := r.prober.ProbeLiveness(pod, podStatus, container, c.ID, c.Created)
// TODO(vmarmol): examine this logic.
if err == nil && result != probe.Success {
glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
@ -1062,7 +1061,7 @@ func (r *Runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
// See https://github.com/coreos/rkt/blob/master/Documentation/commands.md#logging for more details.
//
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *Runtime) GetContainerLogs(pod *api.Pod, containerID string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error {
func (r *Runtime) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error {
id, err := parseContainerID(containerID)
if err != nil {
return err
@ -1098,7 +1097,7 @@ func (r *Runtime) GarbageCollect() error {
// Note: In rkt, the container ID is in the form of "UUID:appName", where
// appName is the container name.
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *Runtime) RunInContainer(containerID string, cmd []string) ([]byte, error) {
func (r *Runtime) RunInContainer(containerID kubecontainer.ContainerID, cmd []string) ([]byte, error) {
glog.V(4).Infof("Rkt running in container.")
id, err := parseContainerID(containerID)
@ -1112,14 +1111,14 @@ func (r *Runtime) RunInContainer(containerID string, cmd []string) ([]byte, erro
return []byte(strings.Join(result, "\n")), err
}
func (r *Runtime) AttachContainer(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (r *Runtime) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
return fmt.Errorf("unimplemented")
}
// Note: In rkt, the container ID is in the form of "UUID:appName", where UUID is
// the rkt UUID, and appName is the container name.
// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
func (r *Runtime) ExecInContainer(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
func (r *Runtime) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
glog.V(4).Infof("Rkt execing in container.")
id, err := parseContainerID(containerID)

View File

@ -21,6 +21,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// TODO: Reconcile custom types in kubelet/types and this subpackage
@ -28,6 +29,13 @@ import (
// DockerID is an ID of docker container. It is a type to make it clear when we're working with docker container Ids
type DockerID string
func (id DockerID) ContainerID() kubecontainer.ContainerID {
return kubecontainer.ContainerID{
Type: "docker",
ID: string(id),
}
}
type HttpGetter interface {
Get(url string) (*http.Response, error)
}