Simplify containerd runtime configuration for gVisor
The previous code had a copy of `config.toml` which is easy to get out of sync. Instead, append the necessary configuration to the existing file to retain the rest of the configuration. Also changed the location where `runsc` is downloaded to use the latest release instead of hardcoding to a release which gets old quickly.
pull/14996/head
parent
f6a20ae94f
commit
0b041f573b
|
@ -109,7 +109,7 @@ var (
|
|||
LogviewerAssets embed.FS
|
||||
|
||||
// GvisorAssets assets for gvisor addon
|
||||
//go:embed gvisor/*.tmpl gvisor/*.toml
|
||||
//go:embed gvisor/*.tmpl
|
||||
GvisorAssets embed.FS
|
||||
|
||||
// HelmTillerAssets assets for helm-tiller addon
|
||||
|
|
|
@ -71,4 +71,4 @@ NAME READY STATUS RESTARTS AGE
|
|||
gvisor 1/1 Terminating 0 5m
|
||||
```
|
||||
|
||||
_Note: Once gVisor is disabled, any pod with the `gvisor` Runtime Class or `io.kubernetes.cri.untrusted-workload` annotation will fail with a FailedCreatePodSandBox error._
|
||||
_Note: Once gVisor is disabled, any pod with the `gvisor` Runtime Class will fail with a FailedCreatePodSandBox error._
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
oom_score = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
max_recv_message_size = 16777216
|
||||
max_send_message_size = 16777216
|
||||
|
||||
[debug]
|
||||
address = ""
|
||||
uid = 0
|
||||
gid = 0
|
||||
level = ""
|
||||
|
||||
[metrics]
|
||||
address = ""
|
||||
grpc_histogram = false
|
||||
|
||||
[cgroup]
|
||||
path = ""
|
||||
|
||||
[plugins]
|
||||
[plugins.cgroups]
|
||||
no_prometheus = false
|
||||
[plugins.cri]
|
||||
stream_server_address = ""
|
||||
stream_server_port = "10010"
|
||||
enable_selinux = false
|
||||
sandbox_image = "{{default "registry.k8s.io" .ImageRepository}}/pause:3.1"
|
||||
stats_collect_period = 10
|
||||
systemd_cgroup = false
|
||||
enable_tls_streaming = false
|
||||
max_container_log_line_size = 16384
|
||||
[plugins.cri.containerd]
|
||||
snapshotter = "overlayfs"
|
||||
no_pivot = false
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
runtime_engine = ""
|
||||
runtime_root = ""
|
||||
[plugins.cri.containerd.runtimes.untrusted]
|
||||
runtime_type = "io.containerd.runsc.v1"
|
||||
[plugins.cri.containerd.runtimes.runsc]
|
||||
runtime_type = "io.containerd.runsc.v1"
|
||||
[plugins.cri.cni]
|
||||
bin_dir = "/opt/cni/bin"
|
||||
conf_dir = "/etc/cni/net.d"
|
||||
conf_template = ""
|
||||
[plugins.cri.registry]
|
||||
[plugins.cri.registry.mirrors]
|
||||
[plugins.cri.registry.mirrors."docker.io"]
|
||||
endpoint = ["https://registry-1.docker.io"]
|
||||
[plugins.diff-service]
|
||||
default = ["walking"]
|
||||
[plugins.linux]
|
||||
runtime = "runc"
|
||||
runtime_root = ""
|
||||
no_shim = false
|
||||
shim_debug = true
|
||||
[plugins.scheduler]
|
||||
pause_threshold = 0.02
|
||||
deletion_threshold = 0
|
||||
mutation_threshold = 100
|
||||
schedule_delay = "0s"
|
||||
startup_delay = "100ms"
|
|
@ -28,11 +28,11 @@ import (
|
|||
// Disable reverts containerd config files and restarts containerd
|
||||
func Disable() error {
|
||||
log.Print("Disabling gvisor...")
|
||||
if err := os.Remove(filepath.Join(nodeDir, containerdConfigTomlPath)); err != nil {
|
||||
return errors.Wrapf(err, "removing %s", containerdConfigTomlPath)
|
||||
if err := os.Remove(filepath.Join(nodeDir, containerdConfigPath)); err != nil {
|
||||
return errors.Wrapf(err, "removing %s", containerdConfigPath)
|
||||
}
|
||||
log.Printf("Restoring default config.toml at %s", containerdConfigTomlPath)
|
||||
if err := mcnutils.CopyFile(filepath.Join(nodeDir, storedContainerdConfigTomlPath), filepath.Join(nodeDir, containerdConfigTomlPath)); err != nil {
|
||||
log.Printf("Restoring default config.toml at %s", containerdConfigPath)
|
||||
if err := mcnutils.CopyFile(filepath.Join(nodeDir, containerdConfigBackupPath), filepath.Join(nodeDir, containerdConfigPath)); err != nil {
|
||||
return errors.Wrap(err, "reverting back to default config.toml")
|
||||
}
|
||||
// restart containerd
|
||||
|
|
|
@ -29,23 +29,28 @@ import (
|
|||
|
||||
"github.com/docker/machine/libmachine/mcnutils"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/assets"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeDir = "/node"
|
||||
containerdConfigTomlPath = "/etc/containerd/config.toml"
|
||||
storedContainerdConfigTomlPath = "/tmp/config.toml"
|
||||
gvisorContainerdShimURL = "https://github.com/google/gvisor-containerd-shim/releases/download/v0.0.3/containerd-shim-runsc-v1.linux-amd64"
|
||||
gvisorURL = "https://storage.googleapis.com/gvisor/releases/nightly/2020-02-14/runsc"
|
||||
nodeDir = "/node"
|
||||
containerdConfigPath = "/etc/containerd/config.toml"
|
||||
containerdConfigBackupPath = "/tmp/containerd-config.toml.bak"
|
||||
|
||||
releaseURL = "https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/"
|
||||
shimURL = releaseURL + "containerd-shim-runsc-v1"
|
||||
gvisorURL = releaseURL + "runsc"
|
||||
|
||||
configFragment = `
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
|
||||
runtime_type = "io.containerd.runsc.v1"
|
||||
pod_annotations = [ "dev.gvisor.*" ]
|
||||
`
|
||||
)
|
||||
|
||||
// Enable follows these steps for enabling gvisor in minikube:
|
||||
// 1. creates necessary directories for storing binaries and runsc logs
|
||||
// 2. downloads runsc and gvisor-containerd-shim
|
||||
// 3. copies necessary containerd config files
|
||||
// 3. configures containerd
|
||||
// 4. restarts containerd
|
||||
func Enable() error {
|
||||
if err := makeGvisorDirs(); err != nil {
|
||||
|
@ -54,7 +59,7 @@ func Enable() error {
|
|||
if err := downloadBinaries(); err != nil {
|
||||
return errors.Wrap(err, "downloading binaries")
|
||||
}
|
||||
if err := copyConfigFiles(); err != nil {
|
||||
if err := configure(); err != nil {
|
||||
return errors.Wrap(err, "copying config files")
|
||||
}
|
||||
if err := restartContainerd(); err != nil {
|
||||
|
@ -106,7 +111,7 @@ func downloadBinaries() error {
|
|||
// downloads the gvisor-containerd-shim
|
||||
func gvisorContainerdShim() error {
|
||||
dest := filepath.Join(nodeDir, "usr/bin/containerd-shim-runsc-v1")
|
||||
return downloadFileToDest(gvisorContainerdShimURL, dest)
|
||||
return downloadFileToDest(shimURL, dest)
|
||||
}
|
||||
|
||||
// downloads the runsc binary and returns a path to the binary
|
||||
|
@ -148,54 +153,23 @@ func downloadFileToDest(url, dest string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Must write the following files:
|
||||
// 1. gvisor-containerd-shim.toml
|
||||
// 2. gvisor containerd config.toml
|
||||
//
|
||||
// and save the default version of config.toml
|
||||
func copyConfigFiles() error {
|
||||
log.Printf("Storing default config.toml at %s", storedContainerdConfigTomlPath)
|
||||
if err := mcnutils.CopyFile(filepath.Join(nodeDir, containerdConfigTomlPath), filepath.Join(nodeDir, storedContainerdConfigTomlPath)); err != nil {
|
||||
// configure changes containerd `config.toml` file to include runsc runtime. A
|
||||
// copy of the original file is stored under `/tmp` to be restored when this
|
||||
// plugin is disabled.
|
||||
func configure() error {
|
||||
log.Printf("Storing default config.toml at %s", containerdConfigBackupPath)
|
||||
configPath := filepath.Join(nodeDir, containerdConfigPath)
|
||||
if err := mcnutils.CopyFile(configPath, filepath.Join(nodeDir, containerdConfigBackupPath)); err != nil {
|
||||
return errors.Wrap(err, "copying default config.toml")
|
||||
}
|
||||
log.Printf("Copying %s asset to %s", constants.GvisorConfigTomlTargetName, filepath.Join(nodeDir, containerdConfigTomlPath))
|
||||
if err := copyAssetToDest(constants.GvisorConfigTomlTargetName, filepath.Join(nodeDir, containerdConfigTomlPath)); err != nil {
|
||||
return errors.Wrap(err, "copying gvisor version of config.toml")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyAssetToDest(targetName, dest string) error {
|
||||
var asset *assets.BinAsset
|
||||
for _, a := range assets.Addons["gvisor"].Assets {
|
||||
if a.GetTargetName() == targetName {
|
||||
asset = a
|
||||
}
|
||||
}
|
||||
if asset == nil {
|
||||
return fmt.Errorf("no asset matching target %s among %+v", targetName, assets.Addons["gvisor"])
|
||||
}
|
||||
|
||||
// Now, copy the data from this asset to dest
|
||||
src := filepath.Join(vmpath.GuestGvisorDir, asset.GetTargetName())
|
||||
log.Printf("%s asset path: %s", targetName, src)
|
||||
contents, err := os.ReadFile(src)
|
||||
// Append runsc configuration to containerd config.
|
||||
config, err := os.OpenFile(configPath, os.O_WRONLY|os.O_APPEND, 0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "getting contents of %s", asset.GetSourcePath())
|
||||
return err
|
||||
}
|
||||
if _, err := os.Stat(dest); err == nil {
|
||||
if err := os.Remove(dest); err != nil {
|
||||
return errors.Wrapf(err, "removing %s", dest)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("creating %s", dest)
|
||||
f, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "creating %s", dest)
|
||||
}
|
||||
if _, err := f.Write(contents); err != nil {
|
||||
return errors.Wrapf(err, "writing contents to %s", f.Name())
|
||||
if _, err := config.WriteString(configFragment); err != nil {
|
||||
return errors.Wrap(err, "changing config.toml")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -22,12 +22,11 @@ import (
|
|||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
semver "github.com/blang/semver/v4"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/minikube/deploy/addons"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/constants"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
"k8s.io/minikube/pkg/minikube/vmpath"
|
||||
"k8s.io/minikube/pkg/util"
|
||||
|
@ -502,11 +501,6 @@ var Addons = map[string]*Addon{
|
|||
vmpath.GuestAddonsDir,
|
||||
"gvisor-runtimeclass.yaml",
|
||||
"0640"),
|
||||
MustBinAsset(addons.GvisorAssets,
|
||||
"gvisor/gvisor-config.toml",
|
||||
vmpath.GuestGvisorDir,
|
||||
constants.GvisorConfigTomlTargetName,
|
||||
"0640"),
|
||||
}, false, "gvisor", "minikube", "", "https://github.com/kubernetes/minikube/blob/master/deploy/addons/gvisor/README.md", map[string]string{
|
||||
"GvisorAddon": "k8s-minikube/gvisor-addon:3@sha256:23eb17d48a66fc2b09c31454fb54ecae520c3e9c9197ef17fcb398b4f31d505a",
|
||||
}, map[string]string{
|
||||
|
|
|
@ -162,8 +162,6 @@ const (
|
|||
var (
|
||||
// IsMinikubeChildProcess is the name of "is minikube child process" variable
|
||||
IsMinikubeChildProcess = "IS_MINIKUBE_CHILD_PROCESS"
|
||||
// GvisorConfigTomlTargetName is the go-bindata target name for the gvisor config.toml
|
||||
GvisorConfigTomlTargetName = "gvisor-config.toml"
|
||||
// MountProcessFileName is the filename of the mount process
|
||||
MountProcessFileName = ".mount-process"
|
||||
|
||||
|
|
|
@ -69,20 +69,12 @@ func TestGvisorAddon(t *testing.T) {
|
|||
t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err)
|
||||
}
|
||||
|
||||
// Create an untrusted workload
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
// Create gvisor workload
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml")))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
|
||||
t.Errorf("failed waiting for nginx pod: %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("failed waitinf for gvisor pod: %v", err)
|
||||
}
|
||||
|
@ -100,9 +92,6 @@ func TestGvisorAddon(t *testing.T) {
|
|||
if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("failed waiting for 'gvisor controller' pod : %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
|
||||
t.Errorf("failed waiting for 'nginx' pod : %v", err)
|
||||
}
|
||||
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
|
||||
t.Errorf("failed waiting for 'gvisor' pod : %v", err)
|
||||
}
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-untrusted
|
||||
labels:
|
||||
run: nginx
|
||||
untrusted: "true"
|
||||
annotations:
|
||||
io.kubernetes.cri.untrusted-workload: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: docker.io/nginx
|
Loading…
Reference in New Issue