Merge branch 'master' into defaults

pull/3617/head
Anders F Björklund 2019-02-09 18:25:27 +01:00
commit 78806674f3
70 changed files with 4784 additions and 601 deletions

Gopkg.lock (generated)

@ -61,7 +61,7 @@
[[projects]]
branch = "master"
digest = "1:031cfe09be23486aed11668813dd112b5f79ed9926ca411bb3dac32e9e20fc4a"
digest = "1:47b479ee07f66c92682bcf27e3a65b411b7ba52bfc28fd0b6b74a742620fc61a"
name = "github.com/docker/machine"
packages = [
"commands/mcndirs",
@ -98,7 +98,7 @@
"version",
]
pruneopts = "NUT"
revision = "19035310d4ba1b58056aae427ea669d1db5fc618"
revision = "a773edc6f013c9fab13360fea0192fd335023a16"
source = "github.com/machine-drivers/machine"
[[projects]]
@ -160,6 +160,19 @@
pruneopts = "NUT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value",
]
pruneopts = "NUT"
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
version = "v0.2.0"
[[projects]]
digest = "1:186f7de0e878b5ff1fca82271ce36a7abf9747be09d03b3f08a921584c2f26fc"
name = "github.com/google/go-containerregistry"
@ -285,6 +298,14 @@
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
digest = "1:3526ea57285a47f99273a9b00252869d06888b08cd7358d6f89c35db6ec1605b"
name = "github.com/intel-go/cpuid"
packages = ["."]
pruneopts = "NUT"
revision = "1a4a6f06a1c643c8fbd339bd61d980960627d09e"
[[projects]]
branch = "master"
digest = "1:29b8c51f736120581688521c42f4772cc42ab7942402ee38877be23bed001ede"
@ -925,6 +946,7 @@
"github.com/docker/machine/libmachine/swarm",
"github.com/docker/machine/libmachine/version",
"github.com/golang/glog",
"github.com/google/go-cmp/cmp",
"github.com/google/go-containerregistry/pkg/authn",
"github.com/google/go-containerregistry/pkg/name",
"github.com/google/go-containerregistry/pkg/v1/remote",
@ -935,6 +957,7 @@
"github.com/johanneswuerbach/nfsexports",
"github.com/libvirt/libvirt-go",
"github.com/machine-drivers/docker-machine-driver-vmware/pkg/drivers/vmware/config",
"github.com/mitchellh/go-ps",
"github.com/moby/hyperkit/go",
"github.com/olekukonko/tablewriter",
"github.com/pborman/uuid",


@ -102,7 +102,15 @@
name = "github.com/json-iterator/go"
version = "1.1.3-22-gf2b4162"
[[override]]
branch = "master"
name = "github.com/intel-go/cpuid"
[prune]
go-tests = true
non-go = true
unused-packages = true
[[constraint]]
name = "github.com/google/go-cmp"
version = "0.2.0"


@ -126,7 +126,7 @@ minikube_iso: # old target kept for making tests happy
echo $(ISO_VERSION) > deploy/iso/minikube-iso/board/coreos/minikube/rootfs-overlay/etc/VERSION
if [ ! -d $(BUILD_DIR)/buildroot ]; then \
mkdir -p $(BUILD_DIR); \
git clone --branch=$(BUILDROOT_BRANCH) https://github.com/buildroot/buildroot $(BUILD_DIR)/buildroot; \
git clone --depth=1 --branch=$(BUILDROOT_BRANCH) https://github.com/buildroot/buildroot $(BUILD_DIR)/buildroot; \
fi;
$(MAKE) BR2_EXTERNAL=../../deploy/iso/minikube-iso minikube_defconfig -C $(BUILD_DIR)/buildroot
$(MAKE) -C $(BUILD_DIR)/buildroot

OWNERS

@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- dlorenc
- balopat


@ -95,11 +95,11 @@ func SetBool(m config.MinikubeConfig, name string, val string) error {
return nil
}
// EnableOrDisableAddon updates addon status executing any commands necessary
func EnableOrDisableAddon(name string, val string) error {
enable, err := strconv.ParseBool(val)
if err != nil {
errors.Wrapf(err, "error attempted to parse enabled/disable value addon %s", name)
return errors.Wrapf(err, "parsing bool: %s", name)
}
//TODO(r2d4): config package should not reference API, pull this out
@ -111,28 +111,25 @@ func EnableOrDisableAddon(name string, val string) error {
defer api.Close()
cluster.EnsureMinikubeRunningOrExit(api, 0)
addon, _ := assets.Addons[name] // validation done prior
if err != nil {
return err
}
addon := assets.Addons[name]
host, err := cluster.CheckIfHostExistsAndLoad(api, config.GetMachineName())
if err != nil {
return errors.Wrap(err, "getting host")
}
cmd, err := machine.GetCommandRunner(host)
cmd, err := machine.CommandRunner(host)
if err != nil {
return errors.Wrap(err, "getting command runner")
return errors.Wrap(err, "command runner")
}
if enable {
for _, addon := range addon.Assets {
if err := cmd.Copy(addon); err != nil {
return errors.Wrapf(err, "error enabling addon %s", addon.AssetName)
return errors.Wrapf(err, "enabling addon %s", addon.AssetName)
}
}
} else {
for _, addon := range addon.Assets {
if err := cmd.Remove(addon); err != nil {
return errors.Wrapf(err, "error disabling addon %s", addon.AssetName)
return errors.Wrapf(err, "disabling addon %s", addon.AssetName)
}
}
}


@ -24,11 +24,12 @@ import (
"strconv"
"strings"
"github.com/docker/go-units"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
)
func IsValidDriver(string, driver string) error {
@ -128,12 +129,18 @@ func IsValidAddon(name string, val string) error {
return errors.Errorf("Cannot enable/disable invalid addon %s", name)
}
// IsContainerdRuntime is a validator which returns an error if the current runtime is not containerd
func IsContainerdRuntime(_, _ string) error {
config, err := config.Load()
if err != nil {
return fmt.Errorf("error getting cluster config: %v", err)
return fmt.Errorf("config.Load: %v", err)
}
if config.KubernetesConfig.ContainerRuntime != constants.ContainerdRuntime {
r, err := cruntime.New(cruntime.Config{Type: config.KubernetesConfig.ContainerRuntime})
if err != nil {
return err
}
_, ok := r.(*cruntime.Containerd)
if !ok {
return fmt.Errorf(`This addon can only be enabled with the containerd runtime backend.
To enable this backend, please first stop minikube with:


@ -40,6 +40,7 @@ import (
"k8s.io/minikube/pkg/minikube/cluster"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/machine"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/kubeconfig"
@ -95,26 +96,10 @@ var startCmd = &cobra.Command{
Use: "start",
Short: "Starts a local kubernetes cluster",
Long: `Starts a local kubernetes cluster using VM. This command
assumes you have already installed one of the VM drivers: virtualbox/vmwarefusion/kvm/xhyve/hyperv.`,
assumes you have already installed one of the VM drivers: virtualbox/parallels/vmwarefusion/kvm/xhyve/hyperv.`,
Run: runStart,
}
// SetContainerRuntime possibly sets the container runtime
func SetContainerRuntime(cfg map[string]string, runtime string) map[string]string {
switch runtime {
case "crio", "cri-o":
cfg["runtime-endpoint"] = "unix:///var/run/crio/crio.sock"
cfg["image-endpoint"] = "unix:///var/run/crio/crio.sock"
case "containerd":
cfg["runtime-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["image-endpoint"] = "unix:///run/containerd/containerd.sock"
default:
return nil
}
return cfg
}
func runStart(cmd *cobra.Command, args []string) {
if glog.V(8) {
glog.Infoln("Viper configuration:")
@ -223,18 +208,6 @@ func runStart(cmd *cobra.Command, args []string) {
selectedNetworkPlugin := viper.GetString(networkPlugin)
selectedEnableDefaultCNI := viper.GetBool(enableDefaultCNI)
// common config (currently none)
var cricfg = map[string]string{}
if cricfg := SetContainerRuntime(cricfg, selectedContainerRuntime); cricfg != nil {
var command string
fmt.Println("Writing crictl config...")
if command, err = cmdutil.GetCrictlConfigCommand(cricfg); err == nil {
_, err = host.RunSSHCommand(command)
}
if err != nil {
glog.Errorln("Error writing crictl config: ", err)
}
}
// default network plugin (cni)
if selectedContainerRuntime != "" {
if !cmd.Flags().Changed(networkPlugin) {
@ -286,11 +259,6 @@ func runStart(cmd *cobra.Command, args []string) {
EnableDefaultCNI: selectedEnableDefaultCNI,
}
k8sBootstrapper, err := GetClusterBootstrapper(api, clusterBootstrapper)
if err != nil {
glog.Exitf("Error getting cluster bootstrapper: %v", err)
}
// Write profile cluster configuration to file
clusterConfig = cfg.Config{
MachineConfig: config,
@ -309,14 +277,17 @@ func runStart(cmd *cobra.Command, args []string) {
}
fmt.Println("Moving files into cluster...")
if err := k8sBootstrapper.UpdateCluster(kubernetesConfig); err != nil {
bs, err := GetClusterBootstrapper(api, clusterBootstrapper)
if err != nil {
glog.Exitf("Error getting cluster bootstrapper: %v", err)
}
if err := bs.UpdateCluster(kubernetesConfig); err != nil {
glog.Errorln("Error updating cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Setting up certs...")
if err := k8sBootstrapper.SetupCerts(kubernetesConfig); err != nil {
if err := bs.SetupCerts(kubernetesConfig); err != nil {
glog.Errorln("Error configuring authentication: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
@ -330,8 +301,6 @@ func runStart(cmd *cobra.Command, args []string) {
kubeHost = strings.Replace(kubeHost, ":2376", ":"+strconv.Itoa(kubernetesConfig.NodePort), -1)
fmt.Println("Setting up kubeconfig...")
// setup kubeconfig
kubeConfigFile := cmdutil.GetKubeConfigPath()
kubeCfgSetup := &kubeconfig.KubeConfigSetup{
@ -350,49 +319,19 @@ func runStart(cmd *cobra.Command, args []string) {
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Stopping extra container runtimes...")
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != "" {
if _, err := host.RunSSHCommand("sudo systemctl stop docker"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop docker.socket")
}
if err != nil {
glog.Errorf("Error stopping docker: %v", err)
}
}
if config.VMDriver != constants.DriverNone && (selectedContainerRuntime != constants.CrioRuntime && selectedContainerRuntime != constants.Cri_oRuntime) {
if _, err := host.RunSSHCommand("sudo systemctl stop crio"); err != nil {
glog.Errorf("Error stopping crio: %v", err)
}
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != constants.RktRuntime {
if _, err := host.RunSSHCommand("sudo systemctl stop rkt-api"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop rkt-metadata")
}
if err != nil {
glog.Errorf("Error stopping rkt: %v", err)
}
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != constants.ContainerdRuntime {
if _, err = host.RunSSHCommand("sudo systemctl stop containerd"); err != nil {
glog.Errorf("Error stopping containerd: %v", err)
}
runner, err := machine.CommandRunner(host)
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver != constants.DriverNone && (selectedContainerRuntime == constants.CrioRuntime || selectedContainerRuntime == constants.Cri_oRuntime) {
fmt.Println("Restarting crio runtime...")
// restart crio so that it can monitor all hook dirs
if _, err := host.RunSSHCommand("sudo systemctl restart crio"); err != nil {
glog.Errorf("Error restarting crio: %v", err)
}
cr, err := cruntime.New(cruntime.Config{Type: selectedContainerRuntime, Runner: runner})
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime == constants.ContainerdRuntime {
fmt.Println("Restarting containerd runtime...")
// restart containerd so that it can install all plugins
if _, err := host.RunSSHCommand("sudo systemctl restart containerd"); err != nil {
glog.Errorf("Error restarting containerd: %v", err)
}
fmt.Printf("Configuring %s runtime...\n", cr.Name())
err = cr.Enable()
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver == constants.DriverNone {
@ -424,14 +363,18 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
}
if !exists || config.VMDriver == constants.DriverNone {
fmt.Println("Pulling images...")
if err := bs.PullImages(kubernetesConfig); err != nil {
fmt.Printf("Unable to pull images, which may be OK: %v", err)
}
fmt.Println("Starting cluster components...")
if err := k8sBootstrapper.StartCluster(kubernetesConfig); err != nil {
if err := bs.StartCluster(kubernetesConfig); err != nil {
glog.Errorf("Error starting cluster: %v", err)
cmdutil.MaybeReportErrorAndExit(err)
}
} else {
fmt.Println("Machine exists, restarting cluster components...")
if err := k8sBootstrapper.RestartCluster(kubernetesConfig); err != nil {
if err := bs.RestartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error restarting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
@ -440,7 +383,7 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
// Block until the cluster is healthy.
fmt.Print("Verifying kubelet health ...")
kStat := func() (err error) {
st, err := k8sBootstrapper.GetKubeletStatus()
st, err := bs.GetKubeletStatus()
if err != nil || st != state.Running.String() {
fmt.Printf(".")
return &pkgutil.RetriableError{Err: fmt.Errorf("kubelet unhealthy: %v: %s", err, st)}
@ -454,7 +397,7 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
}
fmt.Print("\nVerifying apiserver health ...")
aStat := func() (err error) {
st, err := k8sBootstrapper.GetApiServerStatus(net.ParseIP(ip))
st, err := bs.GetApiServerStatus(net.ParseIP(ip))
if err != nil || st != state.Running.String() {
fmt.Print(".")
return &pkgutil.RetriableError{Err: fmt.Errorf("apiserver status=%s err=%v", st, err)}
@ -509,7 +452,6 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
fmt.Println("Unable to load cached images from config file.")
}
fmt.Println("\n\nEverything looks great. Please enjoy minikube!")
return
}
func init() {
@ -538,12 +480,13 @@ func init() {
startCmd.Flags().String(serviceCIDR, pkgutil.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(&registryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(containerRuntime, "", "The container runtime to be used")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd, rkt)")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
// TODO(tstromberg): Flip cacheImages to true once it can be stabilized
startCmd.Flags().Bool(cacheImages, false, "If true, cache docker images for the current bootstrapper and load them into the machine.")
startCmd.Flags().Var(&extraOptions, "extra-config",
`A set of key=value pairs that describe configuration that may be passed to different components.


@ -27,14 +27,11 @@ import (
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"text/template"
"time"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
@ -231,33 +228,6 @@ minikube config set WantKubectlDownloadMsg false
}
}
// Return a command to run, that will generate the crictl config file
func GetCrictlConfigCommand(cfg map[string]string) (string, error) {
var (
crictlYamlTmpl = `runtime-endpoint: {{.RuntimeEndpoint}}
image-endpoint: {{.ImageEndpoint}}
`
crictlYamlPath = "/etc/crictl.yaml"
)
t, err := template.New("crictlYaml").Parse(crictlYamlTmpl)
if err != nil {
return "", err
}
opts := struct {
RuntimeEndpoint string
ImageEndpoint string
}{
RuntimeEndpoint: cfg["runtime-endpoint"],
ImageEndpoint: cfg["image-endpoint"],
}
var crictlYamlBuf bytes.Buffer
if err := t.Execute(&crictlYamlBuf, opts); err != nil {
return "", err
}
return fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(crictlYamlPath), crictlYamlBuf.String(), crictlYamlPath), nil
}
// Ask the kernel for a free open port that is ready to use
func GetPort() (string, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")


@ -1 +1,2 @@
sha256 ccf83574556793ceb01717dc91c66b70f183c60c2bbec70283939aae8fdef768 crictl-v1.11.1-linux-amd64.tar.gz
sha256 9bdbea7a2b382494aff2ff014da328a042c5aba9096a7772e57fdf487e5a1d51 crictl-v1.13.0-linux-amd64.tar.gz


@ -4,7 +4,7 @@
#
################################################################################
CRICTL_BIN_VERSION = v1.11.1
CRICTL_BIN_VERSION = v1.13.0
CRICTL_BIN_SITE = https://github.com/kubernetes-sigs/cri-tools/releases/download/$(CRICTL_BIN_VERSION)
CRICTL_BIN_SOURCE = crictl-$(CRICTL_BIN_VERSION)-linux-amd64.tar.gz
CRICTL_BIN_STRIP_COMPONENTS = 0


@ -122,7 +122,7 @@ log_size_max = -1
default_transport = "docker://"
# pause_image is the image which we use to instantiate infra containers.
pause_image = "kubernetes/pause"
pause_image = "k8s.gcr.io/pause:3.1"
# pause_command is the command to run in a pause_image to have a container just
# sit there. If the image contains the necessary information, this value need


@ -1,10 +1,13 @@
### Debugging Issues With Minikube
To debug issues with minikube (not Kubernetes but minikube itself), you can use the -v flag to see debug level info. The specified values for v will do the following (the values are all encompassing in that higher values will give you all lower value outputs as well):
* --v=0 INFO level logs
* --v=1 WARNING level logs
* --v=2 ERROR level logs
* --v=3 libmachine logging
* --v=7 libmachine --debug level logging
To debug issues with minikube (not *Kubernetes* but **minikube** itself), you can use the `-v` flag to see debug level info. The specified values for `-v` will do the following (the values are all encompassing in that higher values will give you all lower value outputs as well):
* `--v=0` will output **INFO** level logs
* `--v=1` will output **WARNING** level logs
* `--v=2` will output **ERROR** level logs
* `--v=3` will output *libmachine* logging
* `--v=7` will output *libmachine --debug* level logging
Example:
`minikube start --v=1` Will start minikube and output all warnings to stdout.
If you need to access additional tools for debugging, minikube also includes the [CoreOS toolbox](https://github.com/coreos/toolbox)


@ -1,9 +1,9 @@
## Persistent Volumes
Minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath`.
These PersistentVolumes are mapped to a directory inside the Minikube VM.
Minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--vm-driver=none`). For more information on how this works, read the Dynamic Provisioning section below.
The Minikube VM boots into a tmpfs, so most directories will not be persisted across reboots (`minikube stop`).
However, Minikube is configured to persist files stored under the following directories in the Minikube VM:
### A note on mounts, persistence, and Minikube hosts
Minikube is configured to persist files stored under the following directories, which are made in the Minikube VM (or on your localhost if running on bare metal). You may lose data from other directories on reboots.
* `/data`
* `/var/lib/minikube`
@ -28,3 +28,11 @@ spec:
```
You can also achieve persistence by creating a PV in a mounted host folder.
## Dynamic provisioning and CSI
In addition, minikube implements a very simple, canonical dynamic storage controller that runs alongside its deployment. It manages provisioning of *hostPath* volumes (rather than via the previous, in-tree hostPath provider).
The default [Storage Provisioner Controller](https://github.com/kubernetes/minikube/blob/master/pkg/storage/storage_provisioner.go) is maintained inside the minikube codebase, demonstrating how easy it is to plug a custom storage controller into kubernetes as a storage component of the system, and provides pods with dynamically provisioned storage so you can test your pod's behaviour when persistent storage is mapped to it.
Note that this is not a CSI-based storage provider; rather, it simply declares a PersistentVolume object of type hostPath dynamically when the controller sees that there is an outstanding storage request.


@ -149,8 +149,8 @@ def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015, 2016, 2017, or 2018, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' )
# dates can be 2010 to 2039
regexs["date"] = re.compile( '(20[123]\d)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts


@ -23,8 +23,8 @@ REPLACE_MINIKUBE_DRIVER_KVM_SHA256=$(awk '{ print $1 }' out/docker-machine-drive
REPLACE_MINIKUBE_DARWIN_SHA256=$(awk '{ print $1 }' out/minikube-darwin-amd64.sha256)
MINIKUBE_ROOT=$PWD
git clone ssh://aur@aur.archlinux.org/minikube.git aur-minikube
pushd aur-minikube >/dev/null
git clone ssh://aur@aur.archlinux.org/minikube-bin.git aur-minikube-bin
pushd aur-minikube-bin >/dev/null
sed -e "s/\$PKG_VERSION/${REPLACE_PKG_VERSION}/g" \
-e "s/\$MINIKUBE_LINUX_SHA256/${REPLACE_MINIKUBE_LINUX_SHA256}/g" \
$MINIKUBE_ROOT/installers/linux/archlinux/PKGBUILD > PKGBUILD


@ -17,54 +17,68 @@ limitations under the License.
package none
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/golang/glog"
"github.com/docker/machine/libmachine/drivers"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/net"
pkgdrivers "k8s.io/minikube/pkg/drivers"
// TODO(tstromberg): Extract CommandRunner into its own package
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/cruntime"
)
const driverName = "none"
const dockerstopcmd = `docker kill $(docker ps -a --filter="name=k8s_" --format="{{.ID}}")`
var dockerkillcmd = fmt.Sprintf(`docker rm $(%s)`, dockerstopcmd)
// cleanupPaths are paths to be removed by cleanup, and are used by both kubeadm and minikube.
var cleanupPaths = []string{
"/data/minikube",
"/etc/kubernetes/manifests",
"/var/lib/minikube",
}
// none Driver is a driver designed to run kubeadm w/o a VM
// none Driver is a driver designed to run kubeadm w/o VM management, and assumes systemctl.
// https://github.com/kubernetes/minikube/blob/master/docs/vmdriver-none.md
type Driver struct {
*drivers.BaseDriver
*pkgdrivers.CommonDriver
URL string
URL string
runtime cruntime.Manager
exec bootstrapper.CommandRunner
}
// Config is configuration for the None driver
type Config struct {
MachineName string
StorePath string
ContainerRuntime string
}
func NewDriver(hostName, storePath string) *Driver {
// NewDriver returns a fully configured None driver
func NewDriver(c Config) *Driver {
runner := &bootstrapper.ExecRunner{}
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
// Libraries shouldn't panic, but there is no way for drivers to return error :(
if err != nil {
glog.Fatalf("unable to create container runtime: %v", err)
}
return &Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: hostName,
StorePath: storePath,
MachineName: c.MachineName,
StorePath: c.StorePath,
},
runtime: runtime,
exec: runner,
}
}
// PreCreateCheck checks for correct privileges and dependencies
func (d *Driver) PreCreateCheck() error {
if d.ContainerRuntime == "" {
// check that docker is on path
_, err := exec.LookPath("docker")
if err != nil {
return errors.Wrap(err, "docker cannot be found on the path for this machine. "+
"A docker installation is a requirement for using the none driver")
}
}
return nil
return d.runtime.Available()
}
func (d *Driver) Create() error {
@ -77,6 +91,7 @@ func (d *Driver) DriverName() string {
return driverName
}
// GetIP returns an IP or hostname that this host is available at
func (d *Driver) GetIP() (string, error) {
ip, err := net.ChooseBindAddress(nil)
if err != nil {
@ -85,87 +100,73 @@ func (d *Driver) GetIP() (string, error) {
return ip.String(), nil
}
// GetSSHHostname returns hostname for use with ssh
func (d *Driver) GetSSHHostname() (string, error) {
return "", fmt.Errorf("driver does not support ssh commands")
}
// GetSSHPort returns port for use with ssh
func (d *Driver) GetSSHPort() (int, error) {
return 0, fmt.Errorf("driver does not support ssh commands")
}
// GetURL returns a Docker compatible host URL for connecting to this host
// e.g. tcp://1.2.3.4:2376
func (d *Driver) GetURL() (string, error) {
ip, err := d.GetIP()
if err != nil {
return "", err
}
return fmt.Sprintf("tcp://%s:2376", ip), nil
}
// GetState returns the state that the host is in (running, stopped, etc)
func (d *Driver) GetState() (state.State, error) {
var statuscmd = fmt.Sprintf(
`sudo systemctl is-active kubelet &>/dev/null && echo "Running" || echo "Stopped"`)
out, err := runCommand(statuscmd, true)
if err != nil {
return state.None, err
}
s := strings.TrimSpace(out)
if state.Running.String() == s {
return state.Running, nil
} else if state.Stopped.String() == s {
if err := checkKubelet(d.exec); err != nil {
glog.Infof("kubelet not running: %v", err)
return state.Stopped, nil
} else {
return state.None, fmt.Errorf("Error: Unrecognize output from GetState: %s", s)
}
return state.Running, nil
}
// Kill stops a host forcefully, including any containers that we are managing.
func (d *Driver) Kill() error {
for _, cmdStr := range [][]string{
{"systemctl", "stop", "kubelet.service"},
{"rm", "-rf", "/var/lib/minikube"},
} {
cmd := exec.Command("sudo", cmdStr...)
if out, err := cmd.CombinedOutput(); err != nil {
glog.Warningf("Error %v running command: %s. Output: %s", err, cmdStr, out)
}
if err := stopKubelet(d.exec); err != nil {
return errors.Wrap(err, "kubelet")
}
containers, err := d.runtime.ListContainers(cruntime.MinikubeContainerPrefix)
if err != nil {
return errors.Wrap(err, "containers")
}
// Try to be graceful before sending SIGKILL everywhere.
if err := d.runtime.StopContainers(containers); err != nil {
return errors.Wrap(err, "stop")
}
if err := d.runtime.KillContainers(containers); err != nil {
return errors.Wrap(err, "kill")
}
return nil
}
// Remove a host, including any data which may have been written by it.
func (d *Driver) Remove() error {
rmCmd := `sudo systemctl stop kubelet.service
sudo rm -rf /data/minikube
sudo rm -rf /etc/kubernetes/manifests
sudo rm -rf /var/lib/minikube || true`
for _, cmdStr := range []string{rmCmd} {
if out, err := runCommand(cmdStr, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, cmdStr, out)
}
if err := d.Kill(); err != nil {
return errors.Wrap(err, "kill")
}
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerkillcmd, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, dockerkillcmd, out)
}
// TODO(#3637): Make sure this calls into the bootstrapper to perform `kubeadm reset`
cmd := fmt.Sprintf("sudo rm -rf %s", strings.Join(cleanupPaths, " "))
if err := d.exec.Run(cmd); err != nil {
glog.Errorf("cleanup incomplete: %v", err)
}
return nil
}
// Restart a host
func (d *Driver) Restart() error {
restartCmd := `
if systemctl is-active kubelet.service; then
sudo systemctl restart kubelet.service
fi`
cmd := exec.Command(restartCmd)
if err := cmd.Start(); err != nil {
return err
}
return nil
return restartKubelet(d.exec)
}
// Start a host
func (d *Driver) Start() error {
var err error
d.IPAddress, err = d.GetIP()
@ -179,50 +180,40 @@ func (d *Driver) Start() error {
return nil
}
// Stop a host gracefully, including any containers that we are managing.
func (d *Driver) Stop() error {
var stopcmd = fmt.Sprintf("if [[ `systemctl` =~ -\\.mount ]] &>/dev/null; " + `then
for svc in "kubelet"; do
sudo systemctl stop "$svc".service || true
done
fi
`)
_, err := runCommand(stopcmd, false)
if err != nil {
if err := stopKubelet(d.exec); err != nil {
return err
}
for {
s, err := d.GetState()
if err != nil {
return err
}
if s != state.Running {
break
}
containers, err := d.runtime.ListContainers(cruntime.MinikubeContainerPrefix)
if err != nil {
return errors.Wrap(err, "containers")
}
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerstopcmd, false); err != nil {
glog.Warningf("Error %v running command %s. Output: %s", err, dockerstopcmd, out)
}
if err := d.runtime.StopContainers(containers); err != nil {
return errors.Wrap(err, "stop")
}
return nil
}
// RunSSHCommandFromDriver implements direct ssh control to the driver
func (d *Driver) RunSSHCommandFromDriver() error {
return fmt.Errorf("driver does not support ssh commands")
}
func runCommand(command string, sudo bool) (string, error) {
cmd := exec.Command("/bin/bash", "-c", command)
if sudo {
cmd = exec.Command("sudo", "/bin/bash", "-c", command)
}
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", errors.Wrap(err, stderr.String())
}
return out.String(), nil
// stopKubelet idempotently stops the kubelet
func stopKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("stopping kubelet.service ...")
return exec.Run("sudo systemctl stop kubelet.service")
}
// restartKubelet restarts the kubelet
func restartKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("restarting kubelet.service ...")
return exec.Run("sudo systemctl restart kubelet.service")
}
// checkKubelet returns an error if the kubelet is not running.
func checkKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("checking for running kubelet ...")
return exec.Run("systemctl is-active --quiet service kubelet")
}
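
With this rewrite, the none driver no longer shells out ad hoc: it is constructed with a `Config` and delegates container work to a `cruntime.Manager`. A minimal sketch of how a caller might build it (the import path and values below are illustrative; the actual wiring lives in minikube's driver registration code):

```go
package main

import (
	"fmt"

	// Assumed import path for the package shown above.
	"k8s.io/minikube/pkg/drivers/none"
)

func main() {
	// Config mirrors the struct introduced in this diff. ContainerRuntime picks the
	// cruntime.Manager that PreCreateCheck, Kill and Stop delegate to.
	d := none.NewDriver(none.Config{
		MachineName:      "minikube",
		StorePath:        "/tmp/minikube-store", // illustrative
		ContainerRuntime: "docker",
	})

	// GetState now just asks systemd whether the kubelet is active, via the ExecRunner.
	st, err := d.GetState()
	fmt.Println(st, err)
}
```

Note that `NewDriver` calls `glog.Fatalf` if the runtime cannot be constructed, since the libmachine driver interface gives constructors no way to return an error.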


@ -19,6 +19,7 @@ package assets
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
@ -307,7 +308,8 @@ func addMinikubeDirToAssets(basedir, vmpath string, assets *[]CopyableFile) erro
return errors.Wrap(err, "generating relative path")
}
rPath = filepath.Dir(rPath)
vmpath = filepath.Join("/", rPath)
rPath = filepath.ToSlash(rPath)
vmpath = path.Join("/", rPath)
}
permString := fmt.Sprintf("%o", info.Mode().Perm())
// The conversion will strip the leading 0 if present, so add it back
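
The `filepath.Join` to `path.Join` change above matters because the destination is a path inside the Linux VM, which always uses forward slashes, while `filepath` follows the host OS conventions. A small illustration (the addon name is made up; the Windows behaviour is what the code comments describe):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	rel := filepath.Join("addons", "my-addon") // "addons\my-addon" on a Windows host

	// filepath.Join keeps the host separator, so on Windows this prints
	// "\addons\my-addon", which is not a valid path inside the Linux VM.
	fmt.Println(filepath.Join("/", rel))

	// filepath.ToSlash + path.Join yields "/addons/my-addon" on every host OS.
	fmt.Println(path.Join("/", filepath.ToSlash(rel)))
}
```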


@ -26,6 +26,8 @@ import (
// Bootstrapper contains all the methods needed to bootstrap a kubernetes cluster
type Bootstrapper interface {
// PullImages pulls images necessary for a cluster. Success should not be required.
PullImages(config.KubernetesConfig) error
StartCluster(config.KubernetesConfig) error
UpdateCluster(config.KubernetesConfig) error
RestartCluster(config.KubernetesConfig) error


@ -40,11 +40,33 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/util"
)
// SkipPreflights are preflight checks we always skip.
var SkipPreflights = []string{
// We use --ignore-preflight-errors=DirAvailable since we have our own custom addons
// that we also stick in /etc/kubernetes/manifests
"DirAvailable--etc-kubernetes-manifests",
"DirAvailable--data-minikube",
"Port-10250",
"FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml",
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
// We use --ignore-preflight-errors=Swap since minikube.iso allocates a swap partition.
// (it should probably stop doing this, though...)
"Swap",
// We use --ignore-preflight-errors=CRI since /var/run/dockershim.sock is not present.
// (because we start kubelet with an invalid config)
"CRI",
}
// SkipAdditionalPreflights are additional preflights we skip depending on the runtime in use.
var SkipAdditionalPreflights = map[string][]string{}
type KubeadmBootstrapper struct {
c bootstrapper.CommandRunner
}
@ -54,20 +76,11 @@ func NewKubeadmBootstrapper(api libmachine.API) (*KubeadmBootstrapper, error) {
if err != nil {
return nil, errors.Wrap(err, "getting api client")
}
var cmd bootstrapper.CommandRunner
// The none driver executes commands directly on the host
if h.Driver.DriverName() == constants.DriverNone {
cmd = &bootstrapper.ExecRunner{}
} else {
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client")
}
cmd = bootstrapper.NewSSHRunner(client)
runner, err := machine.CommandRunner(h)
if err != nil {
return nil, errors.Wrap(err, "command runner")
}
return &KubeadmBootstrapper{
c: cmd,
}, nil
return &KubeadmBootstrapper{c: runner}, nil
}
func (k *KubeadmBootstrapper) GetKubeletStatus() (string, error) {
@ -137,21 +150,13 @@ func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error {
return errors.Wrap(err, "parsing kubernetes version")
}
b := bytes.Buffer{}
preflights := constants.Preflights
if k8s.ContainerRuntime != "" {
preflights = constants.AlternateRuntimePreflights
out, err := k.c.CombinedOutput("sudo modprobe br_netfilter")
if err != nil {
glog.Infoln(out)
return errors.Wrap(err, "sudo modprobe br_netfilter")
}
out, err = k.c.CombinedOutput("sudo sh -c \"echo '1' > /proc/sys/net/ipv4/ip_forward\"")
if err != nil {
glog.Infoln(out)
return errors.Wrap(err, "creating /proc/sys/net/ipv4/ip_forward")
}
r, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime})
if err != nil {
return err
}
b := bytes.Buffer{}
preflights := SkipPreflights
preflights = append(preflights, SkipAdditionalPreflights[r.Name()]...)
templateContext := struct {
KubeadmConfigFile string
@ -208,6 +213,7 @@ func addAddons(files *[]assets.CopyableFile) error {
return nil
}
// RestartCluster restarts the Kubernetes cluster configured by kubeadm
func (k *KubeadmBootstrapper) RestartCluster(k8s config.KubernetesConfig) error {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
@ -241,63 +247,23 @@ func (k *KubeadmBootstrapper) RestartCluster(k8s config.KubernetesConfig) error
return nil
}
// PullImages downloads images that will be used by RestartCluster
func (k *KubeadmBootstrapper) PullImages(k8s config.KubernetesConfig) error {
cmd := fmt.Sprintf("sudo kubeadm config images pull --config %s", constants.KubeadmConfigFile)
if err := k.c.Run(cmd); err != nil {
return errors.Wrapf(err, "running cmd: %s", cmd)
}
return nil
}
// SetupCerts sets up certificates within the cluster.
func (k *KubeadmBootstrapper) SetupCerts(k8s config.KubernetesConfig) error {
return bootstrapper.SetupCerts(k.c, k8s)
}
// SetContainerRuntime possibly sets the container runtime, if it hasn't already
// been specified by the extra-config option. It has a set of defaults known to
// work for a particular runtime.
func SetContainerRuntime(cfg map[string]string, runtime string) map[string]string {
if _, ok := cfg["container-runtime"]; ok {
glog.Infoln("Container runtime already set through extra options, ignoring --container-runtime flag.")
return cfg
}
if runtime == "" {
glog.Infoln("Container runtime flag provided with no value, using defaults.")
return cfg
}
switch runtime {
case "crio", "cri-o":
cfg["container-runtime"] = "remote"
cfg["container-runtime-endpoint"] = "/var/run/crio/crio.sock"
cfg["image-service-endpoint"] = "/var/run/crio/crio.sock"
cfg["runtime-request-timeout"] = "15m"
case "containerd":
cfg["container-runtime"] = "remote"
cfg["container-runtime-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["image-service-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["runtime-request-timeout"] = "15m"
default:
cfg["container-runtime"] = runtime
}
return cfg
}
func GetCRISocket(path string, runtime string) string {
if path != "" {
glog.Infoln("Container runtime interface socket provided, using path.")
return path
}
switch runtime {
case "crio", "cri-o":
path = "/var/run/crio/crio.sock"
case "containerd":
path = "/run/containerd/containerd.sock"
default:
path = ""
}
return path
}
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(k8s config.KubernetesConfig) (string, error) {
func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string, error) {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return "", errors.Wrap(err, "parsing kubernetes version")
@ -308,8 +274,9 @@ func NewKubeletConfig(k8s config.KubernetesConfig) (string, error) {
return "", errors.Wrap(err, "generating extra configuration for kubelet")
}
extraOpts = SetContainerRuntime(extraOpts, k8s.ContainerRuntime)
for k, v := range r.KubeletOptions() {
extraOpts[k] = v
}
if k8s.NetworkPlugin != "" {
extraOpts["network-plugin"] = k8s.NetworkPlugin
}
@ -346,16 +313,20 @@ func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
return errors.Wrap(err, "loading cached images")
}
}
kubeadmCfg, err := generateConfig(cfg)
r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime, Socket: cfg.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
kubeadmCfg, err := generateConfig(cfg, r)
if err != nil {
return errors.Wrap(err, "generating kubeadm cfg")
}
kubeletCfg, err := NewKubeletConfig(cfg)
kubeletCfg, err := NewKubeletConfig(cfg, r)
if err != nil {
return errors.Wrap(err, "generating kubelet config")
}
glog.Infof("kubelet %s config:\n%s", cfg.KubernetesVersion, kubeletCfg)
files := []assets.CopyableFile{
assets.NewMemoryAssetTarget([]byte(kubeletService), constants.KubeletServiceFile, "0640"),
@ -416,14 +387,12 @@ sudo systemctl start kubelet
return nil
}
func generateConfig(k8s config.KubernetesConfig) (string, error) {
func generateConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string, error) {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return "", errors.Wrap(err, "parsing kubernetes version")
}
criSocket := GetCRISocket(k8s.CRISocket, k8s.ContainerRuntime)
// parses a map of the feature gates for kubeadm and component
kubeadmFeatureArgs, componentFeatureArgs, err := ParseFeatureArgs(k8s.FeatureGates)
if err != nil {
@ -462,7 +431,7 @@ func generateConfig(k8s config.KubernetesConfig) (string, error) {
KubernetesVersion: k8s.KubernetesVersion,
EtcdDataDir: "/data/minikube", //TODO(r2d4): change to something else persisted
NodeName: k8s.NodeName,
CRISocket: criSocket,
CRISocket: r.SocketPath(),
ExtraArgs: extraComponentConfig,
FeatureArgs: kubeadmFeatureArgs,
NoTaintMaster: false, // That does not work with k8s 1.12+
@ -513,11 +482,10 @@ func maybeDownloadAndCache(binary, version string) (string, error) {
options.Checksum = constants.GetKubernetesReleaseURLSha1(binary, version)
options.ChecksumHash = crypto.SHA1
fmt.Printf("Downloading %s %s\n", binary, version)
glog.Infof("Downloading %s %s", binary, version)
if err := download.ToFile(url, targetFilepath, options); err != nil {
return "", errors.Wrapf(err, "Error downloading %s %s", binary, version)
}
fmt.Printf("Finished Downloading %s %s\n", binary, version)
glog.Infof("Finished Downloading %s %s", binary, version)
return targetFilepath, nil
}
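
The preflight skip list now lives in the kubeadm package, with per-runtime extras appended from `SkipAdditionalPreflights[r.Name()]`. The kubeadm invocation itself is rendered from a template outside this hunk, so the flag assembly below is an assumption, but it shows the shape of what such a list typically becomes:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A subset of SkipPreflights from the diff above.
	preflights := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"DirAvailable--data-minikube",
		"Swap",
		"CRI",
	}
	// Hypothetical per-runtime additions, e.g. SkipAdditionalPreflights["containerd"].
	preflights = append(preflights, "Service-Docker")

	// Joined into a single kubeadm flag (assumed; the real template is not shown here).
	fmt.Println("--ignore-preflight-errors=" + strings.Join(preflights, ","))
}
```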


@ -20,6 +20,7 @@ import (
"testing"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/util"
)
@ -257,8 +258,13 @@ apiServerExtraArgs:
}
for _, test := range tests {
runtime, err := cruntime.New(cruntime.Config{Type: "docker"})
if err != nil {
t.Fatalf("runtime: %v", err)
}
t.Run(test.description, func(t *testing.T) {
actualCfg, err := generateConfig(test.cfg)
actualCfg, err := generateConfig(test.cfg, runtime)
if err != nil && !test.shouldErr {
t.Errorf("got unexpected error generating config: %v", err)
return


@ -54,6 +54,7 @@ func GetMinipath() string {
// used in gendocs.
var SupportedVMDrivers = [...]string{
"virtualbox",
"parallels",
"vmwarefusion",
"kvm",
"xhyve",
@ -129,7 +130,7 @@ kubectl: {{.Kubeconfig}}
var DefaultIsoUrl = fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s.iso", minikubeVersion.GetIsoPath(), minikubeVersion.GetIsoVersion())
var DefaultIsoShaUrl = DefaultIsoUrl + ShaSuffix
var DefaultKubernetesVersion = "v1.13.2"
var DefaultKubernetesVersion = "v1.13.3"
var ConfigFilePath = MakeMiniPath("config")
var ConfigFile = MakeMiniPath("config", "config.json")
@ -155,41 +156,6 @@ const (
DefaultRktNetConfigPath = "/etc/rkt/net.d/k8s.conf"
)
var Preflights = []string{
// We use --ignore-preflight-errors=DirAvailable since we have our own custom addons
// that we also stick in /etc/kubernetes/manifests
"DirAvailable--etc-kubernetes-manifests",
"DirAvailable--data-minikube",
"Port-10250",
"FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml",
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
// We use --ignore-preflight-errors=Swap since minikube.iso allocates a swap partition.
// (it should probably stop doing this, though...)
"Swap",
// We use --ignore-preflight-errors=CRI since /var/run/dockershim.sock is not present.
// (because we start kubelet with an invalid config)
"CRI",
}
// AlternateRuntimePreflights are additional preflight checks that are skipped when running
// any container runtime that isn't Docker
var AlternateRuntimePreflights = append(Preflights, []string{
"Service-Docker",
"Port-8443",
"Port-10251",
"Port-10252",
"Port-2379",
}...)
const (
ContainerdRuntime = "containerd"
RktRuntime = "rkt"
CrioRuntime = "crio"
Cri_oRuntime = "cri-o"
)
const (
DefaultUfsPort = "5640"
DefaultUfsDebugLvl = 0
@ -286,7 +252,7 @@ const (
GvisorFilesPath = "/tmp/gvisor"
// ContainerdConfigTomlPath is the path to the containerd config.toml
ContainerdConfigTomlPath = "/etc/containerd/config.toml"
// GvisorContainerdShimTomlPath is the path to givosr-containerd-shim.toml
// GvisorContainerdShimTomlPath is the path to gvisor-containerd-shim.toml
GvisorContainerdShimTomlPath = "/etc/containerd/gvisor-containerd-shim.toml"
// StoredContainerdConfigTomlPath is the path where the default config.toml will be stored
StoredContainerdConfigTomlPath = "/tmp/config.toml"


@ -0,0 +1,103 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"fmt"
"github.com/golang/glog"
)
// Containerd contains containerd runtime state
type Containerd struct {
Socket string
Runner CommandRunner
}
// Name is a human readable name for containerd
func (r *Containerd) Name() string {
return "containerd"
}
// SocketPath returns the path to the socket file for containerd
func (r *Containerd) SocketPath() string {
if r.Socket != "" {
return r.Socket
}
return "/run/containerd/containerd.sock"
}
// Active returns if containerd is active on the host
func (r *Containerd) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service containerd")
return err == nil
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *Containerd) Available() error {
return r.Runner.Run("command -v containerd")
}
// Enable idempotently enables containerd on a host
func (r *Containerd) Enable() error {
if err := disableOthers(r, r.Runner); err != nil {
glog.Warningf("disableOthers: %v", err)
}
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
return err
}
// Otherwise, containerd will fail API requests with 'Unimplemented'
return r.Runner.Run("sudo systemctl restart containerd")
}
// Disable idempotently disables containerd on a host
func (r *Containerd) Disable() error {
return r.Runner.Run("sudo systemctl stop containerd")
}
// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
return pullImageCRI(r.Runner, path)
}
// KubeletOptions returns kubelet options for the containerd runtime
func (r *Containerd) KubeletOptions() map[string]string {
return map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()),
"image-service-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()),
"runtime-request-timeout": "15m",
}
}
// ListContainers returns a list of containers managed by this container runtime
func (r *Containerd) ListContainers(filter string) ([]string, error) {
return listCRIContainers(r.Runner, filter)
}
// KillContainers removes containers based on ID
func (r *Containerd) KillContainers(ids []string) error {
return killCRIContainers(r.Runner, ids)
}
// StopContainers stops containers based on ID
func (r *Containerd) StopContainers(ids []string) error {
return stopCRIContainers(r.Runner, ids)
}


@ -0,0 +1,66 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"bytes"
"fmt"
"html/template"
"path"
"github.com/golang/glog"
)
// listCRIContainers returns a list of containers using crictl
func listCRIContainers(_ CommandRunner, _ string) ([]string, error) {
// Should use crictl ps -a, but needs some massaging and testing.
return []string{}, fmt.Errorf("unimplemented")
}
// pullImageCRI uses ctr to pull images into a CRI runtime
func pullImageCRI(cr CommandRunner, path string) error {
glog.Infof("Loading image: %s", path)
return cr.Run(fmt.Sprintf("sudo ctr cri load %s", path))
}
// killCRIContainers kills a list of containers using crictl
func killCRIContainers(CommandRunner, []string) error {
return fmt.Errorf("unimplemented")
}
// stopCRIContainers stops containers using crictl
func stopCRIContainers(CommandRunner, []string) error {
return fmt.Errorf("unimplemented")
}
// populateCRIConfig sets up /etc/crictl.yaml
func populateCRIConfig(cr CommandRunner, socket string) error {
cPath := "/etc/crictl.yaml"
tmpl := `runtime-endpoint: unix://{{.Socket}}
image-endpoint: unix://{{.Socket}}
`
t, err := template.New("crictl").Parse(tmpl)
if err != nil {
return err
}
opts := struct{ Socket string }{Socket: socket}
var b bytes.Buffer
if err := t.Execute(&b, opts); err != nil {
return err
}
return cr.Run(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(cPath), b.String(), cPath))
}
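
For reference, `populateCRIConfig` above just renders a two-line YAML body over the runtime's socket and writes it to `/etc/crictl.yaml` with `sudo tee`. A standalone sketch of the rendering step (using `text/template`; the diff itself imports `html/template`, which behaves identically for plain socket paths):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Same template body as populateCRIConfig, rendered for the default containerd socket.
	tmpl := `runtime-endpoint: unix://{{.Socket}}
image-endpoint: unix://{{.Socket}}
`
	t := template.Must(template.New("crictl").Parse(tmpl))
	_ = t.Execute(os.Stdout, struct{ Socket string }{Socket: "/run/containerd/containerd.sock"})
	// Output:
	// runtime-endpoint: unix:///run/containerd/containerd.sock
	// image-endpoint: unix:///run/containerd/containerd.sock
}
```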


@ -0,0 +1,104 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"fmt"
"github.com/golang/glog"
)
// CRIO contains CRIO runtime state
type CRIO struct {
Socket string
Runner CommandRunner
}
// Name is a human readable name for CRIO
func (r *CRIO) Name() string {
return "CRIO"
}
// SocketPath returns the path to the socket file for CRIO
func (r *CRIO) SocketPath() string {
if r.Socket != "" {
return r.Socket
}
return "/var/run/crio/crio.sock"
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *CRIO) Available() error {
return r.Runner.Run("command -v crio")
}
// Active returns if CRIO is active on the host
func (r *CRIO) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service crio")
return err == nil
}
// Enable idempotently enables CRIO on a host
func (r *CRIO) Enable() error {
if err := disableOthers(r, r.Runner); err != nil {
glog.Warningf("disableOthers: %v", err)
}
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
return err
}
return r.Runner.Run("sudo systemctl restart crio")
}
// Disable idempotently disables CRIO on a host
func (r *CRIO) Disable() error {
return r.Runner.Run("sudo systemctl stop crio")
}
// LoadImage loads an image into this runtime
func (r *CRIO) LoadImage(path string) error {
// This should use ctr via pullImageCRI once we sort out why api.v1.CRIPluginService is unimplemented.
return r.Runner.Run(fmt.Sprintf("sudo podman load -i %s", path))
}
// KubeletOptions returns kubelet options for a runtime.
func (r *CRIO) KubeletOptions() map[string]string {
return map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": r.SocketPath(),
"image-service-endpoint": r.SocketPath(),
"runtime-request-timeout": "15m",
}
}
// ListContainers returns a list of containers managed by this container runtime
func (r *CRIO) ListContainers(filter string) ([]string, error) {
return listCRIContainers(r.Runner, filter)
}
// KillContainers removes containers based on ID
func (r *CRIO) KillContainers(ids []string) error {
return killCRIContainers(r.Runner, ids)
}
// StopContainers stops containers based on ID
func (r *CRIO) StopContainers(ids []string) error {
return stopCRIContainers(r.Runner, ids)
}


@ -0,0 +1,127 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cruntime contains code specific to container runtimes
package cruntime
import (
"fmt"
"github.com/golang/glog"
"github.com/pkg/errors"
)
const MinikubeContainerPrefix = "k8s_"
// CommandRunner is the subset of bootstrapper.CommandRunner this package consumes
type CommandRunner interface {
Run(string) error
CombinedOutput(string) (string, error)
}
// Manager is a common interface for container runtimes
type Manager interface {
// Name is a human readable name for a runtime
Name() string
// Enable idempotently enables this runtime on a host
Enable() error
// Disable idempotently disables this runtime on a host
Disable() error
// Active returns whether or not a runtime is active on a host
Active() bool
// Available returns an error if it is not possible to use this runtime on a host
Available() error
// KubeletOptions returns kubelet options for a runtime.
KubeletOptions() map[string]string
// SocketPath returns the path to the socket file for a given runtime
SocketPath() string
// Load an image idempotently into the runtime on a host
LoadImage(string) error
// ListContainers returns a list of containers managed by this container runtime
ListContainers(string) ([]string, error)
// KillContainers removes containers based on ID
KillContainers([]string) error
// StopContainers stops containers based on ID
StopContainers([]string) error
}
// Config is runtime configuration
type Config struct {
// Type of runtime to create ("docker, "crio", etc)
Type string
// Custom path to a socket file
Socket string
// Runner is the CommandRunner object to execute commands with
Runner CommandRunner
}
// New returns an appropriately configured runtime
func New(c Config) (Manager, error) {
switch c.Type {
case "", "docker":
return &Docker{Socket: c.Socket, Runner: c.Runner}, nil
case "crio", "cri-o":
return &CRIO{Socket: c.Socket, Runner: c.Runner}, nil
case "containerd":
return &Containerd{Socket: c.Socket, Runner: c.Runner}, nil
default:
return nil, fmt.Errorf("unknown runtime type: %q", c.Type)
}
}
// disableOthers disables all other runtimes except for me.
func disableOthers(me Manager, cr CommandRunner) error {
// valid values returned by manager.Name()
runtimes := []string{"containerd", "crio", "docker"}
for _, name := range runtimes {
r, err := New(Config{Type: name, Runner: cr})
if err != nil {
return fmt.Errorf("runtime(%s): %v", name, err)
}
// Don't disable myself.
if r.Name() == me.Name() {
continue
}
// runtime is already disabled, nothing to do.
if !r.Active() {
continue
}
if err = r.Disable(); err != nil {
glog.Warningf("disable failed: %v", err)
}
// Validate that the runtime really is offline - and that Active & Disable are properly written.
if r.Active() {
return fmt.Errorf("%s is still active", r.Name())
}
}
return nil
}
// enableIPForwarding configures IP forwarding, which is handled normally by Docker
// Context: https://github.com/kubernetes/kubeadm/issues/1062
func enableIPForwarding(cr CommandRunner) error {
if err := cr.Run("sudo modprobe br_netfilter"); err != nil {
return errors.Wrap(err, "br_netfilter")
}
if err := cr.Run("sudo sh -c \"echo 1 > /proc/sys/net/ipv4/ip_forward\""); err != nil {
return errors.Wrap(err, "ip_forward")
}
return nil
}
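
Taken together, the `cruntime` package gives callers a single entry point for runtime selection. A minimal usage sketch, assuming a `CommandRunner` such as the SSH or exec runner used elsewhere in the diff (the fake runner here only logs commands):

```go
package main

import (
	"fmt"

	"k8s.io/minikube/pkg/minikube/cruntime"
)

// logRunner is a stand-in for a bootstrapper.CommandRunner (SSH or exec).
type logRunner struct{}

func (logRunner) Run(cmd string) error                      { fmt.Println("run:", cmd); return nil }
func (logRunner) CombinedOutput(cmd string) (string, error) { return "", nil }

func main() {
	// Pick a manager by name, as runStart does with the --container-runtime flag.
	cr, err := cruntime.New(cruntime.Config{Type: "containerd", Runner: logRunner{}})
	if err != nil {
		panic(err)
	}
	// Enable disables competing runtimes, writes /etc/crictl.yaml, enables IP
	// forwarding and restarts the service.
	if err := cr.Enable(); err != nil {
		panic(err)
	}
	// KubeletOptions yields the extra kubelet flags the kubeadm bootstrapper merges in.
	fmt.Println(cr.KubeletOptions())
}
```

In a real run, `Runner` would be the command runner returned by `machine.CommandRunner(host)` in start.go above.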


@ -0,0 +1,279 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"fmt"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
func TestName(t *testing.T) {
var tests = []struct {
runtime string
want string
}{
{"", "Docker"},
{"docker", "Docker"},
{"crio", "CRIO"},
{"cri-o", "CRIO"},
{"containerd", "containerd"},
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
r, err := New(Config{Type: tc.runtime})
if err != nil {
t.Fatalf("New(%s): %v", tc.runtime, err)
}
got := r.Name()
if got != tc.want {
t.Errorf("Name(%s) = %q, want: %q", tc.runtime, got, tc.want)
}
})
}
}
func TestKubeletOptions(t *testing.T) {
var tests = []struct {
runtime string
want map[string]string
}{
{"docker", map[string]string{"container-runtime": "docker"}},
{"crio", map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": "/var/run/crio/crio.sock",
"image-service-endpoint": "/var/run/crio/crio.sock",
"runtime-request-timeout": "15m",
}},
{"containerd", map[string]string{
"container-runtime": "remote",
"container-runtime-endpoint": "unix:///run/containerd/containerd.sock",
"image-service-endpoint": "unix:///run/containerd/containerd.sock",
"runtime-request-timeout": "15m",
}},
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
r, err := New(Config{Type: tc.runtime})
if err != nil {
t.Fatalf("New(%s): %v", tc.runtime, err)
}
got := r.KubeletOptions()
if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("KubeletOptions(%s) returned diff (-want +got):\n%s", tc.runtime, diff)
}
})
}
}
type serviceState int
const (
Exited serviceState = iota
Running
Restarted
)
// FakeRunner is a command runner that isn't very smart.
type FakeRunner struct {
cmds []string
services map[string]serviceState
t *testing.T
}
// NewFakeRunner returns a CommandRunner which emulates a systemd host
func NewFakeRunner(t *testing.T) *FakeRunner {
return &FakeRunner{
services: map[string]serviceState{},
cmds: []string{},
t: t,
}
}
// CombinedOutput runs a fake command and returns its fake output.
func (f *FakeRunner) CombinedOutput(cmd string) (string, error) {
f.cmds = append(f.cmds, cmd)
out := ""
root := false
args := strings.Split(cmd, " ")
bin, args := args[0], args[1:]
f.t.Logf("bin=%s args=%v", bin, args)
if bin == "sudo" {
root = true
bin, args = args[0], args[1:]
}
if bin == "systemctl" {
return f.systemctl(args, root)
}
if bin == "docker" {
return f.docker(args, root)
}
return out, nil
}
// Run runs a fake command.
func (f *FakeRunner) Run(cmd string) error {
_, err := f.CombinedOutput(cmd)
return err
}
// docker is a fake implementation of docker
func (f *FakeRunner) docker(args []string, root bool) (string, error) {
return "", nil
}
// systemctl is a fake implementation of systemctl
func (f *FakeRunner) systemctl(args []string, root bool) (string, error) {
action := args[0]
svcs := args[1:]
out := ""
for i, arg := range args {
// systemctl is-active --quiet service crio
if arg == "service" {
svcs = args[i+1:]
}
}
for _, svc := range svcs {
state, ok := f.services[svc]
if !ok {
return out, fmt.Errorf("unknown fake service: %s", svc)
}
switch action {
case "stop":
if !root {
return out, fmt.Errorf("not root")
}
f.services[svc] = Exited
f.t.Logf("stopped %s", svc)
case "start":
if !root {
return out, fmt.Errorf("not root")
}
f.services[svc] = Running
f.t.Logf("started %s", svc)
case "restart":
if !root {
return out, fmt.Errorf("not root")
}
f.services[svc] = Restarted
f.t.Logf("restarted %s", svc)
case "is-active":
f.t.Logf("%s is-status: %v", svc, state)
if state == Running {
return out, nil
}
return out, fmt.Errorf("%s in state: %v", svc, state)
default:
return out, fmt.Errorf("unimplemented fake action: %q", action)
}
}
return out, nil
}
// defaultServices reflects the default boot state for the minikube VM
var defaultServices = map[string]serviceState{
"docker": Running,
"docker.socket": Running,
"crio": Exited,
"crio-shutdown": Exited,
"containerd": Exited,
}
func TestDisable(t *testing.T) {
var tests = []struct {
runtime string
want []string
}{
{"docker", []string{"sudo systemctl stop docker docker.socket"}},
{"crio", []string{"sudo systemctl stop crio"}},
{"containerd", []string{"sudo systemctl stop containerd"}},
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
runner := NewFakeRunner(t)
for k, v := range defaultServices {
runner.services[k] = v
}
cr, err := New(Config{Type: tc.runtime, Runner: runner})
if err != nil {
t.Fatalf("New(%s): %v", tc.runtime, err)
}
err = cr.Disable()
if err != nil {
t.Errorf("%s disable unexpected error: %v", tc.runtime, err)
}
if diff := cmp.Diff(tc.want, runner.cmds); diff != "" {
t.Errorf("Disable(%s) commands diff (-want +got):\n%s", tc.runtime, diff)
}
})
}
}
func TestEnable(t *testing.T) {
var tests = []struct {
runtime string
want map[string]serviceState
}{
{"docker", map[string]serviceState{
"docker": Restarted,
"docker.socket": Running,
"containerd": Exited,
"crio": Exited,
"crio-shutdown": Exited,
}},
{"containerd", map[string]serviceState{
"docker": Exited,
"docker.socket": Exited,
"containerd": Restarted,
"crio": Exited,
"crio-shutdown": Exited,
}},
{"crio", map[string]serviceState{
"docker": Exited,
"docker.socket": Exited,
"containerd": Exited,
"crio": Restarted,
"crio-shutdown": Exited,
}},
}
for _, tc := range tests {
t.Run(tc.runtime, func(t *testing.T) {
runner := NewFakeRunner(t)
for k, v := range defaultServices {
runner.services[k] = v
}
cr, err := New(Config{Type: tc.runtime, Runner: runner})
if err != nil {
t.Fatalf("New(%s): %v", tc.runtime, err)
}
err = cr.Enable()
if err != nil {
t.Errorf("%s disable unexpected error: %v", tc.runtime, err)
}
if diff := cmp.Diff(tc.want, runner.services); diff != "" {
t.Errorf("service diff (-want +got):\n%s", diff)
}
})
}
}

View File

@ -0,0 +1,98 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cruntime
import (
"fmt"
"os/exec"
"strings"
"github.com/golang/glog"
)
// Docker contains Docker runtime state
type Docker struct {
Socket string
Runner CommandRunner
}
// Name is a human readable name for Docker
func (r *Docker) Name() string {
return "Docker"
}
// SocketPath returns the path to the socket file for Docker
func (r *Docker) SocketPath() string {
return r.Socket
}
// Available returns an error if it is not possible to use this runtime on a host
func (r *Docker) Available() error {
_, err := exec.LookPath("docker")
return err
}
// Active returns whether docker is active on the host
func (r *Docker) Active() bool {
err := r.Runner.Run("systemctl is-active --quiet service docker")
return err == nil
}
// Enable idempotently enables Docker on a host
func (r *Docker) Enable() error {
if err := disableOthers(r, r.Runner); err != nil {
glog.Warningf("disableOthers: %v", err)
}
return r.Runner.Run("sudo systemctl restart docker")
}
// Disable idempotently disables Docker on a host
func (r *Docker) Disable() error {
return r.Runner.Run("sudo systemctl stop docker docker.socket")
}
// LoadImage loads an image into this runtime
func (r *Docker) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("docker load -i %s", path))
}
// KubeletOptions returns kubelet options for a runtime.
func (r *Docker) KubeletOptions() map[string]string {
return map[string]string{
"container-runtime": "docker",
}
}
// ListContainers returns a list of container IDs matching the given name filter
func (r *Docker) ListContainers(filter string) ([]string, error) {
content, err := r.Runner.CombinedOutput(fmt.Sprintf(`docker ps -a --filter="name=%s" --format="{{.ID}}"`, filter))
if err != nil {
return nil, err
}
return strings.Split(content, "\n"), nil
}
// KillContainers forcibly removes the running containers with the given IDs
func (r *Docker) KillContainers(ids []string) error {
return r.Runner.Run(fmt.Sprintf("docker rm -f %s", strings.Join(ids, " ")))
}
// StopContainers stops the running containers with the given IDs
func (r *Docker) StopContainers(ids []string) error {
return r.Runner.Run(fmt.Sprintf("docker stop %s", strings.Join(ids, " ")))
}
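// A rough sketch of combining ListContainers and KillContainers to clear out
// anything matching a name filter. The helper name and the empty-ID filtering
// are assumptions for illustration; "docker ps" output ends with a newline, so
// the split above can yield empty strings.
func removeByFilter(r *Docker, filter string) error {
	ids, err := r.ListContainers(filter)
	if err != nil {
		return err
	}
	valid := []string{}
	for _, id := range ids {
		if id != "" {
			valid = append(valid, id)
		}
	}
	if len(valid) == 0 {
		return nil
	}
	return r.KillContainers(valid)
}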

View File

@ -32,17 +32,16 @@ func init() {
Builtin: true,
ConfigCreator: createNoneHost,
DriverCreator: func() drivers.Driver {
return none.NewDriver("", "")
return none.NewDriver(none.Config{})
},
})
}
// createNoneHost creates a none Driver from a MachineConfig
func createNoneHost(config cfg.MachineConfig) interface{} {
return &none.Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: cfg.GetMachineName(),
StorePath: constants.GetMinipath(),
},
return none.NewDriver(none.Config{
MachineName: cfg.GetMachineName(),
StorePath: constants.GetMinipath(),
ContainerRuntime: config.ContainerRuntime,
}
})
}

View File

@ -0,0 +1,17 @@
/*
Copyright 2018 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parallels

View File

@ -0,0 +1,48 @@
// +build darwin
/*
Copyright 2018 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parallels
import (
parallels "github.com/Parallels/docker-machine-parallels"
"github.com/docker/machine/libmachine/drivers"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/registry"
)
func init() {
registry.Register(registry.DriverDef{
Name: "parallels",
Builtin: true,
ConfigCreator: createParallelsHost,
DriverCreator: func() drivers.Driver {
return parallels.NewDriver("", "")
},
})
}
func createParallelsHost(config cfg.MachineConfig) interface{} {
d := parallels.NewDriver(cfg.GetMachineName(), constants.GetMinipath()).(*parallels.Driver)
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
d.Memory = config.Memory
d.CPU = config.CPUs
d.DiskSize = config.DiskSize
d.ISO = d.ResolveStorePath("boot2docker.iso")
return d
}

View File

@ -26,30 +26,27 @@ import (
"strings"
"sync"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/sshutil"
"github.com/golang/glog"
"github.com/pkg/errors"
)
const tempLoadDir = "/tmp"
var getWindowsVolumeName = getWindowsVolumeNameCmd
var podmanLoad sync.Mutex
// loadImageLock is used to serialize image loads to avoid overloading the guest VM
var loadImageLock sync.Mutex
func CacheImagesForBootstrapper(version string, clusterBootstrapper string) error {
images := bootstrapper.GetCachedImageList(version, clusterBootstrapper)
@ -198,7 +195,7 @@ func getWindowsVolumeNameCmd(d string) (string, error) {
return vname, nil
}
func LoadFromCacheBlocking(cmd bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error {
func LoadFromCacheBlocking(cr bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error {
glog.Infoln("Loading image from cache at ", src)
filename := filepath.Base(src)
for {
@ -211,34 +208,26 @@ func LoadFromCacheBlocking(cmd bootstrapper.CommandRunner, k8s config.Kubernetes
if err != nil {
return errors.Wrapf(err, "creating copyable file asset: %s", filename)
}
if err := cmd.Copy(f); err != nil {
if err := cr.Copy(f); err != nil {
return errors.Wrap(err, "transferring cached image")
}
var dockerLoadCmd string
crio := k8s.ContainerRuntime == constants.CrioRuntime || k8s.ContainerRuntime == constants.Cri_oRuntime
if crio {
dockerLoadCmd = "sudo podman load -i " + dst
} else {
dockerLoadCmd = "docker load -i " + dst
r, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: cr})
if err != nil {
return errors.Wrap(err, "runtime")
}
loadImageLock.Lock()
defer loadImageLock.Unlock()
if crio {
podmanLoad.Lock()
err = r.LoadImage(dst)
if err != nil {
return errors.Wrapf(err, "%s load %s", r.Name(), dst)
}
loadImageLock.Unlock()
if err := cmd.Run(dockerLoadCmd); err != nil {
return errors.Wrapf(err, "loading docker image: %s", dst)
}
if crio {
podmanLoad.Unlock()
}
if err := cmd.Run("sudo rm -rf " + dst); err != nil {
if err := cr.Run("sudo rm -rf " + dst); err != nil {
return errors.Wrap(err, "deleting temp docker image location")
}
glog.Infof("Successfully loaded image %s from cache", src)
return nil
}
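// A hedged sketch of loading several cached images in parallel while the
// loadImageLock above still serializes the actual load on the guest VM.
// The function name and its parameters are illustrative only.
func loadAllFromCache(cr bootstrapper.CommandRunner, k8s config.KubernetesConfig, srcs []string) error {
	var g errgroup.Group
	for _, src := range srcs {
		src := src // capture the loop variable for the closure
		g.Go(func() error {
			return LoadFromCacheBlocking(cr, k8s, src)
		})
	}
	return g.Wait()
}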

View File

@ -25,12 +25,6 @@ import (
"path/filepath"
"time"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/provision"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/auth"
"github.com/docker/machine/libmachine/cert"
@ -48,6 +42,11 @@ import (
"github.com/docker/machine/libmachine/version"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/provision"
)
func NewRPCClient(storePath, certsDir string) libmachine.API {
@ -142,16 +141,16 @@ func (api *LocalClient) Close() error {
return nil
}
func GetCommandRunner(h *host.Host) (bootstrapper.CommandRunner, error) {
if h.DriverName != constants.DriverNone {
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client for bootstrapper")
}
return bootstrapper.NewSSHRunner(client), nil
// CommandRunner returns the best available command runner for this host
func CommandRunner(h *host.Host) (bootstrapper.CommandRunner, error) {
if h.DriverName == constants.DriverNone {
return &bootstrapper.ExecRunner{}, nil
}
return &bootstrapper.ExecRunner{}, nil
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client for bootstrapper")
}
return bootstrapper.NewSSHRunner(client), nil
}
func (api *LocalClient) Create(h *host.Host) error {

View File

@ -55,8 +55,8 @@ var _ controller.Provisioner = &hostPathProvisioner{}
// Provision creates a storage asset and returns a PV object representing it.
func (p *hostPathProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
glog.Infof("Provisioning volume %v", options)
path := path.Join(p.pvDir, options.PVName)
if err := os.MkdirAll(path, 0777); err != nil {
return nil, err
}
@ -93,6 +93,7 @@ func (p *hostPathProvisioner) Provision(options controller.VolumeOptions) (*v1.P
// Delete removes the storage asset that was created by Provision represented
// by the given PV.
func (p *hostPathProvisioner) Delete(volume *v1.PersistentVolume) error {
glog.Infof("Deleting volume %v", volume)
ann, ok := volume.Annotations["hostPathProvisionerIdentity"]
if !ok {
return errors.New("identity annotation not found on PV")
@ -111,6 +112,7 @@ func (p *hostPathProvisioner) Delete(volume *v1.PersistentVolume) error {
// Start storage provisioner server
func StartStorageProvisioner() error {
glog.Infof("Initializing the Minikube storage provisioner...")
config, err := restclient.InClusterConfig()
if err != nil {
return err
@ -135,7 +137,7 @@ func StartStorageProvisioner() error {
// PVs
pc := controller.NewProvisionController(clientset, provisionerName, hostPathProvisioner, serverVersion.GitVersion)
glog.Info("Starting storage provisioner server")
glog.Info("Storage provisioner initialized, now starting service!")
pc.Run(wait.NeverStop)
return nil
}

View File

@ -23,7 +23,6 @@ import (
"testing"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/test/integration/util"
)
@ -61,7 +60,7 @@ func TestFunctionalContainerd(t *testing.T) {
minikubeRunner.RunCommand("delete", true)
}
minikubeRunner.SetRuntime(constants.ContainerdRuntime)
minikubeRunner.SetRuntime("containerd")
minikubeRunner.EnsureRunning()
t.Run("Gvisor", testGvisor)

View File

@ -25,32 +25,20 @@ import (
"time"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/test/integration/util"
)
func TestStartStop(t *testing.T) {
tests := []struct {
name string
runtime string
}{
{
name: "default",
runtime: "",
},
{
name: "containerd",
runtime: constants.ContainerdRuntime,
},
{
name: "crio",
runtime: constants.CrioRuntime,
},
// TODO(tstromberg): Add test for crio w/o cni
{runtime: "docker"},
{runtime: "containerd"},
{runtime: "crio"},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run(test.runtime, func(t *testing.T) {
runner := NewMinikubeRunner(t)
if test.runtime != "" && usingNoneDriver(runner) {
t.Skipf("skipping, can't use %s with none driver", test.runtime)

View File

@ -34,7 +34,6 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/constants"
commonutil "k8s.io/minikube/pkg/util"
)
@ -207,9 +206,9 @@ func (m *MinikubeRunner) Start() {
opts := ""
// TODO(tstromberg): Deprecate this in favor of making it possible for tests to define explicit flags.
switch r := m.Runtime; r {
case constants.ContainerdRuntime:
case "containerd":
opts = "--container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock"
case constants.CrioRuntime:
case "crio":
opts = "--container-runtime=crio"
}
m.RunCommand(fmt.Sprintf("start %s %s %s --alsologtostderr --v=5", m.StartArgs, m.Args, opts), true)

View File

@ -1,27 +1,5 @@
package virtualbox
import (
"strings"
"syscall"
"github.com/docker/machine/libmachine/log"
)
// IsVTXDisabled checks if VT-X is disabled in the BIOS. If it is, the vm will fail to start.
// If we can't be sure it is disabled, we carry on and will check the vm logs after it's started.
func (d *Driver) IsVTXDisabled() bool {
features, err := syscall.Sysctl("machdep.cpu.features")
if err != nil {
log.Debugf("Couldn't check that VT-X/AMD-v is enabled. Will check that the vm is properly created: %v", err)
return false
}
return isVTXDisabled(features)
}
func isVTXDisabled(features string) bool {
return !strings.Contains(features, "VMX")
}
func detectVBoxManageCmd() string {
return detectVBoxManageCmdInPath()
}

View File

@ -1,37 +1,5 @@
package virtualbox
import (
"bytes"
"io/ioutil"
"github.com/docker/machine/libmachine/log"
)
// IsVTXDisabled checks if VT-X is disabled in the BIOS. If it is, the vm will fail to start.
// If we can't be sure it is disabled, we carry on and will check the vm logs after it's started.
// We want to check that either vmx or svm flags are present in /proc/cpuinfo.
func (d *Driver) IsVTXDisabled() bool {
cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Debugf("Couldn't check that VT-X/AMD-v is enabled. Will check that the vm is properly created: %v", err)
return false
}
return isVTXDisabled(cpuinfo)
}
func isVTXDisabled(cpuinfo []byte) bool {
features := [2][]byte{
{'v', 'm', 'x'},
{'s', 'v', 'm'},
}
for _, v := range features {
if bytes.Contains(cpuinfo, v) {
return false
}
}
return true
}
func detectVBoxManageCmd() string {
return detectVBoxManageCmdInPath()
}

View File

@ -1,37 +1,5 @@
package virtualbox
import (
"bytes"
"io/ioutil"
"github.com/docker/machine/libmachine/log"
)
// IsVTXDisabled checks if VT-X is disabled in the BIOS. If it is, the vm will fail to start.
// If we can't be sure it is disabled, we carry on and will check the vm logs after it's started.
// We want to check that either vmx or svm flags are present in /proc/cpuinfo.
func (d *Driver) IsVTXDisabled() bool {
cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Debugf("Couldn't check that VT-X/AMD-v is enabled. Will check that the vm is properly created: %v", err)
return false
}
return isVTXDisabled(cpuinfo)
}
func isVTXDisabled(cpuinfo []byte) bool {
features := [2][]byte{
{'v', 'm', 'x'},
{'s', 'v', 'm'},
}
for _, v := range features {
if bytes.Contains(cpuinfo, v) {
return false
}
}
return true
}
func detectVBoxManageCmd() string {
return detectVBoxManageCmdInPath()
}

View File

@ -1,9 +1,5 @@
package virtualbox
func (d *Driver) IsVTXDisabled() bool {
return false
}
func detectVBoxManageCmd() string {
return detectVBoxManageCmdInPath()
}

View File

@ -12,20 +12,6 @@ import (
"golang.org/x/sys/windows/registry"
)
// IsVTXDisabled checks if VT-X is disabled in the BIOS. If it is, the vm will fail to start.
// If we can't be sure it is disabled, we carry on and will check the vm logs after it's started.
func (d *Driver) IsVTXDisabled() bool {
errmsg := "Couldn't check that VT-X/AMD-v is enabled. Will check that the vm is properly created: %v"
output, err := cmdOutput("wmic", "cpu", "get", "VirtualizationFirmwareEnabled")
if err != nil {
log.Debugf(errmsg, err)
return false
}
disabled := strings.Contains(output, "FALSE")
return disabled
}
// cmdOutput runs a shell command and returns its output.
func cmdOutput(name string, args ...string) (string, error) {
cmd := exec.Command(name, args...)

View File

@ -0,0 +1,14 @@
// +build 386 amd64
package virtualbox
import "github.com/intel-go/cpuid"
// IsVTXDisabled checks if VT-x is disabled in the CPU.
func (d *Driver) IsVTXDisabled() bool {
if cpuid.HasFeature(cpuid.VMX) || cpuid.HasFeature(cpuid.SVM) {
return false
}
return true
}

View File

@ -0,0 +1,8 @@
// +build !386,!amd64
package virtualbox
// IsVTXDisabled checks if VT-x is disabled in the CPU.
func (d *Driver) IsVTXDisabled() bool {
return true
}

View File

@ -72,7 +72,7 @@ type Plugin struct {
Addr string
MachineName string
addrCh chan string
stopCh chan bool
stopCh chan struct{}
timeout time.Duration
}
@ -121,7 +121,7 @@ func NewPlugin(driverName string) (*Plugin, error) {
log.Debugf("Found binary path at %s", binaryPath)
return &Plugin{
stopCh: make(chan bool),
stopCh: make(chan struct{}),
addrCh: make(chan string, 1),
Executor: &Executor{
DriverName: driverName,
@ -168,19 +168,23 @@ func (lbe *Executor) Close() error {
return nil
}
func stream(scanner *bufio.Scanner, streamOutCh chan<- string) {
func stream(scanner *bufio.Scanner, streamOutCh chan<- string, stopCh <-chan struct{}) {
for scanner.Scan() {
line := scanner.Text()
if err := scanner.Err(); err != nil {
log.Warnf("Scanning stream: %s", err)
}
streamOutCh <- strings.Trim(line, "\n")
select {
case streamOutCh <- strings.Trim(line, "\n"):
case <-stopCh:
return
}
}
}
func (lbp *Plugin) AttachStream(scanner *bufio.Scanner) <-chan string {
streamOutCh := make(chan string)
go stream(scanner, streamOutCh)
go stream(scanner, streamOutCh, lbp.stopCh)
return streamOutCh
}
@ -241,6 +245,6 @@ func (lbp *Plugin) Address() (string, error) {
}
func (lbp *Plugin) Close() error {
lbp.stopCh <- true
close(lbp.stopCh)
return nil
}
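// Why close(stopCh) on a chan struct{} rather than sending a value: a close is
// observed by every current and future receiver, while a single send unblocks
// at most one goroutine and blocks forever if nobody is listening.
// A self-contained sketch (names are illustrative):
func broadcastStopSketch() {
	stop := make(chan struct{})
	done := make(chan int, 3)
	for i := 0; i < 3; i++ {
		go func(id int) {
			<-stop // every worker unblocks once stop is closed
			done <- id
		}(i)
	}
	close(stop) // broadcast; a plain send would wake at most one worker
	for i := 0; i < 3; i++ {
		<-done
	}
}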

View File

@ -76,7 +76,7 @@ func (api *Client) NewHost(driverName string, rawDriver []byte) (*host.Host, err
},
EngineOptions: &engine.Options{
InstallURL: drivers.DefaultEngineInstallURL,
StorageDriver: "aufs",
StorageDriver: "overlay2",
TLSVerify: true,
},
SwarmOptions: &swarm.Options{

View File

@ -232,7 +232,7 @@ func (provisioner *Boot2DockerProvisioner) Provision(swarmOptions swarm.Options,
swarmOptions.Env = engineOptions.Env
if provisioner.EngineOptions.StorageDriver == "" {
provisioner.EngineOptions.StorageDriver = "aufs"
provisioner.EngineOptions.StorageDriver = "overlay2"
}
if err = provisioner.SetHostname(provisioner.Driver.GetMachineName()); err != nil {

View File

@ -86,7 +86,7 @@ func (provisioner *DebianProvisioner) Provision(swarmOptions swarm.Options, auth
provisioner.EngineOptions = engineOptions
swarmOptions.Env = engineOptions.Env
storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver)
storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver)
if err != nil {
return err
}

View File

@ -134,7 +134,7 @@ func (provisioner *RedHatProvisioner) Provision(swarmOptions swarm.Options, auth
swarmOptions.Env = engineOptions.Env
// set default storage driver for redhat
storageDriver, err := decideStorageDriver(provisioner, "devicemapper", engineOptions.StorageDriver)
storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver)
if err != nil {
return err
}

View File

@ -15,32 +15,14 @@ import (
)
func init() {
Register("openSUSE", &RegisteredProvisioner{
New: NewOpenSUSEProvisioner,
})
Register("SUSE Linux Enterprise Desktop", &RegisteredProvisioner{
New: NewSLEDProvisioner,
})
Register("SUSE Linux Enterprise Server", &RegisteredProvisioner{
New: NewSLESProvisioner,
Register("SUSE", &RegisteredProvisioner{
New: NewSUSEProvisioner,
})
}
func NewSLEDProvisioner(d drivers.Driver) Provisioner {
func NewSUSEProvisioner(d drivers.Driver) Provisioner {
return &SUSEProvisioner{
NewSystemdProvisioner("sled", d),
}
}
func NewSLESProvisioner(d drivers.Driver) Provisioner {
return &SUSEProvisioner{
NewSystemdProvisioner("sles", d),
}
}
func NewOpenSUSEProvisioner(d drivers.Driver) Provisioner {
return &SUSEProvisioner{
NewSystemdProvisioner("openSUSE", d),
NewSystemdProvisioner("SUSE", d),
}
}
@ -49,11 +31,17 @@ type SUSEProvisioner struct {
}
func (provisioner *SUSEProvisioner) CompatibleWithHost() bool {
return strings.ToLower(provisioner.OsReleaseInfo.ID) == strings.ToLower(provisioner.OsReleaseID)
ids := strings.Split(provisioner.OsReleaseInfo.IDLike, " ")
for _, id := range ids {
if id == "suse" {
return true
}
}
return false
}
func (provisioner *SUSEProvisioner) String() string {
return "openSUSE"
return "SUSE"
}
func (provisioner *SUSEProvisioner) Package(name string, action pkgaction.PackageAction) error {
@ -135,7 +123,7 @@ func (provisioner *SUSEProvisioner) Provision(swarmOptions swarm.Options, authOp
return err
}
if strings.ToLower(provisioner.OsReleaseInfo.ID) != "opensuse" {
if !strings.HasPrefix(strings.ToLower(provisioner.OsReleaseInfo.ID), "opensuse") {
// This is a SLE machine, enable the containers module to have access
// to the docker packages
if _, err := provisioner.SSHCommand("sudo -E SUSEConnect -p sle-module-containers/12/$(uname -m) -r ''"); err != nil {
@ -157,10 +145,10 @@ func (provisioner *SUSEProvisioner) Provision(swarmOptions swarm.Options, authOp
return err
}
// create symlinks for containerd, containerd-shim and runc.
// create symlinks for containerd, containerd-shim and optional runc.
// We have to do that because machine overrides the openSUSE systemd
// unit of docker
if _, err := provisioner.SSHCommand("sudo -E ln -sf /usr/sbin/runc /usr/sbin/docker-runc"); err != nil {
if _, err := provisioner.SSHCommand("yes no | sudo -E ln -si /usr/sbin/runc /usr/sbin/docker-runc"); err != nil {
return err
}
if _, err := provisioner.SSHCommand("sudo -E ln -sf /usr/sbin/containerd /usr/sbin/docker-containerd"); err != nil {

View File

@ -102,7 +102,7 @@ func (provisioner *UbuntuSystemdProvisioner) Provision(swarmOptions swarm.Option
provisioner.EngineOptions = engineOptions
swarmOptions.Env = engineOptions.Env
storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver)
storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver)
if err != nil {
return err
}

View File

@ -118,7 +118,7 @@ func (provisioner *UbuntuProvisioner) Provision(swarmOptions swarm.Options, auth
provisioner.EngineOptions = engineOptions
swarmOptions.Env = engineOptions.Env
storageDriver, err := decideStorageDriver(provisioner, "aufs", engineOptions.StorageDriver)
storageDriver, err := decideStorageDriver(provisioner, "overlay2", engineOptions.StorageDriver)
if err != nil {
return err
}

View File

@ -233,7 +233,7 @@ func decideStorageDriver(p Provisioner, defaultDriver, suppliedDriver string) (s
if remoteFilesystemType == "btrfs" {
bestSuitedDriver = "btrfs"
} else {
bestSuitedDriver = "aufs"
bestSuitedDriver = defaultDriver
}
}
return bestSuitedDriver, nil

View File

@ -321,11 +321,14 @@ func (client *NativeClient) Shell(args ...string) error {
if err := session.Shell(); err != nil {
return err
}
session.Wait()
if err := session.Wait(); err != nil {
return err
}
} else {
session.Run(strings.Join(args, " "))
if err := session.Run(strings.Join(args, " ")); err != nil {
return err
}
}
return nil
}

27
vendor/github.com/google/go-cmp/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

553
vendor/github.com/google/go-cmp/cmp/compare.go generated vendored Normal file
View File

@ -0,0 +1,553 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
// using the AllowUnexported option.
package cmp
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)
// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
// the reflection package's inability to retrieve such entries. Equal will panic
// anytime it comes across a NaN key, but this behavior may change.
//
// See https://golang.org/issue/11104 for more details.
var nothing = reflect.Value{}
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • If two values are not of the same type, then they are never equal
// and the overall result is false.
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil.
// Otherwise, no such method exists and evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
// Pointers are equal if the underlying values they point to are also equal.
// Interfaces are equal if their underlying concrete values are also equal.
//
// Structs are equal if all of their fields are equal. If a struct contains
// unexported fields, Equal panics unless the AllowUnexported option is used or
// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
//
// Arrays, slices, and maps are equal if they are both nil or both non-nil
// with the same length and the elements at each index or key are equal.
// Note that a non-nil empty slice and a nil slice are not equal.
// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
func Equal(x, y interface{}, opts ...Option) bool {
s := newState(opts)
s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
return s.result.Equal()
}
// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
// input values and options. The output string will use the "-" symbol to
// indicate elements removed from x, and the "+" symbol to indicate elements
// added to y.
//
// Do not depend on this output being stable.
func Diff(x, y interface{}, opts ...Option) string {
r := new(defaultReporter)
opts = Options{Options(opts), r}
eq := Equal(x, y, opts...)
d := r.String()
if (d == "") != eq {
panic("inconsistent difference and equality results")
}
return d
}
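// A minimal usage sketch, mirroring how the tests earlier in this change call
// Diff: it returns "" exactly when Equal would return true, so callers treat a
// non-empty result as a difference report. The type and values below are
// illustrative only.
func exampleDiff() string {
	type opts struct{ Runtime, Socket string }
	want := opts{Runtime: "docker", Socket: "/var/run/docker.sock"}
	got := opts{Runtime: "docker", Socket: "/var/run/crio/crio.sock"}
	// "-" lines come from want (x), "+" lines come from got (y).
	return Diff(want, got)
}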
type state struct {
// These fields represent the "comparison state".
// Calling statelessCompare must not result in observable changes to these.
result diff.Result // The current result of comparison
curPath Path // The current path in the value tree
reporter reporter // Optional reporter used for difference formatting
// dynChecker triggers pseudo-random checks for option correctness.
// It is safe for statelessCompare to mutate this value.
dynChecker dynChecker
// These fields, once set by processOption, will not change.
exporters map[reflect.Type]bool // Set of structs with unexported field visibility
opts Options // List of all fundamental and filter options
}
func newState(opts []Option) *state {
s := new(state)
for _, opt := range opts {
s.processOption(opt)
}
return s
}
func (s *state) processOption(opt Option) {
switch opt := opt.(type) {
case nil:
case Options:
for _, o := range opt {
s.processOption(o)
}
case coreOption:
type filtered interface {
isFiltered() bool
}
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
}
s.opts = append(s.opts, opt)
case visibleStructs:
if s.exporters == nil {
s.exporters = make(map[reflect.Type]bool)
}
for t := range opt {
s.exporters[t] = true
}
case reporter:
if s.reporter != nil {
panic("difference reporter already registered")
}
s.reporter = opt
default:
panic(fmt.Sprintf("unknown option %T", opt))
}
}
// statelessCompare compares two values and returns the result.
// This function is stateless in that it does not alter the current result,
// or output to any registered reporters.
func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
// We do not save and restore the curPath because all of the compareX
// methods should properly push and pop from the path.
// It is an implementation bug if the contents of curPath differs from
// when calling this function to when returning from it.
oldResult, oldReporter := s.result, s.reporter
s.result = diff.Result{} // Reset result
s.reporter = nil // Remove reporter to avoid spurious printouts
s.compareAny(vx, vy)
res := s.result
s.result, s.reporter = oldResult, oldReporter
return res
}
func (s *state) compareAny(vx, vy reflect.Value) {
// TODO: Support cyclic data structures.
// Rule 0: Differing types are never equal.
if !vx.IsValid() || !vy.IsValid() {
s.report(vx.IsValid() == vy.IsValid(), vx, vy)
return
}
if vx.Type() != vy.Type() {
s.report(false, vx, vy) // Possible for path to be empty
return
}
t := vx.Type()
if len(s.curPath) == 0 {
s.curPath.push(&pathStep{typ: t})
defer s.curPath.pop()
}
vx, vy = s.tryExporting(vx, vy)
// Rule 1: Check whether an option applies on this node in the value tree.
if s.tryOptions(vx, vy, t) {
return
}
// Rule 2: Check whether the type has a valid Equal method.
if s.tryMethod(vx, vy, t) {
return
}
// Rule 3: Recursively descend into each value's underlying kind.
switch t.Kind() {
case reflect.Bool:
s.report(vx.Bool() == vy.Bool(), vx, vy)
return
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s.report(vx.Int() == vy.Int(), vx, vy)
return
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s.report(vx.Uint() == vy.Uint(), vx, vy)
return
case reflect.Float32, reflect.Float64:
s.report(vx.Float() == vy.Float(), vx, vy)
return
case reflect.Complex64, reflect.Complex128:
s.report(vx.Complex() == vy.Complex(), vx, vy)
return
case reflect.String:
s.report(vx.String() == vy.String(), vx, vy)
return
case reflect.Chan, reflect.UnsafePointer:
s.report(vx.Pointer() == vy.Pointer(), vx, vy)
return
case reflect.Func:
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
case reflect.Ptr:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
s.curPath.push(&indirect{pathStep{t.Elem()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Interface:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
if vx.Elem().Type() != vy.Elem().Type() {
s.report(false, vx.Elem(), vy.Elem())
return
}
s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Slice:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
fallthrough
case reflect.Array:
s.compareArray(vx, vy, t)
return
case reflect.Map:
s.compareMap(vx, vy, t)
return
case reflect.Struct:
s.compareStruct(vx, vy, t)
return
default:
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
}
}
func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
if sf.force {
// Use unsafe pointer arithmetic to get read-write access to an
// unexported field in the struct.
vx = unsafeRetrieveField(sf.pvx, sf.field)
vy = unsafeRetrieveField(sf.pvy, sf.field)
} else {
// We are not allowed to export the value, so invalidate them
// so that tryOptions can panic later if not explicitly ignored.
vx = nothing
vy = nothing
}
}
return vx, vy
}
func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
// If there were no FilterValues, we will not detect invalid inputs,
// so manually check for them and append invalid if necessary.
// We still evaluate the options since an ignore can override invalid.
opts := s.opts
if !vx.IsValid() || !vy.IsValid() {
opts = Options{opts, invalid{}}
}
// Evaluate all filters and apply the remaining options.
if opt := opts.filter(s, vx, vy, t); opt != nil {
opt.apply(s, vx, vy)
return true
}
return false
}
func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
// Check if this type even has an Equal method.
m, ok := t.MethodByName("Equal")
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
return false
}
eq := s.callTTBFunc(m.Func, vx, vy)
s.report(eq, vx, vy)
return true
}
func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
v = sanitizeValue(v, f.Type().In(0))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
}
// Run the function twice and ensure that we get the same results back.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, v)
want := f.Call([]reflect.Value{v})[0]
if got := <-c; !s.statelessCompare(got, want).Equal() {
// To avoid false-positives with non-reflexive equality operations,
// we sanity check whether a value is equal to itself.
if !s.statelessCompare(want, want).Equal() {
return want
}
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
}
return want
}
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
x = sanitizeValue(x, f.Type().In(0))
y = sanitizeValue(y, f.Type().In(1))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool()
}
// Swapping the input arguments is sufficient to check that
// f is symmetric and deterministic.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, y, x)
want := f.Call([]reflect.Value{x, y})[0].Bool()
if got := <-c; !got.IsValid() || got.Bool() != want {
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
}
return want
}
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
var ret reflect.Value
defer func() {
recover() // Ignore panics, let the other call to f panic instead
c <- ret
}()
ret = f.Call(vs)[0]
}
// sanitizeValue converts nil interfaces of type T to those of type R,
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
// TODO(dsnet): Remove this hacky workaround.
// See https://golang.org/issue/22143
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
return reflect.New(t).Elem()
}
return v
}
func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
s.curPath.push(step)
// Compute an edit-script for slices vx and vy.
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
step.xkey, step.ykey = ix, iy
return s.statelessCompare(vx.Index(ix), vy.Index(iy))
})
// Report the entire slice as is if the arrays are of primitive kind,
// and the arrays are different enough.
isPrimitive := false
switch t.Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
isPrimitive = true
}
if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
s.curPath.pop() // Pop first since we are reporting the whole slice
s.report(false, vx, vy)
return
}
// Replay the edit-script.
var ix, iy int
for _, e := range es {
switch e {
case diff.UniqueX:
step.xkey, step.ykey = ix, -1
s.report(false, vx.Index(ix), nothing)
ix++
case diff.UniqueY:
step.xkey, step.ykey = -1, iy
s.report(false, nothing, vy.Index(iy))
iy++
default:
step.xkey, step.ykey = ix, iy
if e == diff.Identity {
s.report(true, vx.Index(ix), vy.Index(iy))
} else {
s.compareAny(vx.Index(ix), vy.Index(iy))
}
ix++
iy++
}
}
s.curPath.pop()
return
}
func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
// We combine and sort the two map keys so that we can perform the
// comparisons in a deterministic order.
step := &mapIndex{pathStep: pathStep{t.Elem()}}
s.curPath.push(step)
defer s.curPath.pop()
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
step.key = k
vvx := vx.MapIndex(k)
vvy := vy.MapIndex(k)
switch {
case vvx.IsValid() && vvy.IsValid():
s.compareAny(vvx, vvy)
case vvx.IsValid() && !vvy.IsValid():
s.report(false, vvx, nothing)
case !vvx.IsValid() && vvy.IsValid():
s.report(false, nothing, vvy)
default:
// It is possible for both vvx and vvy to be invalid if the
// key contained a NaN value in it. There is no way in
// reflection to be able to retrieve these values.
// See https://golang.org/issue/11104
panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
}
}
}
func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
var vax, vay reflect.Value // Addressable versions of vx and vy
step := &structField{}
s.curPath.push(step)
defer s.curPath.pop()
for i := 0; i < t.NumField(); i++ {
vvx := vx.Field(i)
vvy := vy.Field(i)
step.typ = t.Field(i).Type
step.name = t.Field(i).Name
step.idx = i
step.unexported = !isExported(step.name)
if step.unexported {
// Defer checking of unexported fields until later to give an
// Ignore a chance to ignore the field.
if !vax.IsValid() || !vay.IsValid() {
// For unsafeRetrieveField to work, the parent struct must
// be addressable. Create a new copy of the values if
// necessary to make them addressable.
vax = makeAddressable(vx)
vay = makeAddressable(vy)
}
step.force = s.exporters[t]
step.pvx = vax
step.pvy = vay
step.field = t.Field(i)
}
s.compareAny(vvx, vvy)
}
}
// report records the result of a single comparison.
// It also calls Report if any reporter is registered.
func (s *state) report(eq bool, vx, vy reflect.Value) {
if eq {
s.result.NSame++
} else {
s.result.NDiff++
}
if s.reporter != nil {
s.reporter.Report(vx, vy, eq, s.curPath)
}
}
// dynChecker tracks the state needed to periodically perform checks that
// user provided functions are symmetric and deterministic.
// The zero value is safe for immediate use.
type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
// the number of functions calls grows larger.
func (dc *dynChecker) Next() bool {
ok := dc.curr == dc.next
if ok {
dc.curr = 0
dc.next++
}
dc.curr++
return ok
}
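// A tiny sketch of the cadence described above: starting from the zero value,
// Next reports true on calls 1, 2, 4, 7, 11, ... with gaps that grow by one
// each time, matching the triangular-number spacing. Illustrative only.
func exampleNextCadence() []int {
	var dc dynChecker
	checkedAt := []int{}
	for call := 1; call <= 15; call++ {
		if dc.Next() {
			checkedAt = append(checkedAt, call)
		}
	}
	return checkedAt // [1 2 4 7 11]
}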
// makeAddressable returns a value that is always addressable.
// It returns the input verbatim if it is already addressable,
// otherwise it creates a new value and returns an addressable copy.
func makeAddressable(v reflect.Value) reflect.Value {
if v.CanAddr() {
return v
}
vc := reflect.New(v.Type()).Elem()
vc.Set(v)
return vc
}

View File

@ -0,0 +1,17 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !debug
package diff
var debug debugger
type debugger struct{}
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
return f
}
func (debugger) Update() {}
func (debugger) Finish() {}

View File

@ -0,0 +1,122 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build debug
package diff
import (
"fmt"
"strings"
"sync"
"time"
)
// The algorithm can be seen running in real-time by enabling debugging:
// go test -tags=debug -v
//
// Example output:
// === RUN TestDifference/#34
// ┌───────────────────────────────┐
// │ \ · · · · · · · · · · · · · · │
// │ · # · · · · · · · · · · · · · │
// │ · \ · · · · · · · · · · · · · │
// │ · · \ · · · · · · · · · · · · │
// │ · · · X # · · · · · · · · · · │
// │ · · · # \ · · · · · · · · · · │
// │ · · · · · # # · · · · · · · · │
// │ · · · · · # \ · · · · · · · · │
// │ · · · · · · · \ · · · · · · · │
// │ · · · · · · · · \ · · · · · · │
// │ · · · · · · · · · \ · · · · · │
// │ · · · · · · · · · · \ · · # · │
// │ · · · · · · · · · · · \ # # · │
// │ · · · · · · · · · · · # # # · │
// │ · · · · · · · · · · # # # # · │
// │ · · · · · · · · · # # # # # · │
// │ · · · · · · · · · · · · · · \ │
// └───────────────────────────────┘
// [.Y..M.XY......YXYXY.|]
//
// The grid represents the edit-graph where the horizontal axis represents
// list X and the vertical axis represents list Y. The start of the two lists
// is the top-left, while the ends are the bottom-right. The '·' represents
// an unexplored node in the graph. The '\' indicates that the two symbols
// from list X and Y are equal. The 'X' indicates that two symbols are similar
// (but not exactly equal) to each other. The '#' indicates that the two symbols
// are different (and not similar). The algorithm traverses this graph trying to
// make the paths starting in the top-left and the bottom-right connect.
//
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
// the currently established path from the forward and reverse searches,
// separated by a '|' character.
const (
updateDelay = 100 * time.Millisecond
finishDelay = 500 * time.Millisecond
ansiTerminal = true // ANSI escape codes used to move terminal cursor
)
var debug debugger
type debugger struct {
sync.Mutex
p1, p2 EditScript
fwdPath, revPath *EditScript
grid []byte
lines int
}
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
dbg.Lock()
dbg.fwdPath, dbg.revPath = p1, p2
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
row := "│ " + strings.Repeat("· ", nx) + "│\n"
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
dbg.lines = strings.Count(dbg.String(), "\n")
fmt.Print(dbg)
// Wrap the EqualFunc so that we can intercept each result.
return func(ix, iy int) (r Result) {
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
for i := range cell {
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
}
switch r = f(ix, iy); {
case r.Equal():
cell[0] = '\\'
case r.Similar():
cell[0] = 'X'
default:
cell[0] = '#'
}
return
}
}
func (dbg *debugger) Update() {
dbg.print(updateDelay)
}
func (dbg *debugger) Finish() {
dbg.print(finishDelay)
dbg.Unlock()
}
func (dbg *debugger) String() string {
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
}
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
}
func (dbg *debugger) print(d time.Duration) {
if ansiTerminal {
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
}
fmt.Print(dbg)
time.Sleep(d)
}

View File

@ -0,0 +1,363 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package diff implements an algorithm for producing edit-scripts.
// The edit-script is a sequence of operations needed to transform one list
// of symbols into another (or vice-versa). The edits allowed are insertions,
// deletions, and modifications. The summation of all edits is called the
// Levenshtein distance as this problem is well-known in computer science.
//
// This package prioritizes performance over accuracy. That is, the run time
// is more important than obtaining a minimal Levenshtein distance.
package diff
// EditType represents a single operation within an edit-script.
type EditType uint8
const (
// Identity indicates that a symbol pair is identical in both list X and Y.
Identity EditType = iota
// UniqueX indicates that a symbol only exists in X and not Y.
UniqueX
// UniqueY indicates that a symbol only exists in Y and not X.
UniqueY
// Modified indicates that a symbol pair is a modification of each other.
Modified
)
// EditScript represents the series of differences between two lists.
type EditScript []EditType
// String returns a human-readable string representing the edit-script where
// Identity, UniqueX, UniqueY, and Modified are represented by the
// '.', 'X', 'Y', and 'M' characters, respectively.
func (es EditScript) String() string {
b := make([]byte, len(es))
for i, e := range es {
switch e {
case Identity:
b[i] = '.'
case UniqueX:
b[i] = 'X'
case UniqueY:
b[i] = 'Y'
case Modified:
b[i] = 'M'
default:
panic("invalid edit-type")
}
}
return string(b)
}
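// A one-line illustration of the encoding above (a hedged example, not taken
// from the package's own tests):
func exampleEditScriptString() string {
	return EditScript{Identity, UniqueX, UniqueY, Modified}.String() // ".XYM"
}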
// stats returns a histogram of the number of each type of edit operation.
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
for _, e := range es {
switch e {
case Identity:
s.NI++
case UniqueX:
s.NX++
case UniqueY:
s.NY++
case Modified:
s.NM++
default:
panic("invalid edit-type")
}
}
return
}
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
// lists X and Y are equal.
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
// LenX is the length of the X list.
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
// LenY is the length of the Y list.
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
// When called by Difference, the index is guaranteed to be within nx and ny.
type EqualFunc func(ix int, iy int) Result
// Result is the result of comparison.
// NSame is the number of sub-elements that are equal.
// NDiff is the number of sub-elements that are not equal.
type Result struct{ NSame, NDiff int }
// Equal indicates whether the symbols are equal. Two symbols are equal
// if and only if NDiff == 0. If Equal, then they are also Similar.
func (r Result) Equal() bool { return r.NDiff == 0 }
// Similar indicates whether two symbols are similar and may be represented
// by using the Modified type. As a special case, we consider binary comparisons
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
//
// The exact ratio of NSame to NDiff to determine similarity may change.
func (r Result) Similar() bool {
// Use NSame+1 to offset NSame so that binary comparisons are similar.
return r.NSame+1 >= r.NDiff
}
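// A short sketch of the special case noted above: a binary comparison (exactly
// one differing sub-element and nothing equal) still counts as Similar, while
// a result dominated by differences does not. Illustrative only.
func exampleSimilar() (bool, bool, bool) {
	equal := Result{NSame: 1, NDiff: 0}.Similar()      // true: Equal implies Similar
	binary := Result{NSame: 0, NDiff: 1}.Similar()     // true: 0+1 >= 1
	mostlyDiff := Result{NSame: 1, NDiff: 3}.Similar() // false: 1+1 < 3
	return equal, binary, mostlyDiff
}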
// Difference reports whether two lists of lengths nx and ny are equal
// given the definition of equality provided as f.
//
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
// • eq == (es.Dist()==0)
// • nx == es.LenX()
// • ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
// favors performance over optimality. The exact output is not guaranteed to
// be stable and may change over time.
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// • fwdFrontier.X < revFrontier.X
// • fwdFrontier.Y < revFrontier.Y
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// • Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner). The goal of
// the search is to connect with the search from the opposite corner.
// • As we search, we build a path in a greedy manner, where the first
// match seen is added to the path (this is sub-optimal, but provides a
// decent result in practice). When matches are found, we try the next pair
// of symbols in the lists and follow all matches as far as possible.
// • When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found, we advance the
// frontier towards the opposite corner.
// • This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
//
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists differ because elements were added to the front
// or end of the other list.
//
// Running the tests with the "debug" build tag prints a visualization of
// the algorithm running in real-time. This is educational for understanding
// how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
for {
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
break
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
break
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
}
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
}
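A minimal sketch of the contract above, assuming two string slices x and y (this diff package is internal to go-cmp, so the call is shown purely for illustration):
x := []string{"a", "b", "c"}
y := []string{"a", "c"}
es := Difference(len(x), len(y), func(ix, iy int) Result {
	if x[ix] == y[iy] {
		return Result{NSame: 1}
	}
	return Result{NDiff: 1}
})
// The documented invariants hold:
_ = es.LenX() // == len(x)
_ = es.LenY() // == len(y)
_ = es.Dist() // > 0, since the lists are not identical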
type path struct {
dir int // +1 if forward, -1 if reverse
point // Leading point of the EditScript path
es EditScript
}
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
// to the edit-script to connect p.point to dst.
func (p *path) connect(dst point, f EqualFunc) {
if p.dir > 0 {
// Connect in forward direction.
for dst.X > p.X && dst.Y > p.Y {
switch r := f(p.X, p.Y); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case dst.X-p.X >= dst.Y-p.Y:
p.append(UniqueX)
default:
p.append(UniqueY)
}
}
for dst.X > p.X {
p.append(UniqueX)
}
for dst.Y > p.Y {
p.append(UniqueY)
}
} else {
// Connect in reverse direction.
for p.X > dst.X && p.Y > dst.Y {
switch r := f(p.X-1, p.Y-1); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case p.Y-dst.Y >= p.X-dst.X:
p.append(UniqueY)
default:
p.append(UniqueX)
}
}
for p.X > dst.X {
p.append(UniqueX)
}
for p.Y > dst.Y {
p.append(UniqueY)
}
}
}
func (p *path) append(t EditType) {
p.es = append(p.es, t)
switch t {
case Identity, Modified:
p.add(p.dir, p.dir)
case UniqueX:
p.add(p.dir, 0)
case UniqueY:
p.add(0, p.dir)
}
debug.Update()
}
type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
x = ^x
}
return x >> 1
}


@ -0,0 +1,49 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package function identifies function types.
package function
import "reflect"
type funcType int
const (
_ funcType = iota
ttbFunc // func(T, T) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
Equal = ttbFunc // func(T, T) bool
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
)
var boolType = reflect.TypeOf(true)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
return false
}
ni, no := t.NumIn(), t.NumOut()
switch ft {
case ttbFunc: // func(T, T) bool
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
case tibFunc: // func(T, I) bool
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
return true
}
case trFunc: // func(T) R
if ni == 1 && no == 1 {
return true
}
}
return false
}


@ -0,0 +1,277 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package value provides functionality for reflect.Value types.
package value
import (
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
)
var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
// Format formats the value v as a string.
//
// This is similar to fmt.Sprintf("%+v", v) except this:
// * Prints the type unless it can be elided
// * Avoids printing struct fields that are zero
// * Prints a nil-slice as being nil, not empty
// * Prints map entries in deterministic order
func Format(v reflect.Value, conf FormatConfig) string {
conf.printType = true
conf.followPointers = true
conf.realPointers = true
return formatAny(v, conf, nil)
}
type FormatConfig struct {
UseStringer bool // Should the String method be used if available?
printType bool // Should we print the type before the value?
PrintPrimitiveType bool // Should we print the type of primitives?
followPointers bool // Should we recursively follow pointers?
realPointers bool // Should we print the real address of pointers?
}
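A rough sketch of the behavior described above (the value package is internal to go-cmp, the exact output is not guaranteed, and the user type here is hypothetical):
type user struct {
	Name string
	Age  int
	Tags []string
}
// Zero-valued fields (Age, the nil Tags slice) are elided and the type is
// printed, yielding something like: main.user{Name: "ann"}
_ = Format(reflect.ValueOf(user{Name: "ann"}), FormatConfig{})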
func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
// TODO: Should this be a multi-line printout in certain situations?
if !v.IsValid() {
return "<non-existent>"
}
if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
return "<nil>"
}
const stringerPrefix = "s" // Indicates that the String method was used
s := v.Interface().(fmt.Stringer).String()
return stringerPrefix + formatString(s)
}
switch v.Kind() {
case reflect.Bool:
return formatPrimitive(v.Type(), v.Bool(), conf)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return formatPrimitive(v.Type(), v.Int(), conf)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
// Unnamed uints are usually bytes or words, so use hexadecimal.
return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
}
return formatPrimitive(v.Type(), v.Uint(), conf)
case reflect.Float32, reflect.Float64:
return formatPrimitive(v.Type(), v.Float(), conf)
case reflect.Complex64, reflect.Complex128:
return formatPrimitive(v.Type(), v.Complex(), conf)
case reflect.String:
return formatPrimitive(v.Type(), formatString(v.String()), conf)
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
return formatPointer(v, conf)
case reflect.Ptr:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("(%v)(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] || !conf.followPointers {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
return "&" + formatAny(v.Elem(), conf, visited)
case reflect.Interface:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
return formatAny(v.Elem(), conf, visited)
case reflect.Slice:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
fallthrough
case reflect.Array:
var ss []string
subConf := conf
subConf.printType = v.Type().Elem().Kind() == reflect.Interface
for i := 0; i < v.Len(); i++ {
s := formatAny(v.Index(i), subConf, visited)
ss = append(ss, s)
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
case reflect.Map:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
var ss []string
keyConf, valConf := conf, conf
keyConf.printType = v.Type().Key().Kind() == reflect.Interface
keyConf.followPointers = false
valConf.printType = v.Type().Elem().Kind() == reflect.Interface
for _, k := range SortKeys(v.MapKeys()) {
sk := formatAny(k, keyConf, visited)
sv := formatAny(v.MapIndex(k), valConf, visited)
ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
case reflect.Struct:
var ss []string
subConf := conf
subConf.printType = true
for i := 0; i < v.NumField(); i++ {
vv := v.Field(i)
if isZero(vv) {
continue // Elide zero value fields
}
name := v.Type().Field(i).Name
subConf.UseStringer = conf.UseStringer
s := formatAny(vv, subConf, visited)
ss = append(ss, fmt.Sprintf("%s: %s", name, s))
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
default:
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
}
}
func formatString(s string) string {
// Use quoted string if it is the same length as a raw string literal.
// Otherwise, attempt to use the raw string form.
qs := strconv.Quote(s)
if len(qs) == 1+len(s)+1 {
return qs
}
// Disallow newlines to ensure output is a single line.
// Only allow printable runes for readability purposes.
rawInvalid := func(r rune) bool {
return r == '`' || r == '\n' || !unicode.IsPrint(r)
}
if strings.IndexFunc(s, rawInvalid) < 0 {
return "`" + s + "`"
}
return qs
}
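A few illustrative cases of the rule above (sketch):
_ = formatString("hello")    // quoted form "hello": quoting adds no escapes
_ = formatString(`say "hi"`) // raw form `say "hi"`: quoting would add escape characters
_ = formatString("a\nb")     // quoted form "a\nb": the newline rules out a raw literal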
func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
return fmt.Sprintf("%v(%v)", t, v)
}
return fmt.Sprintf("%v", v)
}
func formatPointer(v reflect.Value, conf FormatConfig) string {
p := v.Pointer()
if !conf.realPointers {
p = 0 // For deterministic printing purposes
}
s := formatHex(uint64(p))
if conf.printType {
return fmt.Sprintf("(%v)(%s)", v.Type(), s)
}
return s
}
func formatHex(u uint64) string {
var f string
switch {
case u <= 0xff:
f = "0x%02x"
case u <= 0xffff:
f = "0x%04x"
case u <= 0xffffff:
f = "0x%06x"
case u <= 0xffffffff:
f = "0x%08x"
case u <= 0xffffffffff:
f = "0x%010x"
case u <= 0xffffffffffff:
f = "0x%012x"
case u <= 0xffffffffffffff:
f = "0x%014x"
case u <= 0xffffffffffffffff:
f = "0x%016x"
}
return fmt.Sprintf(f, u)
}
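For example (sketch), the width grows in whole bytes with the magnitude of the value:
_ = formatHex(0xab)       // "0xab"
_ = formatHex(0x1234)     // "0x1234"
_ = formatHex(0x12345678) // "0x12345678"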
// insertPointer inserts p into m, allocating m if necessary.
func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
if m == nil {
m = make(map[uintptr]bool)
}
m[p] = true
return m
}
// isZero reports whether v is the zero value.
// This does not rely on Interface and so can be used on unexported fields.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool() == false
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.String:
return v.String() == ""
case reflect.UnsafePointer:
return v.Pointer() == 0
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
return v.IsNil()
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !isZero(v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
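A few illustrative cases (sketch):
_ = isZero(reflect.ValueOf(0))                    // true
_ = isZero(reflect.ValueOf(""))                   // true
_ = isZero(reflect.ValueOf([]int(nil)))           // true:  nil slice
_ = isZero(reflect.ValueOf([]int{}))              // false: non-nil but empty slice
_ = isZero(reflect.ValueOf(struct{ A, B int }{})) // true:  all fields are zero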


@ -0,0 +1,111 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package value
import (
"fmt"
"math"
"reflect"
"sort"
)
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
// The type of each value must be comparable.
func SortKeys(vs []reflect.Value) []reflect.Value {
if len(vs) == 0 {
return vs
}
// Sort the map keys.
sort.Sort(valueSorter(vs))
// Deduplicate keys (fails for NaNs).
vs2 := vs[:1]
for _, v := range vs[1:] {
if isLess(vs2[len(vs2)-1], v) {
vs2 = append(vs2, v)
}
}
return vs2
}
// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
type valueSorter []reflect.Value
func (vs valueSorter) Len() int { return len(vs) }
func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
// isLess is a generic function for sorting arbitrary map keys.
// The inputs must be of the same type and must be comparable.
func isLess(x, y reflect.Value) bool {
switch x.Type().Kind() {
case reflect.Bool:
return !x.Bool() && y.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return x.Int() < y.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return x.Uint() < y.Uint()
case reflect.Float32, reflect.Float64:
fx, fy := x.Float(), y.Float()
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
case reflect.Complex64, reflect.Complex128:
cx, cy := x.Complex(), y.Complex()
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
}
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
return x.Pointer() < y.Pointer()
case reflect.String:
return x.String() < y.String()
case reflect.Array:
for i := 0; i < x.Len(); i++ {
if isLess(x.Index(i), y.Index(i)) {
return true
}
if isLess(y.Index(i), x.Index(i)) {
return false
}
}
return false
case reflect.Struct:
for i := 0; i < x.NumField(); i++ {
if isLess(x.Field(i), y.Field(i)) {
return true
}
if isLess(y.Field(i), x.Field(i)) {
return false
}
}
return false
case reflect.Interface:
vx, vy := x.Elem(), y.Elem()
if !vx.IsValid() || !vy.IsValid() {
return !vx.IsValid() && vy.IsValid()
}
tx, ty := vx.Type(), vy.Type()
if tx == ty {
return isLess(x.Elem(), y.Elem())
}
if tx.Kind() != ty.Kind() {
return vx.Kind() < vy.Kind()
}
if tx.String() != ty.String() {
return tx.String() < ty.String()
}
if tx.PkgPath() != ty.PkgPath() {
return tx.PkgPath() < ty.PkgPath()
}
// This can happen in rare situations, so we fall back to just comparing
// the unique pointer for a reflect.Type. This guarantees deterministic
// ordering within a program, but it is obviously not stable.
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
default:
// Must be Func, Map, or Slice; which are not comparable.
panic(fmt.Sprintf("%T is not comparable", x.Type()))
}
}
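A minimal sketch of how SortKeys is used to walk a map deterministically (internal package, illustrative only; assumes fmt is imported):
m := map[string]int{"b": 2, "a": 1, "c": 3}
v := reflect.ValueOf(m)
for _, k := range SortKeys(v.MapKeys()) {
	fmt.Println(k.String(), v.MapIndex(k).Int()) // visits keys in order: a, b, c
}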

453
vendor/github.com/google/go-cmp/cmp/options.go generated vendored Normal file

@ -0,0 +1,453 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"runtime"
"strings"
"github.com/google/go-cmp/cmp/internal/function"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// filter applies all filters and returns the option that remains.
// Each option may only read s.curPath and call s.callTTBFunc.
//
// An Options is returned only if multiple comparers or transformers
// can apply simultaneously and will only contain values of those types
// or sub-Options containing values of those types.
filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
}
// applicableOption represents the following types:
// Fundamental: ignore | invalid | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
Option
// apply executes the option, which may mutate s or panic.
apply(s *state, vx, vy reflect.Value)
}
// coreOption represents the following types:
// Fundamental: ignore | invalid | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
Option
isCore()
}
type core struct{}
func (core) isCore() {}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
for _, opt := range opts {
switch opt := opt.filter(s, vx, vy, t); opt.(type) {
case ignore:
return ignore{} // Only ignore can short-circuit evaluation
case invalid:
out = invalid{} // Takes precedence over comparer or transformer
case *comparer, *transformer, Options:
switch out.(type) {
case nil:
out = opt
case invalid:
// Keep invalid
case *comparer, *transformer, Options:
out = Options{out, opt} // Conflicting comparers or transformers
}
}
}
return out
}
func (opts Options) apply(s *state, _, _ reflect.Value) {
const warning = "ambiguous set of applicable options"
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
var ss []string
for _, opt := range flattenOptions(nil, opts) {
ss = append(ss, fmt.Sprint(opt))
}
set := strings.Join(ss, "\n\t")
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
}
func (opts Options) String() string {
var ss []string
for _, opt := range opts {
ss = append(ss, fmt.Sprint(opt))
}
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
if opt := normalizeOption(opt); opt != nil {
return &pathFilter{fnc: f, opt: opt}
}
return nil
}
type pathFilter struct {
core
fnc func(Path) bool
opt Option
}
func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
if f.fnc(s.curPath) {
return f.opt.filter(s, vx, vy, t)
}
return nil
}
func (f pathFilter) String() string {
fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
}
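A sketch of typical client usage through the public cmp API (the record type and CreatedAt field are hypothetical; assumes the cmp and time packages are imported):
type record struct {
	ID        int
	CreatedAt time.Time
}
ignoreCreatedAt := cmp.FilterPath(func(p cmp.Path) bool {
	return p.Last().String() == ".CreatedAt" // struct-field steps print as ".Name"
}, cmp.Ignore())
_ = cmp.Equal(record{ID: 1, CreatedAt: time.Now()}, record{ID: 1}, ignoreCreatedAt) // true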
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
if opt := normalizeOption(opt); opt != nil {
vf := &valuesFilter{fnc: v, opt: opt}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
vf.typ = ti
}
return vf
}
return nil
}
type valuesFilter struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
opt Option
}
func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
if !vx.IsValid() || !vy.IsValid() {
return invalid{}
}
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
return f.opt.filter(s, vx, vy, t)
}
return nil
}
func (f valuesFilter) String() string {
fn := getFuncName(f.fnc.Pointer())
return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
}
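A sketch of a common use: restrict an approximate float comparer to non-NaN values (public cmp API; assumes the math package is imported):
approx := cmp.FilterValues(func(x, y float64) bool {
	return !math.IsNaN(x) && !math.IsNaN(y)
}, cmp.Comparer(func(x, y float64) bool {
	return math.Abs(x-y) < 1e-9
}))
a, b := 0.1, 0.2
_ = cmp.Equal(a+b, 0.3, approx) // true: the values differ only by floating-point error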
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option { return ignore{} }
type ignore struct{ core }
func (ignore) isFiltered() bool { return false }
func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
func (ignore) apply(_ *state, _, _ reflect.Value) { return }
func (ignore) String() string { return "Ignore()" }
// invalid is a sentinel Option type to indicate that some options could not
// be evaluated due to unexported fields.
type invalid struct{ core }
func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
func (invalid) apply(s *state, _, _ reflect.Value) {
const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
//
// To help prevent some cases of infinite recursive cycles applying the
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
tr.typ = ti
}
return tr
}
type transformer struct {
core
name string
typ reflect.Type // T
fnc reflect.Value // func(T) R
}
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
for i := len(s.curPath) - 1; i >= 0; i-- {
if t, ok := s.curPath[i].(*transform); !ok {
break // Hit most recent non-Transform step
} else if tr == t.trans {
return nil // Cannot directly use same Transform
}
}
if tr.typ == nil || t.AssignableTo(tr.typ) {
return tr
}
return nil
}
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
// Update path before calling the Transformer so that dynamic checks
// will use the updated path.
s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
defer s.curPath.pop()
vx = s.callTRFunc(tr.fnc, vx)
vy = s.callTRFunc(tr.fnc, vy)
s.compareAny(vx, vy)
}
func (tr transformer) String() string {
return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
}
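A sketch of the common "compare as sorted" pattern (public cmp API; note the transformer copies its input instead of mutating it; assumes the sort package is imported):
sorted := cmp.Transformer("Sorted", func(in []int) []int {
	out := append([]int(nil), in...) // copy, since a transformer must not mutate its input
	sort.Ints(out)
	return out
})
_ = cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, sorted) // true: both sort to [1 2 3]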
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
cm := &comparer{fnc: v}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
cm.typ = ti
}
return cm
}
type comparer struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
if cm.typ == nil || t.AssignableTo(cm.typ) {
return cm
}
return nil
}
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
eq := s.callTTBFunc(cm.fnc, vx, vy)
s.report(eq, vx, vy)
}
func (cm comparer) String() string {
return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
}
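For instance, a sketch that compares time.Time values by instant, which also sidesteps their unexported fields (public cmp API; assumes the time package is imported):
timeCmp := cmp.Comparer(func(x, y time.Time) bool {
	return x.Equal(y) // same instant, even if the locations differ
})
utc := time.Date(2019, 2, 9, 17, 25, 27, 0, time.UTC)
_ = cmp.Equal(utc, utc.Local(), timeCmp) // true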
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
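A sketch, assuming a locally defined type whose unexported fields the caller controls (the counter type is hypothetical):
type counter struct {
	n int // unexported
}
_ = cmp.Equal(counter{n: 1}, counter{n: 1}, cmp.AllowUnexported(counter{})) // true
// Without the option, Equal panics because it cannot access the unexported field n.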
type visibleStructs map[reflect.Type]bool
func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
panic("not implemented")
}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
}
// normalizeOption normalizes the input options such that all Options groups
// are flattened and groups with a single element are reduced to that element.
// Only coreOptions and Options containing coreOptions are allowed.
func normalizeOption(src Option) Option {
switch opts := flattenOptions(nil, Options{src}); len(opts) {
case 0:
return nil
case 1:
return opts[0]
default:
return opts
}
}
// flattenOptions copies all options in src to dst as a flat list.
// Only coreOptions and Options containing coreOptions are allowed.
func flattenOptions(dst, src Options) Options {
for _, opt := range src {
switch opt := opt.(type) {
case nil:
continue
case Options:
dst = flattenOptions(dst, opt)
case coreOption:
dst = append(dst, opt)
default:
panic(fmt.Sprintf("invalid option type: %T", opt))
}
}
return dst
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string {
fnc := runtime.FuncForPC(p)
if fnc == nil {
return "<unknown>"
}
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}

309
vendor/github.com/google/go-cmp/cmp/path.go generated vendored Normal file

@ -0,0 +1,309 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
type (
// Path is a list of PathSteps describing the sequence of operations to get
// from some root type to the current position in the value tree.
// The first Path element is always an operation-less PathStep that exists
// simply to identify the initial type.
//
// When traversing structs with embedded structs, the embedded struct will
// always be accessed as a field before traversing the fields of the
// embedded struct themselves. That is, an exported field from the
// embedded struct will never be accessed directly from the parent struct.
Path []PathStep
// PathStep is a union-type for specific operations to traverse
// a value's tree structure. Users of this package never need to implement
// these types as values of this type will be returned by this package.
PathStep interface {
String() string
Type() reflect.Type // Resulting type after performing the path step
isPathStep()
}
// SliceIndex is an index operation on a slice or array at some index Key.
SliceIndex interface {
PathStep
Key() int // May return -1 if in a split state
// SplitKeys returns the indexes for indexing into slices in the
// x and y values, respectively. These indexes may differ due to the
// insertion or removal of an element in one of the slices, causing
// all of the indexes to be shifted. If an index is -1, then that
// indicates that the element does not exist in the associated slice.
//
// Key is guaranteed to return -1 if and only if the indexes returned
// by SplitKeys are not the same. SplitKeys will never return -1 for
// both indexes.
SplitKeys() (x int, y int)
isSliceIndex()
}
// MapIndex is an index operation on a map at some index Key.
MapIndex interface {
PathStep
Key() reflect.Value
isMapIndex()
}
// TypeAssertion represents a type assertion on an interface.
TypeAssertion interface {
PathStep
isTypeAssertion()
}
// StructField represents a struct field access on a field called Name.
StructField interface {
PathStep
Name() string
Index() int
isStructField()
}
// Indirect represents pointer indirection on the parent type.
Indirect interface {
PathStep
isIndirect()
}
// Transform is a transformation from the parent type to the current type.
Transform interface {
PathStep
Name() string
Func() reflect.Value
// Option returns the originally constructed Transformer option.
// The == operator can be used to detect the exact option used.
Option() Option
isTransform()
}
)
func (pa *Path) push(s PathStep) {
*pa = append(*pa, s)
}
func (pa *Path) pop() {
*pa = (*pa)[:len(*pa)-1]
}
// Last returns the last PathStep in the Path.
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Last() PathStep {
return pa.Index(-1)
}
// Index returns the ith step in the Path and supports negative indexing.
// A negative index starts counting from the tail of the Path such that -1
// refers to the last step, -2 refers to the second-to-last step, and so on.
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Index(i int) PathStep {
if i < 0 {
i = len(pa) + i
}
if i < 0 || i >= len(pa) {
return pathStep{}
}
return pa[i]
}
// String returns the simplified path to a node.
// The simplified path only contains struct field accesses.
//
// For example:
// MyMap.MySlices.MyField
func (pa Path) String() string {
var ss []string
for _, s := range pa {
if _, ok := s.(*structField); ok {
ss = append(ss, s.String())
}
}
return strings.TrimPrefix(strings.Join(ss, ""), ".")
}
// GoString returns the path to a specific node using Go syntax.
//
// For example:
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
var ssPre, ssPost []string
var numIndirect int
for i, s := range pa {
var nextStep PathStep
if i+1 < len(pa) {
nextStep = pa[i+1]
}
switch s := s.(type) {
case *indirect:
numIndirect++
pPre, pPost := "(", ")"
switch nextStep.(type) {
case *indirect:
continue // Next step is indirection, so let them batch up
case *structField:
numIndirect-- // Automatic indirection on struct fields
case nil:
pPre, pPost = "", "" // Last step; no need for parentheses
}
if numIndirect > 0 {
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
ssPost = append(ssPost, pPost)
}
numIndirect = 0
continue
case *transform:
ssPre = append(ssPre, s.trans.name+"(")
ssPost = append(ssPost, ")")
continue
case *typeAssertion:
// As a special-case, elide type assertions on anonymous types
// since they are typically generated dynamically and can be very
// verbose. For example, some transforms return interface{} because
// of Go's lack of generics, but typically take in and return the
// exact same concrete type.
if s.Type().PkgPath() == "" {
continue
}
}
ssPost = append(ssPost, s.String())
}
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
}
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
}
type (
pathStep struct {
typ reflect.Type
}
sliceIndex struct {
pathStep
xkey, ykey int
}
mapIndex struct {
pathStep
key reflect.Value
}
typeAssertion struct {
pathStep
}
structField struct {
pathStep
name string
idx int
// These fields are used for forcibly accessing an unexported field.
// pvx, pvy, and field are only valid if unexported is true.
unexported bool
force bool // Forcibly allow visibility
pvx, pvy reflect.Value // Parent values
field reflect.StructField // Field information
}
indirect struct {
pathStep
}
transform struct {
pathStep
trans *transformer
}
)
func (ps pathStep) Type() reflect.Type { return ps.typ }
func (ps pathStep) String() string {
if ps.typ == nil {
return "<nil>"
}
s := ps.typ.String()
if s == "" || strings.ContainsAny(s, "{}\n") {
return "root" // Type too simple or complex to print
}
return fmt.Sprintf("{%s}", s)
}
func (si sliceIndex) String() string {
switch {
case si.xkey == si.ykey:
return fmt.Sprintf("[%d]", si.xkey)
case si.ykey == -1:
// [5->?] means "I don't know where X[5] went"
return fmt.Sprintf("[%d->?]", si.xkey)
case si.xkey == -1:
// [?->3] means "I don't know where Y[3] came from"
return fmt.Sprintf("[?->%d]", si.ykey)
default:
// [5->3] means "X[5] moved to Y[3]"
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
}
}
func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) }
func (in indirect) String() string { return "*" }
func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
func (si sliceIndex) Key() int {
if si.xkey != si.ykey {
return -1
}
return si.xkey
}
func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
func (mi mapIndex) Key() reflect.Value { return mi.key }
func (sf structField) Name() string { return sf.name }
func (sf structField) Index() int { return sf.idx }
func (tf transform) Name() string { return tf.trans.name }
func (tf transform) Func() reflect.Value { return tf.trans.fnc }
func (tf transform) Option() Option { return tf.trans }
func (pathStep) isPathStep() {}
func (sliceIndex) isSliceIndex() {}
func (mapIndex) isMapIndex() {}
func (typeAssertion) isTypeAssertion() {}
func (structField) isStructField() {}
func (indirect) isIndirect() {}
func (transform) isTransform() {}
var (
_ SliceIndex = sliceIndex{}
_ MapIndex = mapIndex{}
_ TypeAssertion = typeAssertion{}
_ StructField = structField{}
_ Indirect = indirect{}
_ Transform = transform{}
_ PathStep = sliceIndex{}
_ PathStep = mapIndex{}
_ PathStep = typeAssertion{}
_ PathStep = structField{}
_ PathStep = indirect{}
_ PathStep = transform{}
)
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
// isValid reports whether the identifier is valid.
// Empty and underscore-only strings are not valid.
func isValid(id string) bool {
ok := id != "" && id != "_"
for j, c := range id {
ok = ok && (j > 0 || !unicode.IsDigit(c))
ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
}
return ok
}

53
vendor/github.com/google/go-cmp/cmp/reporter.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/value"
)
type defaultReporter struct {
Option
diffs []string // List of differences, possibly truncated
ndiffs int // Total number of differences
nbytes int // Number of bytes in diffs
nlines int // Number of lines in diffs
}
var _ reporter = (*defaultReporter)(nil)
func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
if eq {
return // Ignore equal results
}
const maxBytes = 4096
const maxLines = 256
r.ndiffs++
if r.nbytes < maxBytes && r.nlines < maxLines {
sx := value.Format(x, value.FormatConfig{UseStringer: true})
sy := value.Format(y, value.FormatConfig{UseStringer: true})
if sx == sy {
// Unhelpful output, so use more exact formatting.
sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
}
s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
r.diffs = append(r.diffs, s)
r.nbytes += len(s)
r.nlines += strings.Count(s, "\n")
}
}
func (r *defaultReporter) String() string {
s := strings.Join(r.diffs, "")
if r.ndiffs == len(r.diffs) {
return s
}
return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
}

15
vendor/github.com/google/go-cmp/cmp/unsafe_panic.go generated vendored Normal file

@ -0,0 +1,15 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build purego appengine js
package cmp
import "reflect"
const supportAllowUnexported = false
func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
panic("unsafeRetrieveField is not implemented")
}

23
vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !purego,!appengine,!js
package cmp
import (
"reflect"
"unsafe"
)
const supportAllowUnexported = true
// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
// such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve.
func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
}

28
vendor/github.com/intel-go/cpuid/LICENSE generated vendored Normal file

@ -0,0 +1,28 @@
Copyright (c) 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

537
vendor/github.com/intel-go/cpuid/cpuid.go generated vendored Normal file

@ -0,0 +1,537 @@
// Copyright 2015 Intel Corporation.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cpuid provides access to the information available
// through the CPUID instruction.
// All information is gathered during the package initialization phase,
// so the package's public interface doesn't call the CPUID instruction.
package cpuid
// VendorIdentificatorString is the vendor identification string, e.g. "GenuineIntel" or "AuthenticAMD"
var VendorIdentificatorString string
// ProcessorBrandString is the processor brand string, e.g. "Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz"
var ProcessorBrandString string
// SteppingId is the processor stepping ID as described in
// Intel® 64 and IA-32 Architectures Software Developers Manual
var SteppingId uint32
// ProcessorType is obtained from processor Version Information, according to
// Intel® 64 and IA-32 Architectures Software Developers Manual
var ProcessorType uint32
// DisplayFamily is the family of the processor obtained from processor Version Information, according to
// Intel® 64 and IA-32 Architectures Software Developers Manual
var DisplayFamily uint32
// DisplayModel is the model of the processor obtained from processor Version Information, according to
// Intel® 64 and IA-32 Architectures Software Developers Manual
var DisplayModel uint32
// Cache line size in bytes
var CacheLineSize uint32
// Maximum number of addressable IDs for logical processors in this physical package
var MaxLogocalCPUId uint32
// Initial APIC ID
var InitialAPICId uint32
// CacheDescriptors is the array of cache descriptors.
// You can iterate over it like this:
// for _, cacheDescription := range cpuid.CacheDescriptors {
// fmt.Printf("CacheDescriptor: %v\n", cacheDescription)
// }
// See CacheDescriptor type for more information
var CacheDescriptors []CacheDescriptor
// Smallest monitor-line size in bytes (default is processor's monitor granularity)
var MonLineSizeMin uint32
// Largest monitor-line size in bytes (default is processor's monitor granularity)
var MonLineSizeMax uint32
// Enumeration of Monitor-Mwait extensions availability status
var MonitorEMX bool
// Supports treating interrupts as break-event for MWAIT flag
var MonitorIBE bool
// EnabledAVX reports whether the AVX feature is enabled by the OS/BIOS
var EnabledAVX bool = false
// EnabledAVX512 reports whether the AVX512xxx features are enabled by the OS/BIOS
var EnabledAVX512 bool = false
type CacheDescriptor struct {
Level int // Cache level
CacheType int // Cache type
CacheName string // Name
CacheSize int // in KBytes (of page size for TLB)
Ways int // Associativity, 0 undefined, 0xFF fully associate
LineSize int // Cache line size in bytes
Entries int // number of entries for TLB
Partioning int // partitioning
}
// ThermalSensorInterruptThresholds is the number of interrupt thresholds in digital thermal sensor.
var ThermalSensorInterruptThresholds uint32
// HasFeature reports whether a feature from the FeatureNames map is available on the current processor
func HasFeature(feature uint64) bool {
return (featureFlags & feature) != 0
}
// HasExtendedFeature reports whether a feature from the ExtendedFeatureNames map is available on the current processor
func HasExtendedFeature(feature uint64) bool {
return (extendedFeatureFlags & feature) != 0
}
// HasExtraFeature reports whether a feature from the ExtraFeatureNames map is available on the current processor
func HasExtraFeature(feature uint64) bool {
return (extraFeatureFlags & feature) != 0
}
// HasThermalAndPowerFeature reports whether a feature from the ThermalAndPowerFeatureNames map is available on the current processor
func HasThermalAndPowerFeature(feature uint32) bool {
return (thermalAndPowerFeatureFlags & feature) != 0
}
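A minimal usage sketch of the exported surface above (illustrative; assumes the fmt package is imported):
fmt.Println("CPU:   ", cpuid.ProcessorBrandString)
fmt.Println("Vendor:", cpuid.VendorIdentificatorString)
if cpuid.HasFeature(cpuid.VMX) {
	fmt.Println("VT-x (VMX) is reported by CPUID")
}
for _, d := range cpuid.CacheDescriptors {
	fmt.Printf("cache/TLB descriptor: %+v\n", d)
}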
var FeatureNames = map[uint64]string{
SSE3: "SSE3",
PCLMULQDQ: "PCLMULQDQ",
DTES64: "DTES64",
MONITOR: "MONITOR",
DSI_CPL: "DSI_CPL",
VMX: "VMX",
SMX: "SMX",
EST: "EST",
TM2: "TM2",
SSSE3: "SSSE3",
CNXT_ID: "CNXT_ID",
SDBG: "SDBG",
FMA: "FMA",
CX16: "CX16",
XTPR: "XTPR",
PDCM: "PDCM",
PCID: "PCID",
DCA: "DCA",
SSE4_1: "SSE4_1",
SSE4_2: "SSE4_2",
X2APIC: "X2APIC",
MOVBE: "MOVBE",
POPCNT: "POPCNT",
TSC_DEADLINE: "TSC_DEADLINE",
AES: "AES",
XSAVE: "XSAVE",
OSXSAVE: "OSXSAVE",
AVX: "AVX",
F16C: "F16C",
RDRND: "RDRND",
HYPERVISOR: "HYPERVISOR",
FPU: "FPU",
VME: "VME",
DE: "DE",
PSE: "PSE",
TSC: "TSC",
MSR: "MSR",
PAE: "PAE",
MCE: "MCE",
CX8: "CX8",
APIC: "APIC",
SEP: "SEP",
MTRR: "MTRR",
PGE: "PGE",
MCA: "MCA",
CMOV: "CMOV",
PAT: "PAT",
PSE_36: "PSE_36",
PSN: "PSN",
CLFSH: "CLFSH",
DS: "DS",
ACPI: "ACPI",
MMX: "MMX",
FXSR: "FXSR",
SSE: "SSE",
SSE2: "SSE2",
SS: "SS",
HTT: "HTT",
TM: "TM",
IA64: "IA64",
PBE: "PBE",
}
var ThermalAndPowerFeatureNames = map[uint32]string{ // From leaf06
ARAT: "ARAT",
PLN: "PLN",
ECMD: "ECMD",
PTM: "PTM",
HDC: "HDC",
HCFC: "HCFC",
HWP: "HWP",
HWP_NOTIF: "HWP_NOTIF",
HWP_ACTIVITY_WINDOW: "HWP_ACTIVITY_WINDOW",
HWP_ENERGY_PERFORMANCE: "HWP_ENERGY_PERFORMANCE",
HWP_PACKAGE_LEVEL_REQUEST: "HWP_PACKAGE_LEVEL_REQUEST",
PERFORMANCE_ENERGY_BIAS: "PERFORMANCE_ENERGY_BIAS",
TEMPERATURE_SENSOR: "TEMPERATURE_SENSOR",
TURBO_BOOST: "TURBO_BOOST",
TURBO_BOOST_MAX: "TURBO_BOOST_MAX",
}
var ExtendedFeatureNames = map[uint64]string{ // From leaf07
FSGSBASE: "FSGSBASE",
IA32_TSC_ADJUST: "IA32_TSC_ADJUST",
BMI1: "BMI1",
HLE: "HLE",
AVX2: "AVX2",
SMEP: "SMEP",
BMI2: "BMI2",
ERMS: "ERMS",
INVPCID: "INVPCID",
RTM: "RTM",
PQM: "PQM",
DFPUCDS: "DFPUCDS",
MPX: "MPX",
PQE: "PQE",
AVX512F: "AVX512F",
AVX512DQ: "AVX512DQ",
RDSEED: "RDSEED",
ADX: "ADX",
SMAP: "SMAP",
AVX512IFMA: "AVX512IFMA",
PCOMMIT: "PCOMMIT",
CLFLUSHOPT: "CLFLUSHOPT",
CLWB: "CLWB",
INTEL_PROCESSOR_TRACE: "INTEL_PROCESSOR_TRACE",
AVX512PF: "AVX512PF",
AVX512ER: "AVX512ER",
AVX512CD: "AVX512CD",
SHA: "SHA",
AVX512BW: "AVX512BW",
AVX512VL: "AVX512VL",
PREFETCHWT1: "PREFETCHWT1",
AVX512VBMI: "AVX512VBMI",
}
var ExtraFeatureNames = map[uint64]string{ // From leaf 8000 0001
LAHF_LM: "LAHF_LM",
CMP_LEGACY: "CMP_LEGACY",
SVM: "SVM",
EXTAPIC: "EXTAPIC",
CR8_LEGACY: "CR8_LEGACY",
ABM: "ABM",
SSE4A: "SSE4A",
MISALIGNSSE: "MISALIGNSSE",
PREFETCHW: "PREFETCHW",
OSVW: "OSVW",
IBS: "IBS",
XOP: "XOP",
SKINIT: "SKINIT",
WDT: "WDT",
LWP: "LWP",
FMA4: "FMA4",
TCE: "TCE",
NODEID_MSR: "NODEID_MSR",
TBM: "TBM",
TOPOEXT: "TOPOEXT",
PERFCTR_CORE: "PERFCTR_CORE",
PERFCTR_NB: "PERFCTR_NB",
SPM: "SPM",
DBX: "DBX",
PERFTSC: "PERFTSC",
PCX_L2I: "PCX_L2I",
FPU_2: "FPU",
VME_2: "VME",
DE_2: "DE",
PSE_2: "PSE",
TSC_2: "TSC",
MSR_2: "MSR",
PAE_2: "PAE",
MCE_2: "MCE",
CX8_2: "CX8",
APIC_2: "APIC",
SYSCALL: "SYSCALL",
MTRR_2: "MTRR",
PGE_2: "PGE",
MCA_2: "MCA",
CMOV_2: "CMOV",
PAT_2: "PAT",
PSE36: "PSE36",
MP: "MP",
NX: "NX",
MMXEXT: "MMXEXT",
MMX_2: "MMX",
FXSR_2: "FXSR",
FXSR_OPT: "FXSR_OPT",
PDPE1GB: "PDPE1GB",
RDTSCP: "RDTSCP",
LM: "LM",
_3DNOWEXT: "3DNOWEXT",
_3DNOW: "3DNOW",
}
var brandStrings = map[string]int{
"AMDisbetter!": AMD,
"AuthenticAMD": AMD,
"CentaurHauls": CENTAUR,
"CyrixInstead": CYRIX,
"GenuineIntel": INTEL,
"TransmetaCPU": TRANSMETA,
"GenuineTMx86": TRANSMETA,
"Geode by NSC": NATIONALSEMICONDUCTOR,
"NexGenDriven": NEXGEN,
"RiseRiseRise": RISE,
"SiS SiS SiS ": SIS,
"UMC UMC UMC ": UMC,
"VIA VIA VIA ": VIA,
"Vortex86 SoC": VORTEX,
"KVMKVMKVM": KVM,
"Microsoft Hv": HYPERV,
"VMwareVMware": VMWARE,
"XenVMMXenVMM": XEN,
}
var maxInputValue uint32
var maxExtendedInputValue uint32
var extendedModelId uint32
var extendedFamilyId uint32
var brandIndex uint32
var brandId int
var featureFlags uint64
var thermalAndPowerFeatureFlags uint32
var extendedFeatureFlags uint64
var extraFeatureFlags uint64
const (
UKNOWN = iota
AMD
CENTAUR
CYRIX
INTEL
TRANSMETA
NATIONALSEMICONDUCTOR
NEXGEN
RISE
SIS
UMC
VIA
VORTEX
KVM
HYPERV
VMWARE
XEN
)
const (
SSE3 = uint64(1) << iota
PCLMULQDQ
DTES64
MONITOR
DSI_CPL
VMX
SMX
EST
TM2
SSSE3
CNXT_ID
SDBG
FMA
CX16
XTPR
PDCM
_
PCID
DCA
SSE4_1
SSE4_2
X2APIC
MOVBE
POPCNT
TSC_DEADLINE
AES
XSAVE
OSXSAVE
AVX
F16C
RDRND
HYPERVISOR
FPU
VME
DE
PSE
TSC
MSR
PAE
MCE
CX8
APIC
_
SEP
MTRR
PGE
MCA
CMOV
PAT
PSE_36
PSN
CLFSH
_
DS
ACPI
MMX
FXSR
SSE
SSE2
SS
HTT
TM
IA64
PBE
)
const (
FSGSBASE = uint64(1) << iota
IA32_TSC_ADJUST
_
BMI1
HLE
AVX2
_
SMEP
BMI2
ERMS
INVPCID
RTM
PQM
DFPUCDS
MPX
PQE
AVX512F
AVX512DQ
RDSEED
ADX
SMAP
AVX512IFMA
PCOMMIT
CLFLUSHOPT
CLWB
INTEL_PROCESSOR_TRACE
AVX512PF
AVX512ER
AVX512CD
SHA
AVX512BW
AVX512VL
// ECX constants from here on
PREFETCHWT1
AVX512VBMI
)
const (
LAHF_LM = uint64(1) << iota
CMP_LEGACY
SVM
EXTAPIC
CR8_LEGACY
ABM
SSE4A
MISALIGNSSE
PREFETCHW
OSVW
IBS
XOP
SKINIT
WDT
_
LWP
FMA4
TCE
_
NODEID_MSR
_
TBM
TOPOEXT
PERFCTR_CORE
PERFCTR_NB
SPM
DBX
PERFTSC
PCX_L2I
_
_
_
// EDX features from here on
FPU_2
VME_2
DE_2
PSE_2
TSC_2
MSR_2
PAE_2
MCE_2
CX8_2
APIC_2
_
SYSCALL
MTRR_2
PGE_2
MCA_2
CMOV_2
PAT_2
PSE36
_
MP
NX
_
MMXEXT
MMX_2
FXSR_2
FXSR_OPT
PDPE1GB
RDTSCP
_
LM
_3DNOWEXT
_3DNOW
)
// Thermal and Power Management features
const (
// EAX bits 0-15
TEMPERATURE_SENSOR = uint32(1) << iota // Digital temperature sensor
TURBO_BOOST // Intel Turbo Boost Technology available
ARAT // APIC-Timer-always-running feature is supported if set.
_ // Reserved
PLN // Power limit notification controls
ECMD // Clock modulation duty cycle extension
PTM // Package thermal management
HWP // HWP base registers (IA32_PM_ENABLE[bit 0], IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS)
HWP_NOTIF // IA32_HWP_INTERRUPT MSR
HWP_ACTIVITY_WINDOW // IA32_HWP_REQUEST[bits 41:32]
HWP_ENERGY_PERFORMANCE // IA32_HWP_REQUEST[bits 31:24]
HWP_PACKAGE_LEVEL_REQUEST // IA32_HWP_REQUEST_PKG MSR
_ // Reserved (eax bit 12)
HDC // HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1, IA32_THREAD_STALL MSRs
TURBO_BOOST_MAX // Intel® Turbo Boost Max Technology
_ // Reserved (eax bit 15)
// ECX bits 0-15
HCFC // Hardware Coordination Feedback Capability
_
_
PERFORMANCE_ENERGY_BIAS // Processor supports performance-energy bias preference
)
const (
NULL = iota
DATA_CACHE
INSTRUCTION_CACHE
UNIFIED_CACHE
TLB
DTLB
STLB
PREFETCH
)

595
vendor/github.com/intel-go/cpuid/cpuid_amd64.go generated vendored Normal file

@ -0,0 +1,595 @@
// +build amd64
// Copyright 2015 Intel Corporation.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cpuid provides access to the information available
// through the CPUID instruction.
// All information is gathered during the package initialization phase,
// so the package's public interface doesn't call the CPUID instruction.
package cpuid
func cpuid_low(arg1, arg2 uint32) (eax, ebx, ecx, edx uint32) // implemented in cpuidlow_amd64.s
func xgetbv_low(arg1 uint32) (eax, edx uint32) // implemented in cpuidlow_amd64.s
func init() {
detectFeatures()
}
func detectFeatures() {
leaf0()
leaf1()
leaf2()
leaf3()
leaf4()
leaf5()
leaf6()
leaf7()
leaf0x80000000()
leaf0x80000001()
leaf0x80000004()
leaf0x80000005()
leaf0x80000006()
if HasFeature(OSXSAVE) {
eax, _ := xgetbv_low(0)
if (eax & 0x6) == 0x6 {
EnabledAVX = true
}
if (eax & 0xE0) == 0xE0 {
EnabledAVX512 = true
}
}
}
var leaf02Names = [...]string{
"NULL",
"DATA_CACHE",
"INSTRUCTION_CACHE",
"UNIFIED_CACHE",
"TLB",
"DTLB",
"STLB",
"PREFETCH",
}
func leaf0() {
eax, ebx, ecx, edx := cpuid_low(0, 0)
maxInputValue = eax
VendorIdentificatorString = string(int32sToBytes(ebx, edx, ecx))
brandId = brandStrings[VendorIdentificatorString]
}
func leaf1() {
if maxInputValue < 1 {
return
}
eax, ebx, ecx, edx := cpuid_low(1, 0)
// Parse EAX
SteppingId = (eax & 0xF)
modelId := (eax >> 4) & 0xF
familyId := (eax >> 8) & 0xF
ProcessorType = (eax >> 12) & 0x3
ExtendedModelId := (eax >> 16) & 0xF
extendedFamilyId := (eax >> 20) & 0xFF
DisplayFamily = familyId
DisplayModel = modelId
if familyId == 0xF {
DisplayFamily = extendedFamilyId + familyId
}
if familyId == 0x6 || familyId == 0xF {
DisplayModel = ExtendedModelId<<4 + modelId
}
// Parse EBX
brandIndex = ebx & 0xFF
CacheLineSize = ((ebx >> 8) & 0xFF) << 3
MaxLogocalCPUId = (ebx >> 16) & 0xFF
InitialAPICId = (ebx >> 24)
// Parse ECX & EDX not needed. Ask through HasFeature function
featureFlags = (uint64(edx) << 32) | uint64(ecx)
}
func leaf2() {
if brandId != INTEL {
return
}
if maxInputValue < 2 {
return
}
bytes := int32sToBytes(cpuid_low(2, 0))
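// Leaf 2 packs four one-byte cache/TLB descriptors into each register; a set
// bit 31 marks that register's bytes as invalid, and descriptor 0xFF means the
// cache information must be queried via leaf 4 instead.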
for i := 0; i < len(bytes); i++ {
if (i%4 == 0) && (bytes[i+3]&(1<<7) != 0) {
i += 4
continue
}
if bytes[i] == 0xFF { // it means that we should use leaf 4 for cache info
CacheDescriptors = CacheDescriptors[0:0]
break
}
CacheDescriptors = append(CacheDescriptors, leaf02Descriptors[int16(bytes[i])])
}
}
func leaf3() {
if brandId != INTEL {
return
}
if maxInputValue < 3 {
return
}
// TODO SerialNumber for < Pentium 4
}
func leaf4() {
if brandId != INTEL {
return
}
if maxInputValue < 4 {
return
}
cacheId := 0
for {
eax, ebx, ecx, _ := cpuid_low(4, uint32(cacheId))
cacheId++
cacheType := eax & 0xF
if cacheType == NULL {
break
}
cacheLevel := (eax >> 5) & 0x7
// selfInitializingCacheLevel := eax & (1<<8)
// fullyAssociativeCache := eax & (1<<9)
// maxNumLogicalCoresSharing := (eax >> 14) & 0x3FF
// maxNumPhisCores := (eax >> 26) & 0x3F
systemCoherencyLineSize := (ebx & 0xFFF) + 1
physicalLinePartions := (ebx>>12)&0x3FF + 1
waysOfAssiociativity := (ebx>>22)&0x3FF + 1
numberOfSets := ecx + 1
// writeBackInvalidate := edx & 1
// cacheInclusiveness := edx & (1<<1)
// complexCacheIndexing := edx & (1<<2)
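// All four fields above are stored minus one, so the size in KB is
// ways * partitions * line size * sets, shifted right by 10 (bytes -> KB).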
cacheSize := (waysOfAssiociativity * physicalLinePartions *
systemCoherencyLineSize * numberOfSets) >> 10
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{int(cacheLevel),
int(cacheType),
"",
int(cacheSize),
int(waysOfAssiociativity),
int(systemCoherencyLineSize),
int(numberOfSets),
int(physicalLinePartions),
})
}
}
func leaf5() {
if maxInputValue < 5 {
return
}
eax, ebx, ecx, _ := cpuid_low(5, 0) // TODO process EDX with C0-C7 C-states
MonLineSizeMin = eax & (0xFFFF)
MonLineSizeMax = ebx & (0xFFFF)
MonitorEMX = (ecx & (1 << 0)) != 0
MonitorIBE = (ecx & (1 << 1)) != 0
}
func leaf6() {
// Thermal and Power Management Features for Intel
if maxInputValue < 6 {
return
}
eax, ebx, ecx, _ := cpuid_low(6, 0)
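// Pack EAX bits 0-15 with ECX bits 0-15 shifted up to bit 16, matching the
// Thermal and Power Management const block above.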
thermalAndPowerFeatureFlags = (eax & 0xFFFF) | (ecx << 16)
ThermalSensorInterruptThresholds = ebx & 7
}
func leaf7() {
_, ebx, ecx, _ := cpuid_low(7, 0)
extendedFeatureFlags = (uint64(ecx) << 32) | uint64(ebx)
}
func leaf0x80000000() {
maxExtendedInputValue, _, _, _ = cpuid_low(0x80000000, 0)
}
func leaf0x80000001() {
if maxExtendedInputValue < 0x80000001 {
return
}
_, _, ecx, edx := cpuid_low(0x80000001, 0)
//extendedProcessorSignatureAndFeatureBits := eax
extraFeatureFlags = (uint64(edx) << 32) | uint64(ecx)
}
// leaf0x80000004 looks at the Processor Brand String in leaves 0x80000002 through 0x80000004
func leaf0x80000004() {
if maxExtendedInputValue < 0x80000004 {
return
}
ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000002, 0)))
ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000003, 0)))
ProcessorBrandString += string(int32sToBytes(cpuid_low(0x80000004, 0)))
}
func leaf0x80000005() {
// AMD L1 Cache and TLB Information
if maxExtendedInputValue < 0x80000005 {
return
}
if brandId != AMD {
return
}
eax, ebx, ecx, edx := cpuid_low(0x80000005, 0)
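// AMD layout: EAX describes the 2M/4M L1 TLBs, EBX the 4K L1 TLBs, ECX the
// L1 data cache and EDX the L1 instruction cache, decoded byte by byte below.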
L1DTlb2and4MAssoc := (eax >> 24) & 0xFF
L1DTlb2and4MSize := (eax >> 16) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
DTLB,
"DTLB 2M/4M",
2 * 1024,
int(L1DTlb2and4MAssoc),
-1,
int(L1DTlb2and4MSize),
0,
})
L1ITlb2and4MAssoc := (eax >> 8) & 0xFF
L1ITlb2and4MSize := (eax) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
TLB,
"ITLB 2M/4M",
2 * 1024,
int(L1ITlb2and4MAssoc),
-1,
int(L1ITlb2and4MSize),
0,
})
L1DTlb4KAssoc := (ebx >> 24) & 0xFF
L1DTlb4KSize := (ebx >> 16) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
DTLB,
"DTLB 4K",
4,
int(L1DTlb4KAssoc),
-1,
int(L1DTlb4KSize),
0,
})
L1ITlb4KAssoc := (ebx >> 8) & 0xFF
L1ITlb4KSize := (ebx) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
TLB,
"ITLB 4K",
4,
int(L1ITlb4KAssoc),
-1,
int(L1ITlb4KSize),
0,
})
L1DcSize := (ecx >> 24) & 0xFF
L1DcAssoc := (ecx >> 16) & 0xFF
L1DcLinesPerTag := (ecx >> 8) & 0xFF
L1DcLineSize := (ecx >> 0) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
DATA_CACHE,
"L1 Data cache",
int(L1DcSize),
int(L1DcAssoc),
int(L1DcLineSize),
-1,
int(L1DcLinesPerTag),
})
L1IcSize := (edx >> 24) & 0xFF
L1IcAssoc := (edx >> 16) & 0xFF
L1IcLinesPerTag := (edx >> 8) & 0xFF
L1IcLineSize := (edx >> 0) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{1,
INSTRUCTION_CACHE,
"L1 Instruction cache",
int(L1IcSize),
int(L1IcAssoc),
int(L1IcLineSize),
-1,
int(L1IcLinesPerTag),
})
}
func leaf0x80000006() {
if maxExtendedInputValue < 0x80000006 {
return
}
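// Associativity field encoding used by leaf 0x80000006 (0xFF meaning fully
// associative); the same table is applied in both the Intel and AMD branches below.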
var associativityEncodings = map[uint]uint{
0x00: 0,
0x01: 1,
0x02: 2,
0x04: 4,
0x06: 8,
0x08: 16,
0x0A: 32,
0x0B: 48,
0x0C: 64,
0x0D: 96,
0x0E: 128,
0x0F: 0xFF, // - Fully associative
}
eax, ebx, ecx, edx := cpuid_low(0x80000006, 0)
if brandId == INTEL {
CacheLineSize := (ecx >> 0) & 0xFF
L2Associativity := uint((ecx >> 12) & 0xF)
CacheSize := (ecx >> 16) & 0xFFFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
0,
"Cache info from leaf 0x80000006 for Intel",
int(CacheSize),
int(associativityEncodings[L2Associativity]),
int(CacheLineSize),
-1,
0,
})
}
if brandId == AMD {
L2DTlb2and4MAssoc := uint((eax >> 28) & 0xF)
L2DTlb2and4MSize := (eax >> 16) & 0xFFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
DTLB,
"DTLB 2M/4M",
2 * 1024,
int(associativityEncodings[L2DTlb2and4MAssoc]),
-1,
int(L2DTlb2and4MSize),
0,
})
L2ITlb2and4MAssoc := uint((eax >> 12) & 0xF)
L2ITlb2and4MSize := (eax) & 0xFFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
TLB,
"ITLB 2M/4M",
2 * 1024,
int(associativityEncodings[L2ITlb2and4MAssoc]),
-1,
int(L2ITlb2and4MSize),
0,
})
L2DTlb4KAssoc := uint((ebx >> 28) & 0xF)
L2DTlb4KSize := (ebx >> 16) & 0xFFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
DTLB,
"DTLB 4K",
4,
int(associativityEncodings[L2DTlb4KAssoc]),
-1,
int(L2DTlb4KSize),
0,
})
L2ITlb4KAssoc := uint((ebx >> 12) & 0xF)
L2ITlb4KSize := (ebx) & 0xFFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
TLB,
"ITLB 4K",
4,
int(associativityEncodings[L2ITlb4KAssoc]),
-1,
int(L2ITlb4KSize),
0,
})
L2Size := (ecx >> 16) & 0xFFFF
L2Assoc := uint((ecx >> 12) & 0xF)
L2LinesPerTag := (ecx >> 8) & 0xF
L2LineSize := (ecx >> 0) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{2,
DATA_CACHE,
"L2 Data cache",
int(L2Size),
int(associativityEncodings[L2Assoc]),
int(L2LineSize),
-1,
int(L2LinesPerTag),
})
L3Size := ((edx >> 18) & 0xF) * 512
L3Assoc := uint((edx >> 12) & 0xF)
L3LinesPerTag := (edx >> 8) & 0xF
L3LineSize := (edx >> 0) & 0xFF
CacheDescriptors = append(CacheDescriptors,
CacheDescriptor{3,
DATA_CACHE,
"L3 Data cache",
int(L3Size),
int(associativityEncodings[L3Assoc]),
int(L3LineSize),
-1,
int(L3LinesPerTag),
})
}
}
// TODO split fused descriptors with bits in the high byte of the key, as done for 0x49
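// Keys are leaf-2 descriptor bytes (entries above 0xFF, such as 0x49|1<<8,
// disambiguate family/model-dependent descriptors); values follow the
// CacheDescriptor field order used in leaf4: level, type, name, size in KB
// (page size for TLBs), ways, line size, sets/entries, partitions, with -1
// marking fields leaf 2 does not report.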
var leaf02Descriptors = map[int16]CacheDescriptor{
0x01: {-1, TLB, "Instruction TLB", 4, 4, -1, 32, 0},
0x02: {-1, TLB, "Instruction TLB", 4 * 1024, 0xFF, -1, 2, 0},
0x03: {-1, TLB, "Data TLB", 4, 4, -1, 64, 0},
0x04: {-1, TLB, "Data TLB", 4 * 1024, 4, -1, 8, 0},
0x05: {-1, TLB, "Data TLB1", 4 * 1024, 4, -1, 32, 0},
0x06: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 8, 4, 32, -1, 0},
0x08: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 16, 4, 32, -1, 0},
0x09: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 32, 4, 64, -1, 0},
0x0A: {1, DATA_CACHE, "1st-level data cache", 8, 2, 32, -1, 0},
0x0B: {-1, TLB, "Instruction TLB", 4 * 1024, 4, -1, 4, 0},
0x0C: {1, DATA_CACHE, "1st-level data cache", 16, 4, 32, -1, 0},
0x0D: {1, DATA_CACHE, "1st-level data cache", 16, 4, 64, -1, 0},
0x0E: {1, DATA_CACHE, "1st-level data cache", 24, 6, 64, -1, 0},
0x1D: {2, DATA_CACHE, "2nd-level cache", 128, 2, 64, -1, 0},
0x21: {2, DATA_CACHE, "2nd-level cache", 256, 8, 64, -1, 0},
0x22: {3, DATA_CACHE, "3nd-level cache", 512, 4, 64, -1, 2},
0x23: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 8, 64, -1, 2},
0x24: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 16, 64, -1, 0},
0x25: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 8, 64, -1, 2},
0x29: {3, DATA_CACHE, "2nd-level cache", 4 * 1024, 8, 64, -1, 2},
0x2C: {1, DATA_CACHE, "1st-level cache", 32, 8, 64, -1, 0},
0x30: {1, INSTRUCTION_CACHE, "1st-level instruction cache", 32, 8, 64, -1, 0},
0x40: {-1, DATA_CACHE, "No 2nd-level cache or, if processor contains a " +
"valid 2nd-level cache, no 3rd-level cache", -1, -1, -1, -1, 0},
0x41: {2, DATA_CACHE, "2nd-level cache", 128, 4, 32, -1, 0},
0x42: {2, DATA_CACHE, "2nd-level cache", 256, 4, 32, -1, 0},
0x43: {2, DATA_CACHE, "2nd-level cache", 512, 4, 32, -1, 0},
0x44: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 4, 32, -1, 0},
0x45: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 4, 32, -1, 0},
0x46: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 4, 64, -1, 0},
0x47: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 8, 64, -1, 0},
0x48: {2, DATA_CACHE, "2nd-level cache", 3 * 1024, 12, 64, -1, 0},
0x49: {2, DATA_CACHE, "2nd-level cache", 4 * 1024, 16, 64, -1, 0},
// (Intel Xeon processor MP, Family 0FH, Model 06H)
(0x49 | (1 << 8)): {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 16, 64, -1, 0},
0x4A: {3, DATA_CACHE, "3nd-level cache", 6 * 1024, 12, 64, -1, 0},
0x4B: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 16, 64, -1, 0},
0x4C: {3, DATA_CACHE, "3nd-level cache", 12 * 1024, 12, 64, -1, 0},
0x4D: {3, DATA_CACHE, "3nd-level cache", 16 * 1024, 16, 64, -1, 0},
0x4E: {2, DATA_CACHE, "3nd-level cache", 6 * 1024, 24, 64, -1, 0},
0x4F: {-1, TLB, "Instruction TLB", 4, -1, -1, 32, 0},
0x50: {-1, TLB, "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 64, 0},
0x51: {-1, TLB, "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 128, 0},
0x52: {-1, TLB, "Instruction TLB: 4 KByte and 2-MByte or 4-MByte pages", 4, -1, -1, 256, 0},
0x55: {-1, TLB, "Instruction TLB: 2-MByte or 4-MByte pages", 2 * 1024, 0xFF, -1, 7, 0},
0x56: {-1, TLB, "Data TLB0", 4 * 1024, 4, -1, 16, 0},
0x57: {-1, TLB, "Data TLB0", 4, 4, -1, 16, 0},
0x59: {-1, TLB, "Data TLB0", 4, 0xFF, -1, 16, 0},
0x5A: {-1, TLB, "Data TLB0 2-MByte or 4 MByte pages", 2 * 1024, 4, -1, 32, 0},
0x5B: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 64, 0},
0x5C: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 128, 0},
0x5D: {-1, TLB, "Data TLB 4 KByte and 4 MByte pages", 4, -1, -1, 256, 0},
0x60: {1, DATA_CACHE, "1st-level data cache", 16, 8, 64, -1, 0},
0x61: {-1, TLB, "Instruction TLB", 4, 0xFF, -1, 48, 0},
0x63: {-1, TLB, "Data TLB", 1 * 1024 * 1024, 4, -1, 4, 0},
0x66: {1, DATA_CACHE, "1st-level data cache", 8, 4, 64, -1, 0},
0x67: {1, DATA_CACHE, "1st-level data cache", 16, 4, 64, -1, 0},
0x68: {1, DATA_CACHE, "1st-level data cache", 32, 4, 64, -1, 0},
0x70: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 12, 8, -1, -1, 0},
0x71: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 16, 8, -1, -1, 0},
0x72: {1, INSTRUCTION_CACHE, "Trace cache (size in K of uop)", 32, 8, -1, -1, 0},
0x76: {-1, TLB, "Instruction TLB: 2M/4M pages", 2 * 1024, 0xFF, -1, 8, 0},
0x78: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 4, 64, -1, 0},
0x79: {2, DATA_CACHE, "2nd-level cache", 128, 8, 64, -1, 2},
0x7A: {2, DATA_CACHE, "2nd-level cache", 256, 8, 64, -1, 2},
0x7B: {2, DATA_CACHE, "2nd-level cache", 512, 8, 64, -1, 2},
0x7C: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 64, -1, 2},
0x7D: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 8, 64, -1, 0},
0x7F: {2, DATA_CACHE, "2nd-level cache", 512, 2, 64, -1, 0},
0x80: {2, DATA_CACHE, "2nd-level cache", 512, 8, 64, -1, 0},
0x82: {2, DATA_CACHE, "2nd-level cache", 256, 8, 32, -1, 0},
0x83: {2, DATA_CACHE, "2nd-level cache", 512, 8, 32, -1, 0},
0x84: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 32, -1, 0},
0x85: {2, DATA_CACHE, "2nd-level cache", 2 * 1024, 8, 32, -1, 0},
0x86: {2, DATA_CACHE, "2nd-level cache", 512, 4, 32, -1, 0},
0x87: {2, DATA_CACHE, "2nd-level cache", 1 * 1024, 8, 64, -1, 0},
0xA0: {-1, DTLB, "DTLB", 4, 0xFF, -1, 32, 0},
0xB0: {-1, TLB, "Instruction TLB", 4, 4, -1, 128, 0},
0xB1: {-1, TLB, "Instruction TLB 2M pages 4 way 8 entries or" +
"4M pages 4-way, 4 entries", 2 * 1024, 4, -1, 8, 0},
0xB2: {-1, TLB, "Instruction TLB", 4, 4, -1, 64, 0},
0xB3: {-1, TLB, "Data TLB", 4, 4, -1, 128, 0},
0xB4: {-1, TLB, "Data TLB1", 4, 4, -1, 256, 0},
0xB5: {-1, TLB, "Instruction TLB", 4, 8, -1, 64, 0},
0xB6: {-1, TLB, "Instruction TLB", 4, 8, -1, 128, 0},
0xBA: {-1, TLB, "Data TLB1", 4, 4, -1, 64, 0},
0xC0: {-1, TLB, "Data TLB: 4 KByte and 4 MByte pages", 4, 4, -1, 8, 0},
0xC1: {-1, STLB, "Shared 2nd-Level TLB: 4Kbyte and 2Mbyte pages", 4, 8, -1, 1024, 0},
0xC2: {-1, DTLB, "DTLB 4KByte/2 MByte pages", 4, 4, -1, 16, 0},
0xC3: {-1, STLB, "Shared 2nd-Level TLB: " +
"4 KByte /2 MByte pages, 6-way associative, 1536 entries." +
"Also 1GBbyte pages, 4-way,16 entries.", 4, 6, -1, 1536, 0},
0xCA: {-1, STLB, "Shared 2nd-Level TLB", 4, 4, -1, 512, 0},
0xD0: {3, DATA_CACHE, "3nd-level cache", 512, 4, 64, -1, 0},
0xD1: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 4, 64, -1, 0},
0xD2: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 4, 64, -1, 0},
0xD6: {3, DATA_CACHE, "3nd-level cache", 1 * 1024, 8, 64, -1, 0},
0xD7: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 8, 64, -1, 0},
0xD8: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 8, 64, -1, 0},
0xDC: {3, DATA_CACHE, "3nd-level cache", 1 * 1536, 12, 64, -1, 0},
0xDD: {3, DATA_CACHE, "3nd-level cache", 3 * 1024, 12, 64, -1, 0},
0xDE: {3, DATA_CACHE, "3nd-level cache", 6 * 1024, 12, 64, -1, 0},
0xE2: {3, DATA_CACHE, "3nd-level cache", 2 * 1024, 16, 64, -1, 0},
0xE3: {3, DATA_CACHE, "3nd-level cache", 4 * 1024, 16, 64, -1, 0},
0xE4: {3, DATA_CACHE, "3nd-level cache", 8 * 1024, 16, 64, -1, 0},
0xEA: {3, DATA_CACHE, "3nd-level cache", 12 * 1024, 24, 64, -1, 0},
0xEB: {3, DATA_CACHE, "3nd-level cache", 18 * 1024, 24, 64, -1, 0},
0xEC: {3, DATA_CACHE, "3nd-level cache", 24 * 1024, 24, 64, -1, 0},
0xF0: {-1, PREFETCH, "", 64, -1, -1, -1, 0},
0xF1: {-1, PREFETCH, "", 128, -1, -1, -1, 0},
0xFF: {-1, NULL, "CPUID leaf 2 does not report cache descriptor " +
"information, use CPUID leaf 4 to query cache parameters",
-1, -1, -1, -1, 0},
}
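// int32sToBytes flattens each 32-bit register into its four bytes, least
// significant first, which is how CPUID encodes the vendor and brand strings.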
func int32sToBytes(args ...uint32) []byte {
var result []byte
for _, arg := range args {
result = append(result,
byte((arg)&0xFF),
byte((arg>>8)&0xFF),
byte((arg>>16)&0xFF),
byte((arg>>24)&0xFF))
}
return result
}

24
vendor/github.com/intel-go/cpuid/cpuidlow_amd64.s generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Copyright 2017 Intel Corporation.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func cpuid_low(arg1, arg2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuid_low(SB),NOSPLIT,$0-24
MOVL arg1+0(FP), AX
MOVL arg2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv_low(arg1 uint32) (eax, edx uint32)
TEXT ·xgetbv_low(SB),NOSPLIT,$0-16
MOVL arg1+0(FP), CX
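// 0F 01 D0 encodes XGETBV (reads the extended control register selected by
// ECX into EDX:EAX); it is spelled as raw bytes rather than a mnemonic.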
BYTE $0x0F
BYTE $0x01
BYTE $0xD0
MOVL AX,eax+8(FP)
MOVL DX,edx+12(FP)
RET