Update packages to remove dep on archived github.com/pkg/errors

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/13730/head
Brad Davidson 2026-03-03 22:09:44 +00:00 committed by Brad Davidson
parent 64207c324f
commit 3acf8db8f2
70 changed files with 514 additions and 493 deletions

View File

@ -3,7 +3,6 @@ package main
import (
"bytes"
"context"
"errors"
"io"
"io/fs"
"os"
@ -20,9 +19,9 @@ import (
"github.com/k3s-io/k3s/pkg/dataverify"
"github.com/k3s-io/k3s/pkg/flock"
"github.com/k3s-io/k3s/pkg/untar"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/home"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/resolvehome"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/urfave/cli/v2"
@ -212,7 +211,7 @@ func stageAndRunCLI(cli *cli.Context, cmd string, dataDir string, args []string)
func stageAndRun(dataDir, cmd string, args []string, calledAsInternal bool) error {
dir, err := extract(dataDir)
if err != nil {
return pkgerrors.WithMessage(err, "extracting data")
return errors.WithMessage(err, "extracting data")
}
logrus.Debugf("Asset dir %s", dir)
@ -386,7 +385,7 @@ func extract(dataDir string) (string, error) {
func findCriConfig(dataDir string) string {
searchList := []string{filepath.Join(dataDir, "agent", criDefaultConfigPath)}
if homeDataDir, err := resolvehome.Resolve(datadir.DefaultHomeDataDir); err == nil {
if homeDataDir, err := home.Resolve(datadir.DefaultHomeDataDir); err == nil {
searchList = append(searchList, filepath.Join(homeDataDir, "agent", criDefaultConfigPath))
} else {
logrus.Warnf("Failed to resolve user home directory: %s", err)

View File

@ -6,14 +6,14 @@ import (
"os"
"syscall"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
)
const programPostfix = ""
func runExec(cmd string, args []string, calledAsInternal bool) (err error) {
if err := syscall.Exec(cmd, args, os.Environ()); err != nil {
return pkgerrors.WithMessagef(err, "exec %s failed", cmd)
return errors.WithMessagef(err, "exec %s failed", cmd)
}
return nil
}

22
go.mod
View File

@ -110,7 +110,7 @@ require (
github.com/json-iterator/go v1.1.12
github.com/k3s-io/api v0.1.4
github.com/k3s-io/helm-controller v0.16.17
github.com/k3s-io/kine v0.14.12
github.com/k3s-io/kine v0.14.14
github.com/klauspost/compress v1.18.4
github.com/libp2p/go-libp2p v0.46.0
github.com/minio/minio-go/v7 v7.0.91
@ -126,15 +126,14 @@ require (
github.com/opencontainers/selinux v1.13.1
github.com/otiai10/copy v1.14.1
github.com/pdtpartners/nix-snapshotter v0.4.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.66.1
github.com/rancher/dynamiclistener v0.7.1
github.com/rancher/lasso v0.2.3
github.com/rancher/dynamiclistener v0.7.5
github.com/rancher/lasso v0.2.6
github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7
github.com/rancher/remotedialer v0.6.0-rc.1.0.20250916111157-f160aa32568d
github.com/rancher/wharfie v0.7.0
github.com/rancher/wrangler/v3 v3.2.3
github.com/rancher/wharfie v0.7.1
github.com/rancher/wrangler/v3 v3.4.0
github.com/robfig/cron/v3 v3.0.1
github.com/rootless-containers/rootlesskit v1.1.1
github.com/sirupsen/logrus v1.9.4
@ -150,7 +149,7 @@ require (
go.etcd.io/etcd/client/v3 v3.6.8
go.etcd.io/etcd/etcdutl/v3 v3.6.6
go.etcd.io/etcd/server/v3 v3.6.8
go.uber.org/mock v0.5.2
go.uber.org/mock v0.6.0
go.uber.org/zap v1.27.1
golang.org/x/crypto v0.46.0
golang.org/x/mod v0.30.0
@ -247,7 +246,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/docker/cli v28.3.2+incompatible // indirect
github.com/docker/cli v29.0.4+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.6.0 // indirect
@ -402,8 +401,8 @@ require (
github.com/nats-io/jsm.go v0.3.0 // indirect
github.com/nats-io/jwt/v2 v2.8.0 // indirect
github.com/nats-io/nats-server/v2 v2.12.2 // indirect
github.com/nats-io/nats.go v1.48.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nats.go v1.49.0 // indirect
github.com/nats-io/nkeys v0.4.12 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
@ -432,6 +431,7 @@ require (
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
@ -505,7 +505,7 @@ require (
k8s.io/dynamic-resource-allocation v0.0.0 // indirect
k8s.io/endpointslice v0.0.0 // indirect
k8s.io/externaljwt v1.32.0 // indirect
k8s.io/kms v0.0.0 // indirect
k8s.io/kms v0.34.5 // indirect
k8s.io/kube-aggregator v0.35.0 // indirect
k8s.io/kube-controller-manager v0.0.0 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect

40
go.sum
View File

@ -415,8 +415,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvw
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY=
github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.0.4+incompatible h1:mffN/hPqaI39vx/4QiSkdldHeM0rP1ZZBIXRUOPI5+I=
github.com/docker/cli v29.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v25.0.13+incompatible h1:YeBrkUd3q0ZoRDNoEzuopwCLU+uD8GZahDHwBdsTnkU=
@ -813,8 +813,8 @@ github.com/k3s-io/etcd/server/v3 v3.6.7-k3s1 h1:ZBV6n9XhjGex9MIOaEAefbhNriH5Gxo4
github.com/k3s-io/etcd/server/v3 v3.6.7-k3s1/go.mod h1:LEM328bPA2uVMhN0+Ht/vAsADW127QS1oM7EuHrOTy0=
github.com/k3s-io/helm-controller v0.16.17 h1:VXMmXQmmTB49x6bnN/PsJUTVKHb0r69b+SffIDUTMTM=
github.com/k3s-io/helm-controller v0.16.17/go.mod h1:jmrgGttLQbh2yB1kcf9XFAigNW6U8oWCswCSuEjkxXU=
github.com/k3s-io/kine v0.14.12 h1:He4hfjkFnd6zV/mwO7WUIsMp7zHAU02J4ALlViY8pKA=
github.com/k3s-io/kine v0.14.12/go.mod h1:5C5dPvFbapdvyLurH1Ls4xGdgl/Gqn+73BunjDndXBU=
github.com/k3s-io/kine v0.14.14 h1:DD1tAIfOIy51T784p46szjVVXq019Uv7Efn+5HguhTA=
github.com/k3s-io/kine v0.14.14/go.mod h1:6+xemLl2cJ7csY06UWfj3HpkAzQLNGAfjElQwhumjyk=
github.com/k3s-io/klog/v2 v2.120.1-k3s1 h1:7twAHPFpZA21KdMnMNnj68STQMPldAxF2Zsaol57dxw=
github.com/k3s-io/klog/v2 v2.120.1-k3s1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
github.com/k3s-io/kube-router/v2 v2.6.3-k3s1 h1:RZjUBIuitXCuYoCzm1aM6p5EgQFC5k3N72j4pBIc2j4=
@ -1085,10 +1085,10 @@ github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g=
github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA=
github.com/nats-io/nats-server/v2 v2.12.2 h1:4TEQd0Y4zvcW0IsVxjlXnRso1hBkQl3TS0BI+SxgPhE=
github.com/nats-io/nats-server/v2 v2.12.2/go.mod h1:j1AAttYeu7WnvD8HLJ+WWKNMSyxsqmZ160pNtCQRMyE=
github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U=
github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE=
github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw=
github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc=
github.com/nats-io/nkeys v0.4.12/go.mod h1:MT59A1HYcjIcyQDJStTfaOY6vhy9XTUjOFo+SVsvpBg=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@ -1212,18 +1212,18 @@ github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI1
github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rancher/dynamiclistener v0.7.1 h1:vt4AGDw/s19qFCfpyYSQS8HcRc6THcTD7PIAPAK2R1o=
github.com/rancher/dynamiclistener v0.7.1/go.mod h1:kq+3QHceIpW3f2xVpYgok7ILn1JcS8+O2utC42y8ZNY=
github.com/rancher/lasso v0.2.3 h1:74/z/C/O3ykhyMrRuEgc9kVyYiSoS7kp5BAijlcyXDg=
github.com/rancher/lasso v0.2.3/go.mod h1:G+KeeOaKRjp+qGp0bV6VbLhYrq1vHbJPbDh40ejg5yE=
github.com/rancher/dynamiclistener v0.7.5 h1:C8Aqefrwo18Z6//CNXt7Wf2gwajMOpVmWEcSU/dJRE8=
github.com/rancher/dynamiclistener v0.7.5/go.mod h1:eL7NEC7+bmHgjwRxJ2vddmgnTe0nGatIyz5aXHHbZ60=
github.com/rancher/lasso v0.2.6 h1:/QNO491vWlTNYgwOOopRlILcTv+pI/wBHYdPFD2Pk5E=
github.com/rancher/lasso v0.2.6/go.mod h1:L3ol8PdO21KoMhNa3RWjpR3ZBnE70JCAod1nJuOvT1E=
github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7 h1:0Kg2SGoMeU1ll4xPi4DE0+qNHLFO/U5MwtK0WrIdK+o=
github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7/go.mod h1:fsbs0YOsGn1ofPD5p+BuI4qDhbMbSJtTegKt6Ucna+c=
github.com/rancher/remotedialer v0.6.0-rc.1.0.20250916111157-f160aa32568d h1:0ckbeLz9EFnLznH3+ywtQ3UaMbCaJbyaHc5ut/wzqH8=
github.com/rancher/remotedialer v0.6.0-rc.1.0.20250916111157-f160aa32568d/go.mod h1:CW6Q8F8IESN05/yl48OSwhVi54nDwVQQriV16zAiGkg=
github.com/rancher/wharfie v0.7.0 h1:M+OHMkE+tfafY59E5RuZ/Q4IorKNJGVqhtZRksTpOWo=
github.com/rancher/wharfie v0.7.0/go.mod h1:wSQoRNUM58z0Qb9kmAT1L6ia2ys0LWHRH+7Vix/rkD8=
github.com/rancher/wrangler/v3 v3.2.3 h1:s35Dpu/oIbXI1GV/FmYQaAex60+NXe67An3Z5zzzyEY=
github.com/rancher/wrangler/v3 v3.2.3/go.mod h1:TA1QuuQxrtn/kmJbBLW/l24IcfHBmSXBa9an3IRlqQQ=
github.com/rancher/wharfie v0.7.1 h1:/4MPrr71I7aXESB0C5DVf7an4H1TYOYv1yx6vA6eKko=
github.com/rancher/wharfie v0.7.1/go.mod h1:x/X8GGrrjBAdKHJuSF4ANqcljVQAhu3jBHoutVKeCE8=
github.com/rancher/wrangler/v3 v3.4.0 h1:pEZ9wIM3k5EZkVXU2TbD6mWVfw5mtdv1v0PRJ7fPQxw=
github.com/rancher/wrangler/v3 v3.4.0/go.mod h1:bRcdkdwRTwoXVSWVGtJdSBNXkKbInlo+PVkCtkUCi4s=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@ -1427,8 +1427,8 @@ go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
@ -1847,8 +1847,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -9,7 +9,6 @@ import (
"crypto/x509"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"io"
"net"
@ -30,8 +29,8 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/control/deps"
"github.com/k3s-io/k3s/pkg/spegel"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/wharfie/pkg/registries"
"github.com/rancher/wrangler/v3/pkg/slice"
@ -248,7 +247,7 @@ func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string)
func getKubeletServingCert(nodeName string, nodeIPs []net.IP, certFile, keyFile, nodePasswordFile string, info *clientaccess.Info) error {
csr, err := getCSRBytes(keyFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to create certificate request %s", certFile)
return errors.WithMessagef(err, "failed to create certificate request %s", certFile)
}
basename := filepath.Base(certFile)
@ -262,11 +261,11 @@ func getKubeletServingCert(nodeName string, nodeIPs []net.IP, certFile, keyFile,
// must be used instead of the one we signed the CSR with.
certBytes, keyBytes := splitCertKeyPEM(body)
if err := os.WriteFile(certFile, certBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write cert %s", certFile)
return errors.WithMessagef(err, "failed to write cert %s", certFile)
}
if len(keyBytes) > 0 {
if err := os.WriteFile(keyFile, keyBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write key %s", keyFile)
return errors.WithMessagef(err, "failed to write key %s", keyFile)
}
}
return nil
@ -280,7 +279,7 @@ func getHostFile(filename string, info *clientaccess.Info) error {
return err
}
if err := os.WriteFile(filename, fileBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write cert %s", filename)
return errors.WithMessagef(err, "failed to write cert %s", filename)
}
return nil
}
@ -292,7 +291,7 @@ func getHostFile(filename string, info *clientaccess.Info) error {
func getClientCert(certFile, keyFile string, info *clientaccess.Info) error {
csr, err := getCSRBytes(keyFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to create certificate request %s", certFile)
return errors.WithMessagef(err, "failed to create certificate request %s", certFile)
}
basename := filepath.Base(certFile)
@ -306,11 +305,11 @@ func getClientCert(certFile, keyFile string, info *clientaccess.Info) error {
// must be used instead of the one we signed the CSR with.
certBytes, keyBytes := splitCertKeyPEM(fileBytes)
if err := os.WriteFile(certFile, certBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write cert %s", certFile)
return errors.WithMessagef(err, "failed to write cert %s", certFile)
}
if len(keyBytes) > 0 {
if err := os.WriteFile(keyFile, keyBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write key %s", keyFile)
return errors.WithMessagef(err, "failed to write key %s", keyFile)
}
}
return nil
@ -353,7 +352,7 @@ func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) {
func getKubeletClientCert(certFile, keyFile, nodeName string, nodeIPs []net.IP, nodePasswordFile string, info *clientaccess.Info) error {
csr, err := getCSRBytes(keyFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to create certificate request %s", certFile)
return errors.WithMessagef(err, "failed to create certificate request %s", certFile)
}
basename := filepath.Base(certFile)
@ -367,11 +366,11 @@ func getKubeletClientCert(certFile, keyFile, nodeName string, nodeIPs []net.IP,
// must be used instead of the one we signed the CSR with.
certBytes, keyBytes := splitCertKeyPEM(body)
if err := os.WriteFile(certFile, certBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write cert %s", certFile)
return errors.WithMessagef(err, "failed to write cert %s", certFile)
}
if len(keyBytes) > 0 {
if err := os.WriteFile(keyFile, keyBytes, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write key %s", keyFile)
return errors.WithMessagef(err, "failed to write key %s", keyFile)
}
}
return nil
@ -451,18 +450,18 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
controlConfig, err := getConfig(info)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to retrieve configuration from server")
return nil, errors.WithMessage(err, "failed to retrieve configuration from server")
}
nodeName, nodeIPs, err := util.GetHostnameAndIPs(envInfo.NodeName, envInfo.NodeIP.Value())
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get node name and addresses")
return nil, errors.WithMessage(err, "failed to get node name and addresses")
}
// If the supervisor and externally-facing apiserver are not on the same port, tell the proxy where to find the apiserver.
if controlConfig.SupervisorPort != controlConfig.HTTPSPort {
if err := proxy.SetAPIServerPort(controlConfig.HTTPSPort, utilsnet.IsIPv6(nodeIPs[0])); err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to set apiserver port to %d", controlConfig.HTTPSPort)
return nil, errors.WithMessagef(err, "failed to set apiserver port to %d", controlConfig.HTTPSPort)
}
}
apiServerURL := proxy.APIServerURL()
@ -523,12 +522,12 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
// Ask the server to sign our kubelet server cert.
if err := getKubeletServingCert(nodeName, nodeExternalAndInternalIPs, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info); err != nil {
return nil, pkgerrors.WithMessage(err, servingKubeletCert)
return nil, errors.WithMessage(err, servingKubeletCert)
}
// Ask the server to sign our kubelet client cert.
if err := getKubeletClientCert(clientKubeletCert, clientKubeletKey, nodeName, nodeIPs, newNodePasswordFile, info); err != nil {
return nil, pkgerrors.WithMessage(err, clientKubeletCert)
return nil, errors.WithMessage(err, clientKubeletCert)
}
// Generate a kubeconfig for the kubelet.
@ -542,7 +541,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
// Ask the server to sign our kube-proxy client cert.
if err := getClientCert(clientKubeProxyCert, clientKubeProxyKey, info); err != nil {
return nil, pkgerrors.WithMessage(err, clientKubeProxyCert)
return nil, errors.WithMessage(err, clientKubeProxyCert)
}
// Generate a kubeconfig for kube-proxy.
@ -556,7 +555,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
// Ask the server to sign our agent controller client cert.
if err := getClientCert(clientK3sControllerCert, clientK3sControllerKey, info); err != nil {
return nil, pkgerrors.WithMessage(err, clientK3sControllerCert)
return nil, errors.WithMessage(err, clientK3sControllerCert)
}
// Generate a kubeconfig for the agent controller.
@ -627,7 +626,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
} else {
listenAddress, _, _, err := util.GetDefaultAddresses(nodeIPs[0])
if err != nil {
return nil, pkgerrors.WithMessage(err, "cannot configure IPv4/IPv6 node-ip")
return nil, errors.WithMessage(err, "cannot configure IPv4/IPv6 node-ip")
}
nodeConfig.AgentConfig.ListenAddress = listenAddress
}
@ -802,7 +801,7 @@ func getKubeProxyDisabled(ctx context.Context, node *config.Node, proxy proxy.Pr
controlConfig, err := getConfig(info)
if err != nil {
return false, pkgerrors.WithMessage(err, "failed to retrieve configuration from server")
return false, errors.WithMessage(err, "failed to retrieve configuration from server")
}
return controlConfig.DisableKubeProxy, nil

View File

@ -3,14 +3,13 @@
package config
import (
"errors"
"os"
"path/filepath"
"github.com/k3s-io/k3s/pkg/agent/containerd"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/daemons/config"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -24,23 +23,23 @@ func applyContainerdOSSpecificConfig(nodeConfig *config.Node) error {
switch nodeConfig.AgentConfig.Snapshotter {
case "overlayfs":
if err := containerd.OverlaySupported(nodeConfig.Containerd.Root); err != nil {
return pkgerrors.WithMessagef(err, "\"overlayfs\" snapshotter cannot be enabled for %q, try using \"fuse-overlayfs\" or \"native\"",
return errors.WithMessagef(err, "\"overlayfs\" snapshotter cannot be enabled for %q, try using \"fuse-overlayfs\" or \"native\"",
nodeConfig.Containerd.Root)
}
case "fuse-overlayfs":
if err := containerd.FuseoverlayfsSupported(nodeConfig.Containerd.Root); err != nil {
return pkgerrors.WithMessagef(err, "\"fuse-overlayfs\" snapshotter cannot be enabled for %q, try using \"native\"",
return errors.WithMessagef(err, "\"fuse-overlayfs\" snapshotter cannot be enabled for %q, try using \"native\"",
nodeConfig.Containerd.Root)
}
case "stargz":
if err := containerd.StargzSupported(nodeConfig.Containerd.Root); err != nil {
return pkgerrors.WithMessagef(err, "\"stargz\" snapshotter cannot be enabled for %q, try using \"overlayfs\" or \"native\"",
return errors.WithMessagef(err, "\"stargz\" snapshotter cannot be enabled for %q, try using \"overlayfs\" or \"native\"",
nodeConfig.Containerd.Root)
}
nodeConfig.AgentConfig.ImageServiceSocket = "/run/containerd-stargz-grpc/containerd-stargz-grpc.sock"
case "nix":
if err := containerd.NixSupported(nodeConfig.Containerd.Root); err != nil {
return pkgerrors.WithMessagef(err, "\"nix\" snapshotter cannot be enabled for %q, try using \"overlayfs\" or \"native\"",
return errors.WithMessagef(err, "\"nix\" snapshotter cannot be enabled for %q, try using \"overlayfs\" or \"native\"",
nodeConfig.Containerd.Root)
}
nodeConfig.AgentConfig.ImageServiceSocket = filepath.Join(nodeConfig.Containerd.State, "nix-snapshotter.sock")

View File

@ -7,7 +7,7 @@ import (
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/daemons/config"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/rancher/permissions/pkg/access"
"github.com/rancher/permissions/pkg/acl"
"github.com/rancher/permissions/pkg/sid"
@ -45,7 +45,7 @@ func configureACL(file string) error {
access.GrantSid(windows.GENERIC_ALL, sid.LocalSystem()),
access.GrantSid(windows.GENERIC_ALL, sid.BuiltinAdministrators()),
}...); err != nil {
return pkgerrors.WithMessagef(err, "failed to configure Access Control List For %s", file)
return errors.WithMessagef(err, "failed to configure Access Control List For %s", file)
}
return nil

View File

@ -19,7 +19,6 @@ import (
"github.com/k3s-io/k3s/pkg/version"
"github.com/moby/sys/userns"
"github.com/pdtpartners/nix-snapshotter/pkg/nix"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"k8s.io/cri-client/pkg/util"
@ -99,7 +98,7 @@ func SetupContainerdConfig(cfg *config.Node) error {
selEnabled, selConfigured, err := selinuxStatus()
if err != nil {
return pkgerrors.WithMessage(err, "failed to detect selinux")
return fmt.Errorf("failed to detect selinux: %w", err)
}
switch {
case !cfg.SELinux && selEnabled:

View File

@ -8,8 +8,7 @@ import (
containerd "github.com/containerd/containerd/v2/client"
"github.com/k3s-io/k3s/pkg/agent/templates"
"github.com/k3s-io/k3s/pkg/daemons/config"
util3 "github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
"k8s.io/cri-client/pkg/util"
)
@ -67,17 +66,17 @@ func Client(address string) (*containerd.Client, error) {
}
func OverlaySupported(root string) error {
return pkgerrors.WithMessagef(util3.ErrUnsupportedPlatform, "overlayfs is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "overlayfs is not supported")
}
func FuseoverlayfsSupported(root string) error {
return pkgerrors.WithMessagef(util3.ErrUnsupportedPlatform, "fuse-overlayfs is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "fuse-overlayfs is not supported")
}
func StargzSupported(root string) error {
return pkgerrors.WithMessagef(util3.ErrUnsupportedPlatform, "stargz is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "stargz is not supported")
}
func NixSupported(root string) error {
return pkgerrors.WithMessagef(util3.ErrUnsupportedPlatform, "nix is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "nix is not supported")
}

View File

@ -4,7 +4,6 @@ import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@ -24,11 +23,11 @@ import (
util2 "github.com/k3s-io/k3s/pkg/agent/util"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/natefinch/lumberjack"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wharfie/pkg/tarfile"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
@ -112,7 +111,7 @@ func Run(ctx context.Context, cfg *config.Node) error {
addDeathSig(cmd)
err := cmd.Run()
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "containerd exited"))
signals.RequestShutdown(errors.WithMessage(err, "containerd exited"))
}
signals.RequestShutdown(nil)
}()
@ -149,12 +148,12 @@ func PreloadImages(ctx context.Context, cfg *config.Node) error {
// At startup all leases from k3s are cleared; we no longer use leases to lock content
if err := clearLeases(ctx, client); err != nil {
return pkgerrors.WithMessage(err, "failed to clear leases")
return errors.WithMessage(err, "failed to clear leases")
}
// Clear the pinned labels on all images previously pinned by k3s
if err := clearLabels(ctx, client); err != nil {
return pkgerrors.WithMessage(err, "failed to clear pinned labels")
return errors.WithMessage(err, "failed to clear pinned labels")
}
return importAndWatchImages(ctx, cfg)
@ -174,7 +173,7 @@ func preloadFile(ctx context.Context, cfg *config.Node, client *containerd.Clien
logrus.Infof("Pulling images from %s", filePath)
images, err = prePullImages(ctx, client, imageClient, file)
if err != nil {
return pkgerrors.WithMessage(err, "failed to pull images from "+filePath)
return errors.WithMessage(err, "failed to pull images from "+filePath)
}
} else {
opener, err := tarfile.GetOpener(filePath)
@ -191,18 +190,18 @@ func preloadFile(ctx context.Context, cfg *config.Node, client *containerd.Clien
logrus.Infof("Importing images from %s", filePath)
images, err = client.Import(ctx, imageReader, containerd.WithAllPlatforms(true), containerd.WithSkipMissing())
if err != nil {
return pkgerrors.WithMessage(err, "failed to import images from "+filePath)
return errors.WithMessage(err, "failed to import images from "+filePath)
}
}
if err := labelImages(ctx, client, images, filepath.Base(filePath)); err != nil {
return pkgerrors.WithMessage(err, "failed to add pinned label to images")
return errors.WithMessage(err, "failed to add pinned label to images")
}
if err := retagImages(ctx, client, images, cfg.AgentConfig.AirgapExtraRegistry); err != nil {
return pkgerrors.WithMessage(err, "failed to retag images")
return errors.WithMessage(err, "failed to retag images")
}
if err := labelContent(ctx, client, images, cfg.AgentConfig.AirgapExtraRegistry); err != nil {
return pkgerrors.WithMessage(err, "failed to add source labels to layer content")
return errors.WithMessage(err, "failed to add source labels to layer content")
}
return nil
@ -238,7 +237,7 @@ func clearLabels(ctx context.Context, client *containerd.Client) error {
delete(image.Labels, k3sPinnedImageLabelKey)
delete(image.Labels, criPinnedImageLabelKey)
if _, err := imageService.Update(ctx, image, "labels"); err != nil {
errs = append(errs, pkgerrors.WithMessage(err, "failed to delete labels from image "+image.Name))
errs = append(errs, errors.WithMessage(err, "failed to delete labels from image "+image.Name))
}
}
return merr.NewErrors(errs...)
@ -263,7 +262,7 @@ func labelImages(ctx context.Context, client *containerd.Client, images []images
image.Labels[criPinnedImageLabelKey] = criPinnedImageLabelValue
updatedImage, err := imageService.Update(ctx, image, "labels")
if err != nil {
errs = append(errs, pkgerrors.WithMessage(err, "failed to add labels to image "+image.Name))
errs = append(errs, errors.WithMessage(err, "failed to add labels to image "+image.Name))
} else {
images[i] = updatedImage
}
@ -280,7 +279,7 @@ func retagImages(ctx context.Context, client *containerd.Client, images []images
for _, image := range images {
name, err := parseNamedTagged(image.Name)
if err != nil {
errs = append(errs, pkgerrors.WithMessage(err, "failed to parse tag for image "+image.Name))
errs = append(errs, errors.WithMessage(err, "failed to parse tag for image "+image.Name))
continue
}
logrus.Infof("Imported %s", image.Name)
@ -311,13 +310,13 @@ func forceCreateTag(ctx context.Context, imageService images.Store, image images
if _, err := imageService.Create(ctx, image); err != nil {
if errdefs.IsAlreadyExists(err) {
if err = imageService.Delete(ctx, image.Name); err != nil {
return pkgerrors.WithMessage(err, "failed to delete existing image "+image.Name)
return errors.WithMessage(err, "failed to delete existing image "+image.Name)
}
if _, err = imageService.Create(ctx, image); err != nil {
return pkgerrors.WithMessage(err, "failed to tag after deleting existing image "+image.Name)
return errors.WithMessage(err, "failed to tag after deleting existing image "+image.Name)
}
} else {
return pkgerrors.WithMessage(err, "failed to tag image "+image.Name)
return errors.WithMessage(err, "failed to tag image "+image.Name)
}
}
return nil
@ -332,13 +331,13 @@ func labelContent(ctx context.Context, client *containerd.Client, images []image
for _, image := range images {
name, err := parseNamedTagged(image.Name)
if err != nil {
errs = append(errs, pkgerrors.WithMessage(err, "failed to parse tags for image "+image.Name))
errs = append(errs, errors.WithMessage(err, "failed to parse tags for image "+image.Name))
continue
}
registries := append(registries, docker.Domain(name))
digests, err := getDigests(ctx, contentStore, image.Target)
if err != nil {
errs = append(errs, pkgerrors.WithMessage(err, "failed to get content digests for image "+image.Name))
errs = append(errs, errors.WithMessage(err, "failed to get content digests for image "+image.Name))
continue
}
for _, digest := range digests {
@ -354,7 +353,7 @@ func labelContent(ctx context.Context, client *containerd.Client, images []image
if _, err := contentStore.Update(ctx, info, paths...); err != nil {
if !errdefs.IsNotFound(err) {
errs = append(errs, pkgerrors.WithMessage(err, "failed to add source labels to content with digest "+digest.String()))
errs = append(errs, errors.WithMessage(err, "failed to add source labels to content with digest "+digest.String()))
}
continue
}

View File

@ -13,7 +13,7 @@ import (
"github.com/fsnotify/fsnotify"
"github.com/k3s-io/k3s/pkg/agent/cri"
"github.com/k3s-io/k3s/pkg/daemons/config"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/rancher/wharfie/pkg/tarfile"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -137,18 +137,18 @@ func (w *watchqueue) processImageEvent(ctx context.Context, key string, client *
defer w.syncCache()
return nil
} else if err != nil {
return pkgerrors.Wrapf(err, "failed to get fileinfo for image event %s", key)
return errors.WithMessagef(err, "failed to get fileinfo for image event %s", key)
}
if file.IsDir() {
// Add to watch and list+enqueue directory contents, as notify is not recursive
if err := w.watcher.Add(key); err != nil {
return pkgerrors.Wrapf(err, "failed to add watch of %s", key)
return errors.WithMessagef(err, "failed to add watch of %s", key)
}
fileInfos, err := os.ReadDir(key)
if err != nil {
return pkgerrors.Wrapf(err, "unable to list contents of %s", key)
return errors.WithMessagef(err, "unable to list contents of %s", key)
}
for _, fileInfo := range fileInfos {
@ -164,7 +164,7 @@ func (w *watchqueue) processImageEvent(ctx context.Context, key string, client *
if lastFileState := w.filesCache[key]; lastFileState == nil || (file.Size() != lastFileState.Size && file.ModTime().After(lastFileState.ModTime.Time)) {
start := time.Now()
if err := preloadFile(ctx, w.cfg, client, imageClient, key); err != nil {
return pkgerrors.Wrapf(err, "failed to import %s", key)
return errors.WithMessagef(err, "failed to import %s", key)
}
logrus.Infof("Imported images from %s in %s", key, time.Since(start))
w.filesCache[key] = &fileInfo{Size: file.Size(), ModTime: metav1.NewTime(file.ModTime()), seen: true}
@ -274,7 +274,7 @@ func watchImages(ctx context.Context, cfg *config.Node) (*watchqueue, error) {
// watch the directory above the images dir, as it may not exist yet when the watch is started.
watcher, err := createWatcher(filepath.Dir(cfg.Images))
if err != nil {
return nil, pkgerrors.Wrapf(err, "failed to create image import watcher for %s", filepath.Dir(cfg.Images))
return nil, errors.WithMessagef(err, "failed to create image import watcher for %s", filepath.Dir(cfg.Images))
}
w := &watchqueue{

View File

@ -8,7 +8,7 @@ import (
"github.com/docker/docker/client"
"github.com/k3s-io/k3s/pkg/daemons/config"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
)
const socketPrefix = "unix://"
@ -24,11 +24,11 @@ func setupDockerCRIConfig(ctx context.Context, cfg *config.Node) error {
}
c, err := client.NewClientWithOpts(clientOpts...)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create docker client")
return errors.WithMessage(err, "failed to create docker client")
}
i, err := c.Info(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get docker runtime info")
return errors.WithMessage(err, "failed to get docker runtime info")
}
// note: this mutatation of the passed agent.Config is later used to set the
// kubelet's cgroup-driver flag. This may merit moving to somewhere else in order

View File

@ -4,20 +4,18 @@ package cridockerd
import (
"context"
"errors"
"os"
"runtime/debug"
"strings"
"github.com/Mirantis/cri-dockerd/cmd"
"github.com/Mirantis/cri-dockerd/cmd/version"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/agent/cri"
"github.com/k3s-io/k3s/pkg/cgroups"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
utilsnet "k8s.io/utils/net"
@ -42,7 +40,7 @@ func Run(ctx context.Context, cfg *config.Node) error {
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "cri-dockerd exited"))
signals.RequestShutdown(errors.WithMessage(err, "cri-dockerd exited"))
}
signals.RequestShutdown(nil)
}()

View File

@ -15,7 +15,6 @@
package flannel
import (
"errors"
"fmt"
"math/big"
"net"
@ -29,7 +28,7 @@ import (
"github.com/flannel-io/flannel/pkg/subnet/kube"
"github.com/flannel-io/flannel/pkg/trafficmngr/iptables"
"github.com/joho/godotenv"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -61,7 +60,7 @@ var (
func flannel(ctx context.Context, wg *sync.WaitGroup, flannelIface *net.Interface, flannelConf, kubeConfigFile string, flannelIPv6Masq bool, nm netMode) error {
extIface, err := LookupExtInterface(flannelIface, nm)
if err != nil {
return pkgerrors.WithMessage(err, "failed to find the interface")
return errors.WithMessage(err, "failed to find the interface")
}
sm, err := kube.NewSubnetManager(ctx,
@ -71,12 +70,12 @@ func flannel(ctx context.Context, wg *sync.WaitGroup, flannelIface *net.Interfac
flannelConf,
false)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create the SubnetManager")
return errors.WithMessage(err, "failed to create the SubnetManager")
}
config, err := sm.GetNetworkConfig(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get the network config")
return errors.WithMessage(err, "failed to get the network config")
}
// Create a backend manager then use it to create the backend and register the network with it.
@ -84,17 +83,17 @@ func flannel(ctx context.Context, wg *sync.WaitGroup, flannelIface *net.Interfac
be, err := bm.GetBackend(config.BackendType)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create the flannel backend")
return errors.WithMessage(err, "failed to create the flannel backend")
}
bn, err := be.RegisterNetwork(ctx, wg, config)
if err != nil {
return pkgerrors.WithMessage(err, "failed to register flannel network")
return errors.WithMessage(err, "failed to register flannel network")
}
trafficMngr := &iptables.IPTablesManager{}
err = trafficMngr.Init(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to initialize flannel ipTables manager")
return errors.WithMessage(err, "failed to initialize flannel ipTables manager")
}
if nm.IPv4Enabled() && config.Network.Empty() {
@ -114,7 +113,7 @@ func flannel(ctx context.Context, wg *sync.WaitGroup, flannelIface *net.Interfac
err = trafficMngr.SetupAndEnsureMasqRules(ctx, config.Network, prevSubnet, prevNetwork, ip.IP6Net{}, prevIPv6Subnet, prevIPv6Network, bn.Lease(), 60, false)
}
if err != nil {
return pkgerrors.WithMessage(err, "failed to setup masq rules")
return errors.WithMessage(err, "failed to setup masq rules")
}
// setup forward rules
@ -142,11 +141,11 @@ func LookupExtInterface(iface *net.Interface, nm netMode) (*backend.ExternalInte
logrus.Debug("No interface defined for flannel in the config. Fetching the default gateway interface")
if nm.IPv4Enabled() {
if iface, err = ip.GetDefaultGatewayInterface(); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get default interface")
return nil, errors.WithMessage(err, "failed to get default interface")
}
} else {
if iface, err = ip.GetDefaultV6GatewayInterface(); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get default interface")
return nil, errors.WithMessage(err, "failed to get default interface")
}
}
}
@ -155,7 +154,7 @@ func LookupExtInterface(iface *net.Interface, nm netMode) (*backend.ExternalInte
if nm.IPv4Enabled() {
ifaceAddr, err = ip.GetInterfaceIP4Addrs(iface)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to find IPv4 address for interface %s", iface.Name)
return nil, errors.WithMessagef(err, "failed to find IPv4 address for interface %s", iface.Name)
}
logrus.Infof("The interface %s with ipv4 address %s will be used by flannel", iface.Name, ifaceAddr[0])
} else {
@ -164,7 +163,7 @@ func LookupExtInterface(iface *net.Interface, nm netMode) (*backend.ExternalInte
if nm.IPv6Enabled() {
ifacev6Addr, err = ip.GetInterfaceIP6Addrs(iface)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to find IPv6 address for interface %s", iface.Name)
return nil, errors.WithMessagef(err, "failed to find IPv6 address for interface %s", iface.Name)
}
logrus.Infof("The interface %s with ipv6 address %s will be used by flannel", iface.Name, ifacev6Addr[0])
} else {

View File

@ -2,7 +2,6 @@ package flannel
import (
"context"
"errors"
"fmt"
"net"
"path/filepath"
@ -14,8 +13,8 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/vpn"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
authorizationv1 "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
@ -86,7 +85,7 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
// use the kubelet kubeconfig to sync node annotations, as the k3s-controller
// rbac does not allow create or update of nodes.
if err := syncAnnotations(ctx, nodeConfig, coreClient); err != nil {
return pkgerrors.WithMessage(err, "flannel failed to sync address annotations")
return errors.WithMessage(err, "flannel failed to sync address annotations")
}
resourceAttrs := authorizationv1.ResourceAttributes{Verb: "list", Resource: "nodes"}
@ -95,7 +94,7 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
// Flannel needs to watch all nodes in the cluster, which the kubelet is not allowed to do on recent versions of Kubernetes.
// If the kubelet cannot list nodes, then wait for the k3s-controller RBAC to become ready, and use that kubeconfig instead.
if canListNodes, err := util.CheckRBAC(ctx, kubeConfig, resourceAttrs, ""); err != nil {
return pkgerrors.WithMessage(err, "failed to check if RBAC allows node list")
return errors.WithMessage(err, "failed to check if RBAC allows node list")
} else if !canListNodes {
kubeConfig = nodeConfig.AgentConfig.KubeConfigK3sController
coreClient, err = util.GetClientSet(kubeConfig)
@ -103,22 +102,22 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
return err
}
if err := util.WaitForRBACReady(ctx, kubeConfig, util.DefaultAPIServerReadyTimeout, resourceAttrs, ""); err != nil {
return pkgerrors.WithMessage(err, "flannel failed to wait for RBAC")
return errors.WithMessage(err, "flannel failed to wait for RBAC")
}
}
if err := waitForPodCIDR(ctx, nodeConfig.AgentConfig.NodeName, coreClient); err != nil {
return pkgerrors.WithMessage(err, "flannel failed to wait for PodCIDR assignment")
return errors.WithMessage(err, "flannel failed to wait for PodCIDR assignment")
}
nm, err := findNetMode(nodeConfig.AgentConfig.ClusterCIDRs)
if err != nil {
return pkgerrors.WithMessage(err, "failed to check netMode for flannel")
return errors.WithMessage(err, "failed to check netMode for flannel")
}
go func() {
err := flannel(ctx, wg, nodeConfig.Flannel.Iface, nodeConfig.Flannel.ConfFile, kubeConfig, nodeConfig.Flannel.IPv6Masq, nm)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "flannel exited"))
signals.RequestShutdown(errors.WithMessage(err, "flannel exited"))
}
signals.RequestShutdown(nil)
}()
@ -137,7 +136,7 @@ func waitForPodCIDR(ctx context.Context, nodeName string, coreClient kubernetes.
}
if _, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition); err != nil {
return pkgerrors.WithMessage(err, "failed to wait for PodCIDR assignment")
return errors.WithMessage(err, "failed to wait for PodCIDR assignment")
}
logrus.Info("Flannel found PodCIDR assigned for node " + nodeName)

View File

@ -8,9 +8,9 @@ import (
"strconv"
"time"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
httpdialer "github.com/mwitkow/go-http-dialer"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http/httpproxy"
"golang.org/x/net/proxy"
@ -32,14 +32,14 @@ func SetHTTPProxy(address string) error {
serverURL, err := url.Parse(address)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to parse address %s", address)
return errors.WithMessagef(err, "failed to parse address %s", address)
}
// Call this directly instead of using the cached environment used by http.ProxyFromEnvironment to allow for testing
proxyFromEnvironment := httpproxy.FromEnvironment().ProxyFunc()
proxyURL, err := proxyFromEnvironment(serverURL)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to get proxy for address %s", address)
return errors.WithMessagef(err, "failed to get proxy for address %s", address)
}
if proxyURL == nil {
logrus.Debug(version.ProgramUpper + "_AGENT_HTTP_PROXY_ALLOWED is true but no proxy is configured for URL " + serverURL.String())
@ -48,7 +48,7 @@ func SetHTTPProxy(address string) error {
dialer, err := proxyDialer(proxyURL, defaultDialer)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to create proxy dialer for %s", proxyURL)
return errors.WithMessagef(err, "failed to create proxy dialer for %s", proxyURL)
}
defaultDialer = dialer

View File

@ -26,7 +26,7 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/metrics"
"github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
@ -96,7 +96,7 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
}
return true, nil
}); err != nil {
return pkgerrors.WithMessagef(err, "network policy controller failed to wait for %s taint to be removed from Node %s", cloudproviderapi.TaintExternalCloudProvider, nodeConfig.AgentConfig.NodeName)
return errors.WithMessagef(err, "network policy controller failed to wait for %s taint to be removed from Node %s", cloudproviderapi.TaintExternalCloudProvider, nodeConfig.AgentConfig.NodeName)
}
krConfig := options.NewKubeRouterConfig()
@ -130,13 +130,13 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
if nodeConfig.AgentConfig.EnableIPv4 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create iptables handler")
return errors.WithMessage(err, "failed to create iptables handler")
}
iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
ipset, err := utils.NewIPSet(false)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create ipset handler")
return errors.WithMessage(err, "failed to create ipset handler")
}
ipSetHandlers[v1.IPv4Protocol] = ipset
}
@ -144,13 +144,13 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
if nodeConfig.AgentConfig.EnableIPv6 {
ipt6Handler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create iptables handler")
return errors.WithMessage(err, "failed to create iptables handler")
}
iptablesCmdHandlers[v1.IPv6Protocol] = ipt6Handler
ipset, err := utils.NewIPSet(true)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create ipset handler")
return errors.WithMessage(err, "failed to create ipset handler")
}
ipSetHandlers[v1.IPv6Protocol] = ipset
}
@ -179,7 +179,7 @@ func Run(ctx context.Context, wg *sync.WaitGroup, nodeConfig *config.Node) error
npc, err := netpol.NewNetworkPolicyController(client, krConfig, podInformer, npInformer, nsInformer, &sync.Mutex{}, nil,
iptablesCmdHandlers, ipSetHandlers)
if err != nil {
return pkgerrors.WithMessage(err, "unable to initialize network policy controller")
return errors.WithMessage(err, "unable to initialize network policy controller")
}
podInformer.AddEventHandler(npc.PodEventHandler)

View File

@ -9,7 +9,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/k3s-io/k3s/pkg/agent/loadbalancer"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
)
type Proxy interface {
@ -57,7 +57,7 @@ func NewSupervisorProxy(ctx context.Context, lbEnabled bool, dataDir, supervisor
u, err := url.Parse(p.initialSupervisorURL)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to parse %s", p.initialSupervisorURL)
return nil, errors.WithMessagef(err, "failed to parse %s", p.initialSupervisorURL)
}
p.fallbackSupervisorAddress = u.Host
p.supervisorPort = u.Port()
@ -139,7 +139,7 @@ func (p *proxy) SetAPIServerPort(port int, isIPv6 bool) error {
u, err := url.Parse(p.initialSupervisorURL)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to parse server URL %s", p.initialSupervisorURL)
return errors.WithMessagef(err, "failed to parse server URL %s", p.initialSupervisorURL)
}
p.apiServerPort = strconv.Itoa(port)
u.Host = net.JoinHostPort(u.Hostname(), p.apiServerPort)

View File

@ -2,7 +2,6 @@ package agent
import (
"context"
"errors"
"fmt"
"net"
"os"
@ -34,8 +33,8 @@ import (
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/spegel"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -56,20 +55,20 @@ import (
func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
nodeConfig, err := config.Get(ctx, cfg, proxy)
if err != nil {
return pkgerrors.WithMessage(err, "failed to retrieve agent configuration")
return errors.WithMessage(err, "failed to retrieve agent configuration")
}
dualCluster, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ClusterCIDRs)
if err != nil {
return pkgerrors.WithMessage(err, "failed to validate cluster-cidr")
return errors.WithMessage(err, "failed to validate cluster-cidr")
}
dualService, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ServiceCIDRs)
if err != nil {
return pkgerrors.WithMessage(err, "failed to validate service-cidr")
return errors.WithMessage(err, "failed to validate service-cidr")
}
dualNode, err := utilsnet.IsDualStackIPs(nodeConfig.AgentConfig.NodeIPs)
if err != nil {
return pkgerrors.WithMessage(err, "failed to validate node-ip")
return errors.WithMessage(err, "failed to validate node-ip")
}
serviceIPv4 := utilsnet.IsIPv4CIDR(nodeConfig.AgentConfig.ServiceCIDR)
clusterIPv4 := utilsnet.IsIPv4CIDR(nodeConfig.AgentConfig.ClusterCIDR)
@ -98,7 +97,7 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
conntrackConfig, err := getConntrackConfig(nodeConfig)
if err != nil {
return pkgerrors.WithMessage(err, "failed to validate kube-proxy conntrack configuration")
return errors.WithMessage(err, "failed to validate kube-proxy conntrack configuration")
}
syssetup.Configure(enableIPv6, conntrackConfig)
nodeConfig.AgentConfig.EnableIPv4 = enableIPv4
@ -114,19 +113,19 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
}
if err := spegel.DefaultRegistry.Start(ctx, nodeConfig, executor.CRIReadyChan()); err != nil {
return pkgerrors.WithMessage(err, "failed to start embedded registry")
return errors.WithMessage(err, "failed to start embedded registry")
}
}
if nodeConfig.SupervisorMetrics {
if err := metrics.DefaultMetrics.Start(ctx, nodeConfig); err != nil {
return pkgerrors.WithMessage(err, "failed to serve metrics")
return errors.WithMessage(err, "failed to serve metrics")
}
}
if nodeConfig.EnablePProf {
if err := profile.DefaultProfiler.Start(ctx, nodeConfig); err != nil {
return pkgerrors.WithMessage(err, "failed to serve pprof")
return errors.WithMessage(err, "failed to serve pprof")
}
}
@ -144,7 +143,7 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
go func() {
if err := startCRI(ctx, nodeConfig); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to start container runtime"))
signals.RequestShutdown(errors.WithMessage(err, "failed to start container runtime"))
}
}()
@ -155,7 +154,7 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
go func() {
<-executor.APIServerReadyChan()
if err := startNetwork(ctx, &sync.WaitGroup{}, nodeConfig); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to start networking"))
signals.RequestShutdown(errors.WithMessage(err, "failed to start networking"))
return
}
@ -254,7 +253,7 @@ func RunStandalone(ctx context.Context, wg *sync.WaitGroup, cfg cmds.Agent) erro
nodeConfig, err := config.Get(ctx, cfg, proxy)
if err != nil {
return pkgerrors.WithMessage(err, "failed to retrieve agent configuration")
return errors.WithMessage(err, "failed to retrieve agent configuration")
}
if err := executor.Bootstrap(ctx, nodeConfig, cfg); err != nil {
@ -275,13 +274,13 @@ func RunStandalone(ctx context.Context, wg *sync.WaitGroup, cfg cmds.Agent) erro
if nodeConfig.SupervisorMetrics {
if err := metrics.DefaultMetrics.Start(ctx, nodeConfig); err != nil {
return pkgerrors.WithMessage(err, "failed to serve metrics")
return errors.WithMessage(err, "failed to serve metrics")
}
}
if nodeConfig.EnablePProf {
if err := profile.DefaultProfiler.Start(ctx, nodeConfig); err != nil {
return pkgerrors.WithMessage(err, "failed to serve pprof")
return errors.WithMessage(err, "failed to serve pprof")
}
}
@ -324,7 +323,7 @@ func createProxyAndValidateToken(ctx context.Context, cfg *cmds.Agent) (proxy.Pr
_, nodeIPs, err := util.GetHostnameAndIPs(cfg.NodeName, cfg.NodeIP.Value())
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get node name and addresses")
return nil, errors.WithMessage(err, "failed to get node name and addresses")
}
proxy, err := proxy.NewSupervisorProxy(ctx, !cfg.DisableLoadBalancer, agentDir, cfg.ServerURL, cfg.LBServerPort, utilsnet.IsIPv6(nodeIPs[0]))
@ -388,7 +387,7 @@ func configureNode(ctx context.Context, nodeConfig *daemonconfig.Node, coreClien
return true, nil
}
if _, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition); err != nil {
return pkgerrors.WithMessage(err, "failed to configure node")
return errors.WithMessage(err, "failed to configure node")
}
return nil
}

View File

@ -3,7 +3,6 @@ package tunnel
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"os"
@ -19,8 +18,8 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/executor"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/remotedialer"
"github.com/sirupsen/logrus"
"github.com/yl2chen/cidranger"
@ -118,7 +117,7 @@ func (a *agentTunnel) startWatches(ctx context.Context, config *daemonconfig.Nod
Group: "discovery.k8s.io",
Resource: "endpointslices",
}, ""); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "tunnel watches failed to wait for RBAC"))
signals.RequestShutdown(errors.WithMessage(err, "tunnel watches failed to wait for RBAC"))
return
}

View File

@ -1,18 +1,17 @@
package util
import (
"errors"
"os"
"path/filepath"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
)
func WriteFile(name string, content string) error {
os.MkdirAll(filepath.Dir(name), 0755)
err := os.WriteFile(name, []byte(content), 0644)
if err != nil {
return pkgerrors.WithMessagef(err, "writing %s", name)
return errors.WithMessagef(err, "writing %s", name)
}
return nil
}
@ -23,11 +22,11 @@ func CopyFile(sourceFile string, destinationFile string, ignoreNotExist bool) er
if errors.Is(err, os.ErrNotExist) && ignoreNotExist {
return nil
} else if err != nil {
return pkgerrors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile)
return errors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile)
}
err = os.WriteFile(destinationFile, input, 0644)
if err != nil {
return pkgerrors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile)
return errors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile)
}
return nil
}

View File

@ -8,7 +8,7 @@ import (
"time"
"github.com/k3s-io/k3s/pkg/daemons/config"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -71,13 +71,13 @@ func WriteToDiskFromStorage(files PathsDataformat, bootstrap *config.ControlRunt
}
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return pkgerrors.WithMessagef(err, "failed to mkdir %s", filepath.Dir(path))
return errors.WithMessagef(err, "failed to mkdir %s", filepath.Dir(path))
}
if err := os.WriteFile(path, bsf.Content, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write to %s", path)
return errors.WithMessagef(err, "failed to write to %s", path)
}
if err := os.Chtimes(path, bsf.Timestamp, bsf.Timestamp); err != nil {
return pkgerrors.WithMessagef(err, "failed to update modified time on %s", path)
return errors.WithMessagef(err, "failed to update modified time on %s", path)
}
}

View File

@ -3,7 +3,6 @@ package agent
import (
"context"
"crypto/tls"
"errors"
"os"
"path/filepath"
"sync"
@ -20,10 +19,10 @@ import (
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/spegel"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/permissions"
"github.com/k3s-io/k3s/pkg/version"
"github.com/k3s-io/k3s/pkg/vpn"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
@ -65,7 +64,7 @@ func Run(clx *cli.Context) (rerr error) {
if !cmds.AgentConfig.Rootless {
if err := permissions.IsPrivileged(); err != nil {
return pkgerrors.WithMessage(err, "agent requires additional privilege if not run with --rootless")
return errors.WithMessage(err, "agent requires additional privilege if not run with --rootless")
}
}

View File

@ -3,7 +3,6 @@ package cert
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@ -23,10 +22,10 @@ import (
"github.com/k3s-io/k3s/pkg/proctitle"
"github.com/k3s-io/k3s/pkg/server"
k3sutil "github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/services"
"github.com/k3s-io/k3s/pkg/version"
"github.com/otiai10/copy"
pkgerrors "github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
@ -401,7 +400,7 @@ func rotateCA(app *cli.Context, cfg *cmds.Server, sync *cmds.CertRotateCA) error
url := fmt.Sprintf("/v1-%s/cert/cacerts?force=%t", version.Program, sync.Force)
if err = info.Put(url, buf.Bytes()); err != nil {
return pkgerrors.WithMessage(err, "see server log for details")
return errors.WithMessage(err, "see server log for details")
}
fmt.Println("certificates saved to datastore")

View File

@ -5,8 +5,8 @@ package cmds
import (
"os"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/moby/sys/userns"
pkgerrors "github.com/pkg/errors"
"github.com/rootless-containers/rootlesskit/pkg/parent/cgrouputil"
)
@ -17,7 +17,7 @@ func EvacuateCgroup2() error {
// The root cgroup has to be empty to enable subtree_control, so evacuate it by placing
// ourselves in the init cgroup.
if err := cgrouputil.EvacuateCgroup2("init"); err != nil {
return pkgerrors.WithMessage(err, "failed to evacuate root cgroup")
return errors.WithMessage(err, "failed to evacuate root cgroup")
}
}
return nil

View File

@ -12,9 +12,9 @@ import (
systemd "github.com/coreos/go-systemd/v22/daemon"
"github.com/k3s-io/k3s/pkg/proctitle"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/natefinch/lumberjack"
pkgerrors "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@ -46,7 +46,7 @@ func forkIfLoggingOrReaping() error {
pwd, err := os.Getwd()
if err != nil {
return pkgerrors.WithMessage(err, "failed to get working directory")
return errors.WithMessage(err, "failed to get working directory")
}
if enableReaping {

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@ -21,8 +20,7 @@ import (
"github.com/k3s-io/k3s/pkg/etcd"
"github.com/k3s-io/k3s/pkg/proctitle"
"github.com/k3s-io/k3s/pkg/server"
util2 "github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -95,7 +93,7 @@ func wrapServerError(err error) error {
// since the operation may have actualy succeeded despite the client timing out the request.
return err
}
return pkgerrors.WithMessage(err, "see server log for details")
return errors.WithMessage(err, "see server log for details")
}
// Save triggers an on-demand etcd snapshot operation
@ -108,7 +106,7 @@ func Save(app *cli.Context) error {
func save(app *cli.Context, cfg *cmds.Server) error {
if app.Args().Len() > 0 {
return util2.ErrCommandNoArgs
return errors.ErrCommandNoArgs
}
// Save always sets retention to 0 to disable automatic pruning.

View File

@ -16,8 +16,8 @@ import (
"github.com/k3s-io/k3s/pkg/secretsencrypt"
"github.com/k3s-io/k3s/pkg/server"
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/urfave/cli/v2"
"k8s.io/utils/ptr"
)
@ -44,7 +44,7 @@ func commandPrep(cfg *cmds.Server) (*clientaccess.Info, error) {
}
func wrapServerError(err error) error {
return pkgerrors.WithMessage(err, "see server log for details")
return errors.WithMessage(err, "see server log for details")
}
func Enable(app *cli.Context) error {

View File

@ -2,7 +2,6 @@ package server
import (
"context"
"errors"
"fmt"
"net"
"os"
@ -30,10 +29,10 @@ import (
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/spegel"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/permissions"
"github.com/k3s-io/k3s/pkg/version"
"github.com/k3s-io/k3s/pkg/vpn"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -94,7 +93,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
if !cfg.DisableAgent && !cfg.Rootless {
if err := permissions.IsPrivileged(); err != nil {
return pkgerrors.WithMessage(err, "server requires additional privilege when not run with --rootless and/or --disable-agent")
return errors.WithMessage(err, "server requires additional privilege when not run with --rootless and/or --disable-agent")
}
}
@ -353,7 +352,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
for _, cidr := range util.SplitStringSlice(cmds.ServerConfig.ClusterCIDR.Value()) {
_, parsed, err := net.ParseCIDR(cidr)
if err != nil {
return pkgerrors.WithMessagef(err, "invalid cluster-cidr %s", cidr)
return errors.WithMessagef(err, "invalid cluster-cidr %s", cidr)
}
serverConfig.ControlConfig.ClusterIPRanges = append(serverConfig.ControlConfig.ClusterIPRanges, parsed)
}
@ -368,7 +367,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
for _, cidr := range util.SplitStringSlice(cmds.ServerConfig.ServiceCIDR.Value()) {
_, parsed, err := net.ParseCIDR(cidr)
if err != nil {
return pkgerrors.WithMessagef(err, "invalid service-cidr %s", cidr)
return errors.WithMessagef(err, "invalid service-cidr %s", cidr)
}
serverConfig.ControlConfig.ServiceIPRanges = append(serverConfig.ControlConfig.ServiceIPRanges, parsed)
}
@ -378,7 +377,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
serverConfig.ControlConfig.ServiceNodePortRange, err = utilnet.ParsePortRange(cfg.ServiceNodePortRange)
if err != nil {
return pkgerrors.WithMessagef(err, "invalid port range %s", cfg.ServiceNodePortRange)
return errors.WithMessagef(err, "invalid port range %s", cfg.ServiceNodePortRange)
}
// the apiserver service does not yet support dual-stack operation
@ -396,7 +395,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
for _, svcCIDR := range serverConfig.ControlConfig.ServiceIPRanges {
clusterDNS, err := utilsnet.GetIndexedIP(svcCIDR, 10)
if err != nil {
return pkgerrors.WithMessage(err, "cannot configure default cluster-dns address")
return errors.WithMessage(err, "cannot configure default cluster-dns address")
}
serverConfig.ControlConfig.ClusterDNSs = append(serverConfig.ControlConfig.ClusterDNSs, clusterDNS)
}
@ -446,7 +445,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
serverConfig.ControlConfig.MinTLSVersion = tlsMinVersionArg
serverConfig.ControlConfig.TLSMinVersion, err = kubeapiserverflag.TLSVersion(tlsMinVersionArg)
if err != nil {
return pkgerrors.WithMessage(err, "invalid tls-min-version")
return errors.WithMessage(err, "invalid tls-min-version")
}
serverConfig.StartupHooks = append(serverConfig.StartupHooks, cfg.StartupHooks...)
@ -476,7 +475,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
serverConfig.ControlConfig.CipherSuites = tlsCipherSuites
serverConfig.ControlConfig.TLSCipherSuites, err = kubeapiserverflag.TLSCipherSuites(tlsCipherSuites)
if err != nil {
return pkgerrors.WithMessage(err, "invalid tls-cipher-suites")
return errors.WithMessage(err, "invalid tls-cipher-suites")
}
// If performing a cluster reset, make sure control-plane components are

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@ -19,8 +18,8 @@ import (
"github.com/k3s-io/k3s/pkg/server"
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/urfave/cli/v2"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -122,7 +121,7 @@ func deleteToken(app *cli.Context, cfg *cmds.Token) error {
}
secretName := bootstraputil.BootstrapTokenSecretName(token)
if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretName, metav1.DeleteOptions{}); err != nil {
return pkgerrors.WithMessagef(err, "failed to delete bootstrap token %q", err)
return errors.WithMessagef(err, "failed to delete bootstrap token %q", token)
}
fmt.Printf("bootstrap token %q deleted\n", token)
@ -219,7 +218,7 @@ func list(app *cli.Context, cfg *cmds.Token) error {
secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(context.TODO(), listOptions)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to list bootstrap tokens")
return errors.WithMessage(err, "failed to list bootstrap tokens")
}
tokens := make([]*kubeadm.BootstrapToken, len(secrets.Items))

View File

@ -3,7 +3,8 @@ package clientaccess
import (
"os"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -12,17 +13,17 @@ import (
func WriteClientKubeConfig(destFile, url, serverCAFile, clientCertFile, clientKeyFile string) error {
serverCA, err := os.ReadFile(serverCAFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to read %s", serverCAFile)
return errors.WithMessagef(err, "failed to read %s", serverCAFile)
}
clientCert, err := os.ReadFile(clientCertFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to read %s", clientCertFile)
return errors.WithMessagef(err, "failed to read %s", clientCertFile)
}
clientKey, err := os.ReadFile(clientKeyFile)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to read %s", clientKeyFile)
return errors.WithMessagef(err, "failed to read %s", clientKeyFile)
}
config := clientcmdapi.NewConfig()

View File

@ -7,7 +7,6 @@ import (
"crypto/x509"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -17,7 +16,7 @@ import (
"time"
"github.com/k3s-io/k3s/pkg/kubeadm"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -365,7 +364,7 @@ func (i *Info) Post(path string, body []byte, options ...any) ([]byte, error) {
func (i *Info) setServer(server string) error {
url, err := url.Parse(server)
if err != nil {
return pkgerrors.WithMessagef(err, "Invalid server url, failed to parse: %s", server)
return errors.WithMessagef(err, "Invalid server url, failed to parse: %s", server)
}
if url.Scheme != "https" {
@ -425,7 +424,7 @@ func getCACerts(u url.URL) ([]byte, error) {
// Download the CA bundle using a client that does not validate certs.
cacerts, err := get(url, insecureClient, "", "", "")
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get CA certs")
return nil, errors.WithMessage(err, "failed to get CA certs")
}
// Request the CA bundle again, validating that the CA bundle can be loaded
@ -433,7 +432,7 @@ func getCACerts(u url.URL) ([]byte, error) {
// get an empty CA bundle. or if the dynamiclistener cert is incorrectly signed.
_, err = get(url, GetHTTPClient(cacerts, "", ""), "", "", "")
if err != nil {
return nil, pkgerrors.WithMessage(err, "CA cert validation failed")
return nil, errors.WithMessage(err, "CA cert validation failed")
}
return cacerts, nil

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@ -23,11 +22,11 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/executor"
"github.com/k3s-io/k3s/pkg/etcd/store"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/k3s-io/kine/pkg/endpoint"
"github.com/k3s-io/kine/pkg/tls"
"github.com/otiai10/copy"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/mvccpb"
)
@ -37,7 +36,7 @@ import (
// ControlRuntimeBootstrap struct, either via HTTP or from the datastore.
func (c *Cluster) Bootstrap(ctx context.Context, clusterReset bool) error {
if err := c.assignManagedDriver(ctx); err != nil {
return pkgerrors.WithMessage(err, "failed to set datastore driver")
return errors.WithMessage(err, "failed to set datastore driver")
}
// Check if we need to bootstrap, and whether or not the managed database has already
@ -46,7 +45,7 @@ func (c *Cluster) Bootstrap(ctx context.Context, clusterReset bool) error {
// This also sets c.clientAccessInfo if c.config.JoinURL and c.config.Token are set.
shouldBootstrap, isInitialized, err := c.shouldBootstrapLoad(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to check if bootstrap data has been initialized")
return errors.WithMessage(err, "failed to check if bootstrap data has been initialized")
}
if c.managedDB != nil {
@ -54,7 +53,7 @@ func (c *Cluster) Bootstrap(ctx context.Context, clusterReset bool) error {
// secondary server with etcd disabled, start the etcd proxy so that we can attempt to use it
// when reconciling.
if err := c.startEtcdProxy(ctx); err != nil {
return pkgerrors.WithMessage(err, "failed to start etcd proxy")
return errors.WithMessage(err, "failed to start etcd proxy")
}
} else if isInitialized && !clusterReset {
// For secondary servers with etcd, first attempt to connect and reconcile using the join URL.
@ -71,7 +70,7 @@ func (c *Cluster) Bootstrap(ctx context.Context, clusterReset bool) error {
// extract bootstrap data from a copy of the etcd mvcc store and reconcile
// against that.
if err := c.ReconcileBootstrapData(ctx, nil, &c.config.Runtime.ControlRuntimeBootstrap, false); err != nil {
return pkgerrors.WithMessage(err, "failed to reconcile with local datastore")
return errors.WithMessage(err, "failed to reconcile with local datastore")
}
logrus.Info("Successfully reconciled with local datastore")
}
@ -129,7 +128,7 @@ func (c *Cluster) shouldBootstrapLoad(ctx context.Context) (bool, bool, error) {
// the hash in the token. The password isn't actually checked until later when actually bootstrapping.
info, err := clientaccess.ParseAndValidateToken(c.config.JoinURL, c.config.Token, opts...)
if err != nil {
return false, false, pkgerrors.WithMessage(err, "failed to validate token")
return false, false, errors.WithMessage(err, "failed to validate token")
}
c.clientAccessInfo = info
@ -313,7 +312,7 @@ func (c *Cluster) ReconcileBootstrapData(ctx context.Context, buf io.ReadSeeker,
if storageClient == nil {
storageClient, err = store.NewTemporaryStore(filepath.Join(c.config.DataDir, "db", "etcd"))
if err != nil {
return pkgerrors.WithMessage(err, "failed to create temporary datastore client")
return errors.WithMessage(err, "failed to create temporary datastore client")
}
}
@ -367,7 +366,7 @@ func (c *Cluster) ReconcileBootstrapData(ctx context.Context, buf io.ReadSeeker,
updated, newer, err := isNewerFile(path, fileData)
if err != nil {
return pkgerrors.WithMessagef(err, "failed to get update status of %s", pathKey)
return errors.WithMessagef(err, "failed to get update status of %s", pathKey)
}
if newer {
newerOnDisk = append(newerOnDisk, path)
@ -384,10 +383,10 @@ func (c *Cluster) ReconcileBootstrapData(ctx context.Context, buf io.ReadSeeker,
logrus.Infof("Cluster reset: backing up certificates directory to %s", tlsBackupDir)
if _, err := os.Stat(serverTLSDir); err != nil {
return pkgerrors.WithMessage(err, "cluster reset failed to stat server TLS dir")
return errors.WithMessage(err, "cluster reset failed to stat server TLS dir")
}
if err := copy.Copy(serverTLSDir, tlsBackupDir); err != nil {
return pkgerrors.WithMessage(err, "cluster reset failed to back up server TLS dir")
return errors.WithMessage(err, "cluster reset failed to back up server TLS dir")
}
} else if len(newerOnDisk) > 0 {
logrus.Fatal(strings.Join(newerOnDisk, ", ") + " newer than datastore and could cause a cluster outage. Remove the file(s) from disk and restart to be recreated from datastore.")
@ -410,13 +409,13 @@ func isNewerFile(path string, file bootstrap.File) (updated bool, newerOnDisk bo
logrus.Warn(path + " doesn't exist. continuing...")
return true, false, nil
}
return false, false, pkgerrors.WithMessagef(err, "reconcile failed to open")
return false, false, errors.WithMessagef(err, "reconcile failed to open")
}
defer f.Close()
data, err := io.ReadAll(f)
if err != nil {
return false, false, pkgerrors.WithMessagef(err, "reconcile failed to read")
return false, false, errors.WithMessagef(err, "reconcile failed to read")
}
if bytes.Equal(file.Content, data) {
@ -425,7 +424,7 @@ func isNewerFile(path string, file bootstrap.File) (updated bool, newerOnDisk bo
info, err := f.Stat()
if err != nil {
return false, false, pkgerrors.WithMessagef(err, "reconcile failed to stat")
return false, false, errors.WithMessagef(err, "reconcile failed to stat")
}
if info.ModTime().Unix()-file.Timestamp.Unix() >= systemTimeSkew {
@ -486,7 +485,7 @@ func (c *Cluster) bootstrap(ctx context.Context) error {
if c.managedDB != nil {
// Try to compare local config against the server we're joining.
if err := c.compareConfig(); err != nil {
return pkgerrors.WithMessage(err, "failed to validate server configuration")
return errors.WithMessage(err, "failed to validate server configuration")
}
// Try to bootstrap from the datastore using the local etcd proxy.
if data, err := c.getBootstrapData(ctx, c.clientAccessInfo.Password); err != nil {

View File

@ -2,7 +2,6 @@ package cluster
import (
"context"
"errors"
"net"
"net/url"
"strings"
@ -18,8 +17,8 @@ import (
"github.com/k3s-io/k3s/pkg/metrics"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/kine/pkg/endpoint"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
utilsnet "k8s.io/utils/net"
@ -60,7 +59,7 @@ func (c *Cluster) Start(ctx context.Context, wg *sync.WaitGroup) error {
// start managed etcd database; when kine is in use this is a no-op.
if err := c.start(ctx, wg); err != nil {
return pkgerrors.WithMessage(err, "start managed database")
return errors.WithMessage(err, "start managed database")
}
// set c.config.Datastore and c.config.Runtime.EtcdConfig with values
@ -85,7 +84,7 @@ func (c *Cluster) Start(ctx context.Context, wg *sync.WaitGroup) error {
// always save to managed etcd, to ensure that any file modified locally are in sync with the datastore.
// this will fail if multiple keys exist, to prevent nodes from running with different bootstrap data.
if err := Save(ctx, c.config, false); err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to save bootstrap data"))
signals.RequestShutdown(errors.WithMessage(err, "failed to save bootstrap data"))
return
}
@ -127,7 +126,7 @@ func (c *Cluster) startEtcdProxy(ctx context.Context) error {
}
_, nodeIPs, err := util.GetHostnameAndIPs(cmds.AgentConfig.NodeName, cmds.AgentConfig.NodeIP.Value())
if err != nil {
pkgerrors.WithMessage(err, "failed to get node name and addresses")
return errors.WithMessage(err, "failed to get node name and addresses")
}
defaultURL.Host = net.JoinHostPort(defaultURL.Hostname(), "2379")
@ -145,7 +144,7 @@ func (c *Cluster) startEtcdProxy(ctx context.Context) error {
for i, c := range clientURLs {
u, err := url.Parse(c)
if err != nil {
return pkgerrors.WithMessage(err, "failed to parse etcd ClientURL")
return errors.WithMessage(err, "failed to parse etcd ClientURL")
}
clientURLs[i] = u.Host
}
@ -196,7 +195,7 @@ func (c *Cluster) startStorage(ctx context.Context, bootstrap bool) error {
// start listening on the kine socket as an etcd endpoint, or return the external etcd endpoints
etcdConfig, err := endpoint.Listen(ctx, c.config.Datastore)
if err != nil {
return pkgerrors.WithMessage(err, "creating storage endpoint")
return errors.WithMessage(err, "creating storage endpoint")
}
// Persist the returned etcd configuration. We decide if we're doing leader election for embedded controllers

View File

@ -3,22 +3,21 @@
package containerd
import (
util2 "github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
)
func OverlaySupported(root string) error {
return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "overlayfs is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "overlayfs is not supported")
}
func FuseoverlayfsSupported(root string) error {
return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "fuse-overlayfs is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "fuse-overlayfs is not supported")
}
func StargzSupported(root string) error {
return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "stargz is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "stargz is not supported")
}
func NixSupported(root string) error {
return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "nix is not supported")
return errors.WithMessagef(errors.ErrUnsupportedPlatform, "nix is not supported")
}

View File

@ -16,9 +16,9 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/executor"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/otiai10/copy"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-base/logs"
@ -40,14 +40,14 @@ func Agent(ctx context.Context, nodeConfig *daemonconfig.Node, proxy proxy.Proxy
go func() {
<-executor.CRIReadyChan()
if err := startKubelet(ctx, &nodeConfig.AgentConfig); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to start kubelet"))
signals.RequestShutdown(errors.WithMessage(err, "failed to start kubelet"))
}
}()
go func() {
if !config.KubeProxyDisabled(ctx, nodeConfig, proxy) {
if err := startKubeProxy(ctx, &nodeConfig.AgentConfig); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to start kube-proxy"))
signals.RequestShutdown(errors.WithMessage(err, "failed to start kube-proxy"))
}
}
}()
@ -65,16 +65,16 @@ func startKubeProxy(ctx context.Context, cfg *daemonconfig.Agent) error {
func startKubelet(ctx context.Context, cfg *daemonconfig.Agent) error {
argsMap, defaultConfig, err := kubeletArgsAndConfig(cfg)
if err != nil {
return pkgerrors.WithMessage(err, "prepare default configuration drop-in")
return errors.WithMessage(err, "prepare default configuration drop-in")
}
extraArgs, err := extractConfigArgs(cfg.KubeletConfigDir, cfg.ExtraKubeletArgs, defaultConfig)
if err != nil {
return pkgerrors.WithMessage(err, "prepare user configuration drop-ins")
return errors.WithMessage(err, "prepare user configuration drop-ins")
}
if err := writeKubeletConfig(cfg.KubeletConfigDir, defaultConfig); err != nil {
return pkgerrors.WithMessage(err, "generate default kubelet configuration drop-in")
return errors.WithMessage(err, "generate default kubelet configuration drop-in")
}
args := util.GetArgs(argsMap, extraArgs)
@ -138,7 +138,7 @@ func extractConfigArgs(path string, extraArgs []string, config *kubeletconfig.Ku
src := strippedArgs["config"]
dest := filepath.Join(path, "10-cli-config.conf")
if err := agentutil.CopyFile(src, dest, false); err != nil {
return nil, pkgerrors.WithMessagef(err, "copy config %q into managed drop-in dir %q", src, dest)
return nil, errors.WithMessagef(err, "copy config %q into managed drop-in dir %q", src, dest)
}
}
// copy the config-dir into our managed config dir, unless its already in there
@ -146,7 +146,7 @@ func extractConfigArgs(path string, extraArgs []string, config *kubeletconfig.Ku
src := strippedArgs["config-dir"]
dest := filepath.Join(path, "20-cli-config-dir")
if err := copy.Copy(src, dest, copy.Options{PreserveOwner: true}); err != nil {
return nil, pkgerrors.WithMessagef(err, "copy config-dir %q into managed drop-in dir %q", src, dest)
return nil, errors.WithMessagef(err, "copy config-dir %q into managed drop-in dir %q", src, dest)
}
}
return args, nil
@ -248,12 +248,12 @@ func defaultKubeletConfig(cfg *daemonconfig.Agent) (*kubeletconfig.KubeletConfig
defaultConfig.StaticPodPath = cfg.PodManifests
}
if err := os.MkdirAll(defaultConfig.StaticPodPath, 0750); err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to create static pod manifest dir %s", defaultConfig.StaticPodPath)
return nil, errors.WithMessagef(err, "failed to create static pod manifest dir %s", defaultConfig.StaticPodPath)
}
t, _, err := taints.ParseTaints(cfg.NodeTaints)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to parse node taints")
return nil, errors.WithMessage(err, "failed to parse node taints")
}
defaultConfig.RegisterWithTaints = t

View File

@ -1,10 +1,10 @@
package proxy
import (
"errors"
"io"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -44,12 +44,12 @@ func (p *proxy) pipe(src, dst io.ReadWriter) {
for {
n, err := src.Read(buff)
if err != nil {
p.err(pkgerrors.WithMessage(err, "read failed"))
p.err(errors.WithMessage(err, "read failed"))
return
}
_, err = dst.Write(buff[:n])
if err != nil {
p.err(pkgerrors.WithMessage(err, "write failed"))
p.err(errors.WithMessage(err, "write failed"))
return
}
}

View File

@ -2,7 +2,6 @@ package control
import (
"context"
"errors"
"os"
"path/filepath"
"strconv"
@ -17,8 +16,8 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/executor"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
authorizationv1 "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
@ -42,12 +41,12 @@ import (
func Prepare(ctx context.Context, wg *sync.WaitGroup, cfg *config.Control) error {
logsapi.ReapplyHandling = logsapi.ReapplyHandlingIgnoreUnchanged
if err := prepare(ctx, wg, cfg); err != nil {
return pkgerrors.WithMessage(err, "preparing server")
return errors.WithMessage(err, "preparing server")
}
tunnel, err := setupTunnel(ctx, cfg)
if err != nil {
return pkgerrors.WithMessage(err, "setup tunnel server")
return errors.WithMessage(err, "setup tunnel server")
}
cfg.Runtime.Tunnel = tunnel
@ -70,7 +69,7 @@ func Prepare(ctx context.Context, wg *sync.WaitGroup, cfg *config.Control) error
// not disabled on this node.
func Server(ctx context.Context, wg *sync.WaitGroup, cfg *config.Control) error {
if err := cfg.Cluster.Start(ctx, wg); err != nil {
return pkgerrors.WithMessage(err, "failed to start cluster")
return errors.WithMessage(err, "failed to start cluster")
}
// Create a new context to use for control-plane components that is
@ -194,7 +193,7 @@ func scheduler(ctx context.Context, cfg *config.Control) error {
logrus.Infof("Waiting for untainted node")
// this waits forever for an untainted node; if it returns ErrWaitTimeout the context has been cancelled, and it is not a fatal error
if err := waitForUntaintedNode(ctx, runtime.KubeConfigScheduler); err != nil && !errors.Is(err, wait.ErrWaitTimeout) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to wait for untained node"))
signals.RequestShutdown(errors.WithMessage(err, "failed to wait for untainted node"))
}
}
}()
@ -325,15 +324,15 @@ func prepare(ctx context.Context, wg *sync.WaitGroup, config *config.Control) er
config.Cluster = cluster.New(config)
if err := config.Cluster.Bootstrap(ctx, config.ClusterReset); err != nil {
return pkgerrors.WithMessage(err, "failed to bootstrap cluster data")
return errors.WithMessage(err, "failed to bootstrap cluster data")
}
if err := deps.GenServerDeps(config); err != nil {
return pkgerrors.WithMessage(err, "failed to generate server dependencies")
return errors.WithMessage(err, "failed to generate server dependencies")
}
if err := config.Cluster.ListenAndServe(ctx); err != nil {
return pkgerrors.WithMessage(err, "failed to start supervisor listener")
return errors.WithMessage(err, "failed to start supervisor listener")
}
return nil
@ -470,7 +469,7 @@ func waitForUntaintedNode(ctx context.Context, kubeConfig string) error {
}
if _, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition); err != nil {
return pkgerrors.WithMessage(err, "failed to wait for untainted node")
return errors.WithMessage(err, "failed to wait for untainted node")
}
return nil
}

View File

@ -2,6 +2,7 @@ package control
import (
"context"
"fmt"
"net/http"
"os"
"sync"
@ -19,7 +20,6 @@ import (
"github.com/k3s-io/k3s/pkg/etcd"
testutil "github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/mock"
pkgerrors "github.com/pkg/errors"
"go.uber.org/mock/gomock"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apiserver/pkg/authentication/authenticator"
@ -198,7 +198,7 @@ func mockControl(ctx context.Context, t *testing.T, clusterInit bool) (*config.C
control.Cluster = cluster.New(control)
if err := prepare(ctx, nil, control); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to prepare cluster")
return nil, fmt.Errorf("failed to prepare cluster: %w", err)
}
return control, nil

View File

@ -3,10 +3,10 @@ package datadir
import (
"path/filepath"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/home"
"github.com/k3s-io/k3s/pkg/util/permissions"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/resolvehome"
)
var (
@ -29,9 +29,9 @@ func LocalHome(dataDir string, forceLocal bool) (string, error) {
}
}
dataDir, err := resolvehome.Resolve(dataDir)
dataDir, err := home.Resolve(dataDir)
if err != nil {
return "", pkgerrors.WithMessagef(err, "resolving %s", dataDir)
return "", errors.WithMessagef(err, "resolving %s", dataDir)
}
return filepath.Abs(dataDir)

View File

@ -19,7 +19,7 @@ import (
controllersv1 "github.com/k3s-io/api/pkg/generated/controllers/k3s.cattle.io/v1"
"github.com/k3s-io/k3s/pkg/agent/util"
pkgutil "github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/rancher/wrangler/v3/pkg/apply"
"github.com/rancher/wrangler/v3/pkg/kv"
"github.com/rancher/wrangler/v3/pkg/merr"
@ -165,7 +165,7 @@ func (w *watcher) listFilesIn(base string, force bool) error {
// Disabled files are not just skipped, but actively deleted from the filesystem
if shouldDisableFile(base, path, w.disables) {
if err := w.delete(path); err != nil {
errs = append(errs, pkgerrors.WithMessagef(err, "failed to delete %s", path))
errs = append(errs, errors.WithMessagef(err, "failed to delete %s", path))
}
continue
}
@ -178,7 +178,7 @@ func (w *watcher) listFilesIn(base string, force bool) error {
continue
}
if err := w.deploy(path, !force); err != nil {
errs = append(errs, pkgerrors.WithMessagef(err, "failed to process %s", path))
errs = append(errs, errors.WithMessagef(err, "failed to process %s", path))
} else {
w.modTime[path] = modTime
}

View File

@ -10,7 +10,7 @@ import (
"strings"
"github.com/k3s-io/k3s/pkg/util/bindata"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -46,7 +46,7 @@ staging:
os.MkdirAll(filepath.Dir(p), 0700)
logrus.Info("Writing manifest: ", p)
if err := os.WriteFile(p, content, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write to %s", name)
return errors.WithMessagef(err, "failed to write to %s", name)
}
}

View File

@ -5,7 +5,6 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/fs"
"net"
@ -31,11 +30,11 @@ import (
"github.com/k3s-io/k3s/pkg/server/auth"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
kine "github.com/k3s-io/kine/pkg/app"
"github.com/k3s-io/kine/pkg/client"
"github.com/k3s-io/kine/pkg/endpoint"
pkgerrors "github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
controllerv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/v3/pkg/start"
@ -206,7 +205,7 @@ func (e *ETCD) Test(ctx context.Context, enableMaintenance bool) error {
status, err := e.status(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get etcd status")
return errors.WithMessage(err, "failed to get etcd status")
} else if status.IsLearner {
return errors.New("this server has not yet been promoted from learner to voting member")
} else if status.Leader == 0 {
@ -225,19 +224,19 @@ func (e *ETCD) Test(ctx context.Context, enableMaintenance bool) error {
// defrag this node to reclaim freed space from compacted revisions
if err := e.defragment(ctx); err != nil {
return pkgerrors.WithMessage(err, "failed to defragment etcd database")
return errors.WithMessage(err, "failed to defragment etcd database")
}
// clear alarms on this node
if err := e.clearAlarms(ctx, status.Header.MemberId); err != nil {
return pkgerrors.WithMessage(err, "failed to disarm etcd alarms")
return errors.WithMessage(err, "failed to disarm etcd alarms")
}
// refresh status - note that errors may remain on other nodes, but this
// should not prevent us from continuing with startup.
status, err = e.status(ctx)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get etcd status")
return errors.WithMessage(err, "failed to get etcd status")
}
logrus.Infof("Datastore using %d of %d bytes after defragment", status.DbSizeInUse, status.DbSize)
@ -331,7 +330,7 @@ func (e *ETCD) IsInitialized() (bool, error) {
} else if os.IsNotExist(err) {
return false, nil
}
return false, pkgerrors.WithMessage(err, "invalid state for wal directory "+dir)
return false, errors.WithMessage(err, "invalid state for wal directory "+dir)
}
// Reset resets an etcd node to a single node cluster.
@ -411,15 +410,15 @@ func (e *ETCD) Reset(ctx context.Context, wg *sync.WaitGroup, rebootstrap func()
if errors.Is(err, s3.ErrNoConfigSecret) {
return errors.New("cannot use S3 config secret when restoring snapshot; configuration must be set in CLI or config file")
}
return pkgerrors.WithMessage(err, "failed to initialize S3 client")
return errors.WithMessage(err, "failed to initialize S3 client")
}
dir, err := snapshotDir(e.config, true)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get the snapshot dir")
return errors.WithMessage(err, "failed to get the snapshot dir")
}
path, err := s3client.Download(ctx, e.config.ClusterResetRestorePath, dir)
if err != nil {
return pkgerrors.WithMessage(err, "failed to download snapshot from S3")
return errors.WithMessage(err, "failed to download snapshot from S3")
}
e.config.ClusterResetRestorePath = path
logrus.Infof("S3 download complete for %s", e.config.ClusterResetRestorePath)
@ -452,7 +451,7 @@ func (e *ETCD) Reset(ctx context.Context, wg *sync.WaitGroup, rebootstrap func()
func (e *ETCD) Start(ctx context.Context, wg *sync.WaitGroup, clientAccessInfo *clientaccess.Info) error {
isInitialized, err := e.IsInitialized()
if err != nil {
return pkgerrors.WithMessagef(err, "failed to check for initialized etcd datastore")
return errors.WithMessagef(err, "failed to check for initialized etcd datastore")
}
if err := e.startClient(ctx); err != nil {
@ -532,7 +531,7 @@ func (e *ETCD) pollJoin(ctx context.Context, wg *sync.WaitGroup, clientAccessInf
}
return true, nil
}); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "etcd cluster join failed"))
signals.RequestShutdown(errors.WithMessage(err, "etcd cluster join failed"))
}
}
@ -679,7 +678,7 @@ func (e *ETCD) Register(handler http.Handler) (http.Handler, error) {
// ensure client is started, as etcd startup may not have handled this if this is a control-plane-only node
if e.client == nil {
if err := e.startClient(ctx); err != nil {
panic(pkgerrors.WithMessage(err, "failed to start etcd client"))
panic(errors.WithMessage(err, "failed to start etcd client"))
}
}
@ -690,7 +689,7 @@ func (e *ETCD) Register(handler http.Handler) (http.Handler, error) {
// Re-run informer factory startup after core and leader-elected controllers have started.
// Additional caches may need to start for the newly added OnChange/OnRemove callbacks.
if err := start.All(ctx, 5, e.config.Runtime.K3s, e.config.Runtime.Core); err != nil {
panic(pkgerrors.WithMessage(err, "failed to start wrangler controllers"))
panic(errors.WithMessage(err, "failed to start wrangler controllers"))
}
}
}
@ -777,7 +776,7 @@ func (e *ETCD) infoHandler() http.Handler {
members, err := e.client.MemberList(ctx)
if err != nil {
util.SendError(pkgerrors.WithMessage(err, "failed to get etcd MemberList"), rw, req, http.StatusInternalServerError)
util.SendError(errors.WithMessage(err, "failed to get etcd MemberList"), rw, req, http.StatusInternalServerError)
return
}
@ -1324,7 +1323,7 @@ func (e *ETCD) trackLearnerProgress(ctx context.Context, progress *learnerProgre
func (e *ETCD) getETCDStatus(ctx context.Context, url string) (*clientv3.StatusResponse, error) {
resp, err := e.client.Status(ctx, url)
if err != nil {
return resp, pkgerrors.WithMessage(err, "failed to check etcd member status")
return resp, errors.WithMessage(err, "failed to check etcd member status")
}
if len(resp.Errors) != 0 {
return resp, errors.New("etcd member has status errors: " + strings.Join(resp.Errors, ","))
@ -1553,7 +1552,7 @@ func (e *ETCD) Restore(ctx context.Context) error {
if strings.HasSuffix(e.config.ClusterResetRestorePath, snapshot.CompressedExtension) {
dir, err := snapshotDir(e.config, true)
if err != nil {
return pkgerrors.WithMessage(err, "failed to get the snapshot dir")
return errors.WithMessage(err, "failed to get the snapshot dir")
}
decompressSnapshot, err := e.decompressSnapshot(dir, e.config.ClusterResetRestorePath)

View File

@ -5,7 +5,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"net/http"
@ -24,10 +23,10 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/etcd/snapshot"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
@ -100,13 +99,13 @@ func Start(ctx context.Context, config *config.Control) (*Controller, error) {
// cluster id hack: see https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/mVGobfD4TpY/nkdbkX1iBwAJ
ns, err := c.core.V1().Namespace().Get(metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
return false, pkgerrors.WithMessage(err, "failed to set S3 snapshot cluster ID")
return false, errors.WithMessage(err, "failed to set S3 snapshot cluster ID")
}
c.clusterID = string(ns.UID)
tokenHash, err := util.GetTokenHash(config)
if err != nil {
return false, pkgerrors.WithMessage(err, "failed to set S3 snapshot server token hash")
return false, errors.WithMessage(err, "failed to set S3 snapshot server token hash")
}
c.tokenHash = tokenHash
@ -138,7 +137,7 @@ func (c *Controller) GetClient(ctx context.Context, etcdS3 *config.EtcdS3) (*Cli
if isDefault {
e, err := c.getConfigFromSecret(etcdS3.ConfigSecret)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to get config from etcd-s3-config-secret %q", etcdS3.ConfigSecret)
return nil, errors.WithMessagef(err, "failed to get config from etcd-s3-config-secret %q", etcdS3.ConfigSecret)
}
logrus.Infof("Using etcd s3 configuration from etcd-s3-config-secret %q", etcdS3.ConfigSecret)
etcdS3 = e
@ -197,7 +196,7 @@ func (c *Controller) GetClient(ctx context.Context, etcdS3 *config.EtcdS3) (*Cli
if etcdS3.Proxy != "none" {
u, err = url.Parse(etcdS3.Proxy)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to parse etcd-s3-proxy value as URL")
return nil, errors.WithMessage(err, "failed to parse etcd-s3-proxy value as URL")
}
if u.Scheme == "" || u.Host == "" {
return nil, errors.New("proxy URL must include scheme and host")
@ -220,7 +219,7 @@ func (c *Controller) GetClient(ctx context.Context, etcdS3 *config.EtcdS3) (*Cli
})
if _, err := creds.Get(); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get credentials")
return nil, errors.WithMessage(err, "failed to get credentials")
}
opt := minio.Options{
@ -242,7 +241,7 @@ func (c *Controller) GetClient(ctx context.Context, etcdS3 *config.EtcdS3) (*Cli
exists, err := mc.BucketExists(ctx, etcdS3.Bucket)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to test for existence of bucket %s", etcdS3.Bucket)
return nil, errors.WithMessagef(err, "failed to test for existence of bucket %s", etcdS3.Bucket)
}
if !exists {
return nil, fmt.Errorf("bucket %s does not exist", etcdS3.Bucket)

View File

@ -5,7 +5,6 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
@ -25,9 +24,9 @@ import (
"github.com/k3s-io/k3s/pkg/etcd/snapshot"
"github.com/k3s-io/k3s/pkg/etcd/snapshotmetrics"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/metrics"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/robfig/cron/v3"
"github.com/sirupsen/logrus"
snapshotv3 "go.etcd.io/etcd/client/v3/snapshot"
@ -232,7 +231,7 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
endpoints := getEndpoints(e.config)
status, err := e.client.Status(ctx, endpoints[0])
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to check etcd status for snapshot")
return nil, errors.WithMessage(err, "failed to check etcd status for snapshot")
}
if status.IsLearner {
@ -242,17 +241,17 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
snapshotDir, err := snapshotDir(e.config, true)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get etcd-snapshot-dir")
return nil, errors.WithMessage(err, "failed to get etcd-snapshot-dir")
}
cfg, err := getClientConfig(ctx, e.config)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get config for etcd snapshot")
return nil, errors.WithMessage(err, "failed to get config for etcd snapshot")
}
tokenHash, err := util.GetTokenHash(e.config)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get server token hash for etcd snapshot")
return nil, errors.WithMessage(err, "failed to get server token hash for etcd snapshot")
}
nodeName := os.Getenv("NODE_NAME")
@ -282,7 +281,7 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
}
logrus.Errorf("Failed to take etcd snapshot: %v", err)
if err := e.addSnapshotData(*sf); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to sync ETCDSnapshotFile")
return nil, errors.WithMessage(err, "failed to sync ETCDSnapshotFile")
}
}
@ -298,7 +297,7 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
}
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to compress snapshot")
return nil, errors.WithMessage(err, "failed to compress snapshot")
}
snapshotPath = zipPath
logrus.Info("Compressed snapshot: " + snapshotPath)
@ -306,7 +305,7 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
f, err := os.Stat(snapshotPath)
if err != nil {
return nil, pkgerrors.WithMessage(err, "unable to retrieve snapshot information from local snapshot")
return nil, errors.WithMessage(err, "unable to retrieve snapshot information from local snapshot")
}
sf = &snapshot.File{
@ -348,7 +347,7 @@ func (e *ETCD) snapshot(ctx context.Context) (_ *managed.SnapshotResult, rerr er
logrus.Warnf("Unable to initialize S3 client: %v", err)
if !errors.Is(err, s3.ErrNoConfigSecret) {
metrics.ObserveWithStatus(snapshotmetrics.SaveS3Count, s3Start, err)
err = pkgerrors.WithMessage(err, "failed to initialize S3 client")
err = errors.WithMessage(err, "failed to initialize S3 client")
sf = &snapshot.File{
Name: f.Name(),
NodeName: "s3",
@ -403,7 +402,7 @@ func (e *ETCD) listLocalSnapshots() (map[string]snapshot.File, error) {
snapshots := make(map[string]snapshot.File)
snapshotDir, err := snapshotDir(e.config, true)
if err != nil {
return snapshots, pkgerrors.WithMessage(err, "failed to get etcd-snapshot-dir")
return snapshots, errors.WithMessage(err, "failed to get etcd-snapshot-dir")
}
if err := filepath.Walk(snapshotDir, func(path string, file os.FileInfo, err error) error {
@ -471,7 +470,7 @@ func (e *ETCD) getS3Client(ctx context.Context) (*s3.Client, error) {
func (e *ETCD) PruneSnapshots(ctx context.Context) (*managed.SnapshotResult, error) {
snapshotDir, err := snapshotDir(e.config, false)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get etcd-snapshot-dir")
return nil, errors.WithMessage(err, "failed to get etcd-snapshot-dir")
}
res := &managed.SnapshotResult{}
@ -509,7 +508,7 @@ func (e *ETCD) ListSnapshots(ctx context.Context) (*k3s.ETCDSnapshotFileList, er
if s3client, err := e.getS3Client(ctx); err != nil {
logrus.Warnf("Unable to initialize S3 client: %v", err)
if !errors.Is(err, s3.ErrNoConfigSecret) {
return nil, pkgerrors.WithMessage(err, "failed to initialize S3 client")
return nil, errors.WithMessage(err, "failed to initialize S3 client")
}
} else {
sfs, err := s3client.ListSnapshots(ctx)
@ -543,7 +542,7 @@ func (e *ETCD) ListSnapshots(ctx context.Context) (*k3s.ETCDSnapshotFileList, er
func (e *ETCD) DeleteSnapshots(ctx context.Context, snapshots []string) (*managed.SnapshotResult, error) {
snapshotDir, err := snapshotDir(e.config, false)
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to get etcd-snapshot-dir")
return nil, errors.WithMessage(err, "failed to get etcd-snapshot-dir")
}
var s3client *s3.Client
@ -552,7 +551,7 @@ func (e *ETCD) DeleteSnapshots(ctx context.Context, snapshots []string) (*manage
if err != nil {
logrus.Warnf("Unable to initialize S3 client: %v", err)
if !errors.Is(err, s3.ErrNoConfigSecret) {
return nil, pkgerrors.WithMessage(err, "failed to initialize S3 client")
return nil, errors.WithMessage(err, "failed to initialize S3 client")
}
}
}
@ -716,7 +715,7 @@ func (e *ETCD) reconcileSnapshotData(ctx context.Context, res *managed.SnapshotR
logrus.Warnf("Unable to initialize S3 client: %v", err)
if !errors.Is(err, s3.ErrNoConfigSecret) {
metrics.ObserveWithStatus(snapshotmetrics.ReconcileS3Count, s3Start, err)
return pkgerrors.WithMessage(err, "failed to initialize S3 client")
return errors.WithMessage(err, "failed to initialize S3 client")
}
} else {
s3Snapshots, err := s3client.ListSnapshots(ctx)

View File

@ -2,7 +2,6 @@ package etcd
import (
"context"
"errors"
"os"
"sort"
"strconv"
@ -13,8 +12,8 @@ import (
controllersv1 "github.com/k3s-io/api/pkg/generated/controllers/k3s.cattle.io/v1"
"github.com/k3s-io/k3s/pkg/etcd/snapshot"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
controllerv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
@ -90,14 +89,14 @@ func (e *etcdSnapshotHandler) sync(key string, esf *k3s.ETCDSnapshotFile) (*k3s.
sfKey := sf.GenerateConfigMapKey()
m, err := sf.Marshal()
if err != nil {
return nil, pkgerrors.WithMessage(err, "failed to marshal snapshot ConfigMap data")
return nil, errors.WithMessage(err, "failed to marshal snapshot ConfigMap data")
}
marshalledSnapshot := string(m)
snapshotConfigMap, err := e.configmaps.Get(metav1.NamespaceSystem, snapshotConfigMapName, metav1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
return nil, pkgerrors.WithMessage(err, "failed to get snapshot ConfigMap")
return nil, errors.WithMessage(err, "failed to get snapshot ConfigMap")
}
snapshotConfigMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@ -142,7 +141,7 @@ func (e *etcdSnapshotHandler) sync(key string, esf *k3s.ETCDSnapshotFile) (*k3s.
}
if err != nil {
err = pkgerrors.WithMessage(err, "failed to sync snapshot to ConfigMap")
err = errors.WithMessage(err, "failed to sync snapshot to ConfigMap")
}
return nil, err
@ -157,14 +156,14 @@ func (e *etcdSnapshotHandler) onRemove(key string, esf *k3s.ETCDSnapshotFile) (*
if apierrors.IsNotFound(err) {
return nil, nil
}
return nil, pkgerrors.WithMessage(err, "failed to get snapshot ConfigMap")
return nil, errors.WithMessage(err, "failed to get snapshot ConfigMap")
}
sfKey := generateETCDSnapshotFileConfigMapKey(*esf)
if _, ok := snapshotConfigMap.Data[sfKey]; ok {
delete(snapshotConfigMap.Data, sfKey)
if _, err := e.configmaps.Update(snapshotConfigMap); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to remove snapshot from ConfigMap")
return nil, errors.WithMessage(err, "failed to remove snapshot from ConfigMap")
}
}
e.etcd.emitEvent(esf)
@ -243,7 +242,7 @@ func (e *etcdSnapshotHandler) reconcile() error {
snapshotConfigMap, err := e.configmaps.Get(metav1.NamespaceSystem, snapshotConfigMapName, metav1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
return pkgerrors.WithMessage(err, "failed to get snapshot ConfigMap")
return errors.WithMessage(err, "failed to get snapshot ConfigMap")
}
snapshotConfigMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{

View File

@ -3,7 +3,6 @@ package etcd
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
@ -11,7 +10,7 @@ import (
"github.com/k3s-io/k3s/pkg/cluster/managed"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -70,7 +69,7 @@ func (e *ETCD) snapshotHandler() http.Handler {
func (e *ETCD) handleList(rw http.ResponseWriter, req *http.Request) error {
if e.config.EtcdS3 != nil {
if _, err := e.getS3Client(req.Context()); err != nil {
err = pkgerrors.WithMessage(err, "failed to initialize S3 client")
err = errors.WithMessage(err, "failed to initialize S3 client")
util.SendError(err, rw, req, http.StatusBadRequest)
return nil
}
@ -87,7 +86,7 @@ func (e *ETCD) handleList(rw http.ResponseWriter, req *http.Request) error {
func (e *ETCD) handleSave(rw http.ResponseWriter, req *http.Request) error {
if e.config.EtcdS3 != nil {
if _, err := e.getS3Client(req.Context()); err != nil {
err = pkgerrors.WithMessage(err, "failed to initialize S3 client")
err = errors.WithMessage(err, "failed to initialize S3 client")
util.SendError(err, rw, req, http.StatusBadRequest)
return nil
}
@ -104,7 +103,7 @@ func (e *ETCD) handleSave(rw http.ResponseWriter, req *http.Request) error {
func (e *ETCD) handlePrune(rw http.ResponseWriter, req *http.Request) error {
if e.config.EtcdS3 != nil {
if _, err := e.getS3Client(req.Context()); err != nil {
err = pkgerrors.WithMessage(err, "failed to initialize S3 client")
err = errors.WithMessage(err, "failed to initialize S3 client")
util.SendError(err, rw, req, http.StatusBadRequest)
return nil
}
@ -121,7 +120,7 @@ func (e *ETCD) handlePrune(rw http.ResponseWriter, req *http.Request) error {
func (e *ETCD) handleDelete(rw http.ResponseWriter, req *http.Request, snapshots []string) error {
if e.config.EtcdS3 != nil {
if _, err := e.getS3Client(req.Context()); err != nil {
err = pkgerrors.WithMessage(err, "failed to initialize S3 client")
err = errors.WithMessage(err, "failed to initialize S3 client")
util.SendError(err, rw, req, http.StatusBadRequest)
return nil
}

View File

@ -2,15 +2,14 @@ package store
import (
"context"
"errors"
"fmt"
"os"
"runtime/debug"
"time"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/kine/pkg/endpoint"
"github.com/otiai10/copy"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/mvccpb"
@ -254,7 +253,7 @@ func NewStore(dataDir string) (store *Store, rerr error) {
// need to check for backend path ourselves, as backend.New just creates
// a new empty database if the file does not exist or is empty.
if _, err := os.Stat(path); err != nil {
return nil, pkgerrors.WithMessage(err, "failed to stat MVCC KV store backend path")
return nil, errors.WithMessage(err, "failed to stat MVCC KV store backend path")
}
logrus.Infof("Opening etcd MVCC KV backend database at %s", path)

View File

@ -4,7 +4,6 @@ package embed
import (
"context"
"errors"
"flag"
"fmt"
"net"
@ -27,9 +26,9 @@ import (
"github.com/k3s-io/k3s/pkg/executor/embed/etcd"
"github.com/k3s-io/k3s/pkg/signals"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/k3s-io/k3s/pkg/vpn"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apiserver/pkg/authentication/authenticator"
cloudprovider "k8s.io/cloud-provider"
@ -97,7 +96,7 @@ func (e *Embedded) Bootstrap(ctx context.Context, nodeConfig *daemonconfig.Node,
if len(cfg.FlannelIface) > 0 {
nodeConfig.Flannel.Iface, err = net.InterfaceByName(cfg.FlannelIface)
if err != nil {
return pkgerrors.WithMessagef(err, "unable to find interface %s", cfg.FlannelIface)
return errors.WithMessagef(err, "unable to find interface %s", cfg.FlannelIface)
}
}
@ -140,7 +139,7 @@ func (e *Embedded) Bootstrap(ctx context.Context, nodeConfig *daemonconfig.Node,
nodeConfig.AgentConfig.NodeIP = vpnIPs[0].String()
nodeConfig.Flannel.Iface, err = net.InterfaceByName(vpnInfo.Interface)
if err != nil {
return pkgerrors.WithMessagef(err, "unable to find vpn interface: %s", vpnInfo.Interface)
return errors.WithMessagef(err, "unable to find vpn interface: %s", vpnInfo.Interface)
}
}
}
@ -148,7 +147,7 @@ func (e *Embedded) Bootstrap(ctx context.Context, nodeConfig *daemonconfig.Node,
// set paths for embedded flannel if enabled
hostLocal, err := exec.LookPath("host-local")
if err != nil {
return pkgerrors.WithMessagef(err, "failed to find host-local")
return errors.WithMessagef(err, "failed to find host-local")
}
if cfg.FlannelConf == "" {
@ -182,7 +181,7 @@ func (e *Embedded) Kubelet(ctx context.Context, args []string) error {
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "kubelet exited"))
signals.RequestShutdown(errors.WithMessage(err, "kubelet exited"))
}
signals.RequestShutdown(nil)
}()
@ -203,7 +202,7 @@ func (e *Embedded) KubeProxy(ctx context.Context, args []string) error {
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "kube-proxy exited"))
signals.RequestShutdown(errors.WithMessage(err, "kube-proxy exited"))
}
signals.RequestShutdown(nil)
}()
@ -229,7 +228,7 @@ func (e *Embedded) APIServer(ctx context.Context, args []string) error {
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "apiserver exited"))
signals.RequestShutdown(errors.WithMessage(err, "apiserver exited"))
}
signals.RequestShutdown(nil)
}()
@ -251,7 +250,7 @@ func (e *Embedded) Scheduler(ctx context.Context, nodeReady <-chan struct{}, arg
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "scheduler exited"))
signals.RequestShutdown(errors.WithMessage(err, "scheduler exited"))
}
signals.RequestShutdown(nil)
}()
@ -272,7 +271,7 @@ func (e *Embedded) ControllerManager(ctx context.Context, args []string) error {
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "controller-manager exited"))
signals.RequestShutdown(errors.WithMessage(err, "controller-manager exited"))
}
signals.RequestShutdown(nil)
}()
@ -317,7 +316,7 @@ func (*Embedded) CloudControllerManager(ctx context.Context, ccmRBACReady <-chan
}()
err := command.ExecuteContext(ctx)
if err != nil && !errors.Is(err, context.Canceled) {
signals.RequestShutdown(pkgerrors.WithMessage(err, "cloud-controller-manager exited"))
signals.RequestShutdown(errors.WithMessage(err, "cloud-controller-manager exited"))
}
signals.RequestShutdown(nil)
}()

View File

@ -6,7 +6,8 @@ import (
"strings"
"time"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
@ -117,7 +118,7 @@ func BootstrapTokenFromSecret(secret *v1.Secret) (*BootstrapToken, error) {
// Create the BootstrapTokenString object based on the ID and Secret
bts, err := NewBootstrapTokenStringFromIDAndSecret(tokenID, tokenSecret)
if err != nil {
return nil, pkgerrors.WithMessage(err, "bootstrap Token Secret is invalid and couldn't be parsed")
return nil, errors.WithMessage(err, "bootstrap Token Secret is invalid and couldn't be parsed")
}
// Get the description (if any) from the Secret
@ -130,7 +131,7 @@ func BootstrapTokenFromSecret(secret *v1.Secret) (*BootstrapToken, error) {
if len(secretExpiration) > 0 {
expTime, err := time.Parse(time.RFC3339, secretExpiration)
if err != nil {
return nil, pkgerrors.WithMessagef(err, "can't parse expiration time of bootstrap token %q", secret.Name)
return nil, errors.WithMessagef(err, "can't parse expiration time of bootstrap token %q", secret.Name)
}
expires = &metav1.Time{Time: expTime}
}

View File

@ -11,8 +11,8 @@ import (
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
)
@ -46,7 +46,7 @@ func getNodeArgs() (string, error) {
}
nodeArgs, err := json.Marshal(nodeArgsList)
if err != nil {
return "", pkgerrors.WithMessage(err, "Failed to retrieve argument list for node")
return "", errors.WithMessage(err, "Failed to retrieve argument list for node")
}
return string(nodeArgs), nil
}
@ -66,7 +66,7 @@ func getNodeEnv() (string, error) {
}
k3sEnvJSON, err := json.Marshal(k3sEnv)
if err != nil {
return "", pkgerrors.WithMessage(err, "Failed to retrieve environment map for node")
return "", errors.WithMessage(err, "Failed to retrieve environment map for node")
}
return string(k3sEnvJSON), nil
}

View File

@ -2,14 +2,13 @@ package nodepassword
import (
"context"
"errors"
"fmt"
"strings"
"github.com/k3s-io/k3s/pkg/authenticator/hash"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -92,7 +91,7 @@ func (npc *nodePasswordController) ensure(nodeName, pass string) error {
func (npc *nodePasswordController) verifyNode(ctx context.Context, node *nodeInfo) error {
if nodeName, isNodeAuth := identifier.NodeIdentity(node.User); isNodeAuth {
if _, err := npc.nodes.Cache().Get(nodeName); err != nil {
return pkgerrors.WithMessage(err, "unable to verify node identity")
return errors.WithMessage(err, "unable to verify node identity")
}
}
return nil

View File

@ -2,7 +2,6 @@ package nodepassword
import (
"context"
"errors"
"net/http"
"os"
"path"
@ -14,7 +13,7 @@ import (
"github.com/gorilla/mux"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -143,16 +142,16 @@ func verifyLocalPassword(ctx context.Context, control *config.Control, mu *sync.
passBytes, err := os.ReadFile(nodePasswordFile)
if err != nil {
return "", http.StatusInternalServerError, pkgerrors.WithMessage(err, "unable to read node password file")
return "", http.StatusInternalServerError, errors.WithMessage(err, "unable to read node password file")
}
passHash, err := Hasher.CreateHash(strings.TrimSpace(string(passBytes)))
if err != nil {
return "", http.StatusInternalServerError, pkgerrors.WithMessage(err, "unable to hash node password file")
return "", http.StatusInternalServerError, errors.WithMessage(err, "unable to hash node password file")
}
if err := Hasher.VerifyHash(passHash, node.Password); err != nil {
return "", http.StatusForbidden, pkgerrors.WithMessage(err, "unable to verify local node password")
return "", http.StatusForbidden, errors.WithMessage(err, "unable to verify local node password")
}
mu.Lock()

View File

@ -9,7 +9,8 @@ import (
"path/filepath"
"strings"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -40,7 +41,7 @@ func setupMounts(stateDir string) error {
for _, v := range mountMap {
if err := setupMount(v[0], v[1]); err != nil {
return pkgerrors.WithMessagef(err, "failed to setup mount %s => %s", v[0], v[1])
return errors.WithMessagef(err, "failed to setup mount %s => %s", v[0], v[1])
}
}
@ -74,16 +75,16 @@ func setupMount(target, dir string) error {
}
if err := os.MkdirAll(toCreate, 0700); err != nil {
return pkgerrors.WithMessagef(err, "failed to create directory %s", toCreate)
return errors.WithMessagef(err, "failed to create directory %s", toCreate)
}
logrus.Debug("Mounting none ", toCreate, " tmpfs")
if err := unix.Mount("none", toCreate, "tmpfs", 0, ""); err != nil {
return pkgerrors.WithMessagef(err, "failed to mount tmpfs to %s", toCreate)
return errors.WithMessagef(err, "failed to mount tmpfs to %s", toCreate)
}
if err := os.MkdirAll(target, 0700); err != nil {
return pkgerrors.WithMessagef(err, "failed to create directory %s", target)
return errors.WithMessagef(err, "failed to create directory %s", target)
}
if dir == "" {
@ -91,7 +92,7 @@ func setupMount(target, dir string) error {
}
if err := os.MkdirAll(dir, 0700); err != nil {
return pkgerrors.WithMessagef(err, "failed to create directory %s", dir)
return errors.WithMessagef(err, "failed to create directory %s", dir)
}
logrus.Debug("Mounting ", dir, target, " none bind")

View File

@ -3,7 +3,6 @@
package rootless
import (
"errors"
"fmt"
"net"
"os"
@ -12,8 +11,8 @@ import (
"strconv"
"strings"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/opencontainers/cgroups"
pkgerrors "github.com/pkg/errors"
"github.com/rootless-containers/rootlesskit/pkg/child"
"github.com/rootless-containers/rootlesskit/pkg/copyup/tmpfssymlink"
"github.com/rootless-containers/rootlesskit/pkg/network/slirp4netns"
@ -133,7 +132,7 @@ func parseCIDR(s string) (*net.IPNet, error) {
func createParentOpt(driver portDriver, stateDir string, enableIPv6 bool) (*parent.Opt, error) {
if err := os.MkdirAll(stateDir, 0755); err != nil {
return nil, pkgerrors.WithMessagef(err, "failed to mkdir %s", stateDir)
return nil, errors.WithMessagef(err, "failed to mkdir %s", stateDir)
}
driver.SetStateDir(stateDir)

View File

@ -6,7 +6,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
@ -22,8 +21,8 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/daemons/control/deps"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
@ -78,12 +77,12 @@ func caCertReplace(control *config.Control, buf io.ReadCloser, force bool) error
}
if err := defaultBootstrap(control, tmpControl); err != nil {
return pkgerrors.WithMessage(err, "failed to set default bootstrap values")
return errors.WithMessage(err, "failed to set default bootstrap values")
}
if err := validateBootstrap(control, tmpControl); err != nil {
if !force {
return pkgerrors.WithMessage(err, "failed to validate new CA certificates and keys")
return errors.WithMessage(err, "failed to validate new CA certificates and keys")
}
logrus.Warnf("Save of CA certificates and keys forced, ignoring validation errors: %v", err)
}
@ -113,7 +112,7 @@ func defaultBootstrap(oldControl, newControl *config.Control) error {
newVal := newMeta.FieldByName(field.Name)
info, err := os.Stat(newVal.String())
if err != nil && !errors.Is(err, fs.ErrNotExist) {
errs = append(errs, pkgerrors.WithMessage(err, field.Name))
errs = append(errs, errors.WithMessage(err, field.Name))
continue
}
@ -151,19 +150,19 @@ func validateBootstrap(oldControl, newControl *config.Control) error {
// Check CA chain consistency and cert/key agreement
if strings.HasSuffix(field.Name, "CA") {
if err := validateCA(oldVal.String(), newVal.String()); err != nil {
errs = append(errs, pkgerrors.WithMessage(err, field.Name))
errs = append(errs, errors.WithMessage(err, field.Name))
}
newKeyVal := newMeta.FieldByName(field.Name + "Key")
oldKeyVal := oldMeta.FieldByName(field.Name + "Key")
if err := validateCAKey(oldVal.String(), oldKeyVal.String(), newVal.String(), newKeyVal.String()); err != nil {
errs = append(errs, pkgerrors.WithMessage(err, field.Name+"Key"))
errs = append(errs, errors.WithMessage(err, field.Name+"Key"))
}
}
// Check signing key rotation
if field.Name == "ServiceKey" {
if err := validateServiceKey(oldVal.String(), newVal.String()); err != nil {
errs = append(errs, pkgerrors.WithMessage(err, field.Name))
errs = append(errs, errors.WithMessage(err, field.Name))
}
}
}
@ -215,7 +214,7 @@ func validateCA(oldCAPath, newCAPath string) error {
// Verify the first cert in the bundle, using the combined roots and intermediates
_, err = newCerts[0].Verify(x509.VerifyOptions{Roots: roots, Intermediates: intermediates})
if err != nil {
err = pkgerrors.WithMessage(err, "new CA cert cannot be verified using old CA chain")
err = errors.WithMessage(err, "new CA cert cannot be verified using old CA chain")
}
return err
}
@ -229,7 +228,7 @@ func validateCAKey(oldCAPath, oldCAKeyPath, newCAPath, newCAKeyPath string) erro
_, err := tls.LoadX509KeyPair(newCAPath, newCAKeyPath)
if err != nil {
err = pkgerrors.WithMessage(err, "new CA cert and key cannot be loaded as X590KeyPair")
err = errors.WithMessage(err, "new CA cert and key cannot be loaded as X590KeyPair")
}
return err
}

View File

@ -4,7 +4,6 @@ import (
"context"
"crypto"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
@ -20,7 +19,7 @@ import (
"github.com/k3s-io/k3s/pkg/etcd"
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
discoveryv1 "k8s.io/api/discovery/v1"
@ -136,7 +135,7 @@ func File(fileName ...string) http.Handler {
for _, f := range fileName {
bytes, err := os.ReadFile(f)
if err != nil {
util.SendError(pkgerrors.WithMessagef(err, "failed to read %s", f), resp, req, http.StatusInternalServerError)
util.SendError(errors.WithMessagef(err, "failed to read %s", f), resp, req, http.StatusInternalServerError)
return
}
resp.Write(bytes)
@ -167,7 +166,7 @@ func APIServers(control *config.Control) http.Handler {
endpoints := collectAddresses(ctx)
resp.Header().Set("content-type", "application/json")
if err := json.NewEncoder(resp).Encode(endpoints); err != nil {
util.SendError(pkgerrors.WithMessage(err, "failed to encode apiserver endpoints"), resp, req, http.StatusInternalServerError)
util.SendError(errors.WithMessage(err, "failed to encode apiserver endpoints"), resp, req, http.StatusInternalServerError)
}
})
}
@ -181,7 +180,7 @@ func Config(control *config.Control, cfg *cmds.Server) http.Handler {
control.DisableKubeProxy = cfg.DisableKubeProxy
resp.Header().Set("content-type", "application/json")
if err := json.NewEncoder(resp).Encode(control); err != nil {
util.SendError(pkgerrors.WithMessage(err, "failed to encode agent config"), resp, req, http.StatusInternalServerError)
util.SendError(errors.WithMessage(err, "failed to encode agent config"), resp, req, http.StatusInternalServerError)
}
})
}

View File

@ -27,13 +27,13 @@ import (
"github.com/k3s-io/k3s/pkg/server/handlers"
"github.com/k3s-io/k3s/pkg/static"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/home"
"github.com/k3s-io/k3s/pkg/util/permissions"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/apply"
v1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/v3/pkg/leader"
"github.com/rancher/wrangler/v3/pkg/resolvehome"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -72,7 +72,7 @@ func PrepareServer(ctx context.Context, wg *sync.WaitGroup, config *Config, cfg
// and writes the admin kubeconfig.
func StartServer(ctx context.Context, wg *sync.WaitGroup, config *Config, cfg *cmds.Server) error {
if err := control.Server(ctx, wg, &config.ControlConfig); err != nil {
return pkgerrors.WithMessage(err, "starting kubernetes")
return errors.WithMessage(err, "starting kubernetes")
}
shArgs := cmds.StartupHookArgs{
@ -83,7 +83,7 @@ func StartServer(ctx context.Context, wg *sync.WaitGroup, config *Config, cfg *c
}
for _, hook := range config.StartupHooks {
if err := hook(ctx, config.ControlConfig.Runtime.StartupHooksWg, shArgs); err != nil {
return pkgerrors.WithMessage(err, "startup hook")
return errors.WithMessage(err, "startup hook")
}
}
go startOnAPIServerReady(ctx, config)
@ -111,17 +111,17 @@ func runControllers(ctx context.Context, config *Config) error {
sc, err := NewContext(ctx, config)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create new server context")
return errors.WithMessage(err, "failed to create new server context")
}
controlConfig.Runtime.StartupHooksWg.Wait()
if err := stageFiles(ctx, sc, controlConfig); err != nil {
return pkgerrors.WithMessage(err, "failed to stage files")
return errors.WithMessage(err, "failed to stage files")
}
// start the nodepassword controller before we set controlConfig.Runtime.Core
if err := nodepassword.Register(ctx, sc.K8s, sc.Core.Core().V1().Secret(), sc.Core.Core().V1().Node()); err != nil {
return pkgerrors.WithMessage(err, "failed to start node-password secret controller")
return errors.WithMessage(err, "failed to start node-password secret controller")
}
controlConfig.Runtime.K8s = sc.K8s
@ -142,12 +142,12 @@ func runControllers(ctx context.Context, config *Config) error {
for _, controller := range config.Controllers {
if err := controller(ctx, sc); err != nil {
return pkgerrors.WithMessagef(err, "failed to start %s controller", util.GetFunctionName(controller))
return errors.WithMessagef(err, "failed to start %s controller", util.GetFunctionName(controller))
}
}
if err := sc.Start(ctx); err != nil {
return pkgerrors.WithMessage(err, "failed to start wranger controllers")
return errors.WithMessage(err, "failed to start wranger controllers")
}
if !controlConfig.DisableAPIServer {
@ -181,14 +181,14 @@ func apiserverControllers(ctx context.Context, sc *Context, config *Config) {
}
for _, controller := range config.LeaderControllers {
if err := controller(ctx, sc); err != nil {
panic(pkgerrors.WithMessagef(err, "failed to start %s leader controller", util.GetFunctionName(controller)))
panic(errors.WithMessagef(err, "failed to start %s leader controller", util.GetFunctionName(controller)))
}
}
// Re-run informer factory startup after core and leader-elected controllers have started.
// Additional caches may need to start for the newly added OnChange/OnRemove callbacks.
if err := sc.Start(ctx); err != nil {
panic(pkgerrors.WithMessage(err, "failed to start wranger controllers"))
panic(errors.WithMessage(err, "failed to start wranger controllers"))
}
}
@ -352,14 +352,14 @@ func HomeKubeConfig(write, rootless bool) (string, error) {
if permissions.IsPrivileged() == nil && !rootless {
return datadir.GlobalConfig, nil
}
return resolvehome.Resolve(datadir.HomeConfig)
return home.Resolve(datadir.HomeConfig)
}
if _, err := os.Stat(datadir.GlobalConfig); err == nil {
return datadir.GlobalConfig, nil
}
return resolvehome.Resolve(datadir.HomeConfig)
return home.Resolve(datadir.HomeConfig)
}
func printTokens(config *config.Control) error {
@ -496,11 +496,11 @@ func setupDataDirAndChdir(config *config.Control) error {
dataDir := config.DataDir
if err := os.MkdirAll(dataDir, 0700); err != nil {
return pkgerrors.WithMessagef(err, "can not mkdir %s", dataDir)
return errors.WithMessagef(err, "can not mkdir %s", dataDir)
}
if err := os.Chdir(dataDir); err != nil {
return pkgerrors.WithMessagef(err, "can not chdir %s", dataDir)
return errors.WithMessagef(err, "can not chdir %s", dataDir)
}
return nil

View File

@ -3,7 +3,6 @@ package spegel
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@ -13,9 +12,9 @@ import (
"github.com/k3s-io/k3s/pkg/clientaccess"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/version"
"github.com/libp2p/go-libp2p/core/peer"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/sirupsen/logrus"
"github.com/spegel-org/spegel/pkg/routing"
@ -107,14 +106,14 @@ func (c *agentBootstrapper) Run(ctx context.Context, id peer.AddrInfo) error {
withCert := clientaccess.WithClientCertificate(c.clientCert, c.clientKey)
info, err := clientaccess.ParseAndValidateToken(c.server, c.token, withCert)
if err != nil {
return pkgerrors.WithMessage(err, "failed to validate join token")
return errors.WithMessage(err, "failed to validate join token")
}
c.info = info
}
client, err := util.GetClientSet(c.kubeConfig)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create kubernetes client")
return errors.WithMessage(err, "failed to create kubernetes client")
}
go wait.PollUntilContextCancel(ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {

View File

@ -5,7 +5,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
@ -18,28 +17,27 @@ import (
"time"
"github.com/containerd/containerd/v2/core/remotes/docker"
"github.com/k3s-io/k3s/pkg/agent/https"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/server/auth"
"github.com/k3s-io/k3s/pkg/util/logger"
"github.com/k3s-io/k3s/pkg/version"
"github.com/rancher/dynamiclistener/cert"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/go-logr/logr"
"github.com/gorilla/mux"
leveldb "github.com/ipfs/go-ds-leveldb"
ipfslog "github.com/ipfs/go-log/v2"
"github.com/k3s-io/k3s/pkg/agent/https"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/server/auth"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/pkg/util/logger"
"github.com/k3s-io/k3s/pkg/version"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
"github.com/spegel-org/spegel/pkg/metrics"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/pkg/registry"
"github.com/spegel-org/spegel/pkg/routing"
"github.com/spegel-org/spegel/pkg/state"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/metrics/legacyregistry"
)
@ -166,7 +164,7 @@ func (c *Config) Start(ctx context.Context, nodeConfig *config.Node, criReadyCha
// Get containerd client
caCert, err := os.ReadFile(c.ServerCAFile)
if err != nil {
return pkgerrors.WithMessage(err, "failed to read server CA")
return errors.WithMessage(err, "failed to read server CA")
}
certPool := x509.NewCertPool()
@ -190,33 +188,33 @@ func (c *Config) Start(ctx context.Context, nodeConfig *config.Node, criReadyCha
}
ociStore, err := NewDeferredContainerd(ctx, nodeConfig.Containerd.Address, registryNamespace, storeOpts...)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create OCI store")
return errors.WithMessage(err, "failed to create OCI store")
}
// create or load persistent private key
keyFile := filepath.Join(nodeConfig.Containerd.Opt, "peer.key")
keyBytes, _, err := cert.LoadOrGenerateKeyFile(keyFile, false)
if err != nil {
return pkgerrors.WithMessage(err, "failed to load or generate p2p private key")
return errors.WithMessage(err, "failed to load or generate p2p private key")
}
privKey, err := cert.ParsePrivateKeyPEM(keyBytes)
if err != nil {
return pkgerrors.WithMessage(err, "failed to parse p2p private key")
return errors.WithMessage(err, "failed to parse p2p private key")
}
p2pKey, _, err := crypto.KeyPairFromStdKey(privKey)
if err != nil {
return pkgerrors.WithMessage(err, "failed to convert p2p private key")
return errors.WithMessage(err, "failed to convert p2p private key")
}
// create a peerstore to allow persisting nodes across restarts
peerFile := filepath.Join(nodeConfig.Containerd.Opt, "peerstore.db")
ds, err := leveldb.NewDatastore(peerFile, nil)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create peerstore datastore")
return errors.WithMessage(err, "failed to create peerstore datastore")
}
ps, err := pstoreds.NewPeerstore(ctx, ds, pstoreds.DefaultOpts())
if err != nil {
return pkgerrors.WithMessage(err, "failed to create peerstore")
return errors.WithMessage(err, "failed to create peerstore")
}
// get latest tag configuration override
@ -250,7 +248,7 @@ func (c *Config) Start(ctx context.Context, nodeConfig *config.Node, criReadyCha
}
c.router, err = routing.NewP2PRouter(ctx, routerAddr, NewNotSelfBootstrapper(c.Bootstrapper), c.RegistryPort, opts...)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create P2P router")
return errors.WithMessage(err, "failed to create P2P router")
}
go c.router.Run(ctx)
@ -263,7 +261,7 @@ func (c *Config) Start(ctx context.Context, nodeConfig *config.Node, criReadyCha
}
reg, err := registry.NewRegistry(ociStore, c.router, registryOpts...)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create embedded registry")
return errors.WithMessage(err, "failed to create embedded registry")
}
regSvr := &http.Server{
Addr: ":" + c.RegistryPort,

View File

@ -8,7 +8,7 @@ import (
"path/filepath"
"github.com/k3s-io/k3s/pkg/util/bindata"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/sirupsen/logrus"
)
@ -27,7 +27,7 @@ func Stage(dataDir string) error {
logrus.Info("Writing static file: ", p)
os.MkdirAll(filepath.Dir(p), 0700)
if err := os.WriteFile(p, content, 0600); err != nil {
return pkgerrors.WithMessagef(err, "failed to write to %s", name)
return errors.WithMessagef(err, "failed to write to %s", name)
}
}

View File

@ -2,7 +2,6 @@ package util
import (
"context"
"errors"
"net"
"net/http"
"os"
@ -10,7 +9,7 @@ import (
"time"
"github.com/k3s-io/k3s/pkg/signals"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/rancher/wrangler/v3/pkg/merr"
"github.com/rancher/wrangler/v3/pkg/schemes"
"github.com/sirupsen/logrus"
@ -136,7 +135,7 @@ func APIServerReadyChan(ctx context.Context, kubeConfig string, timeout time.Dur
go func() {
if err := WaitForAPIServerReady(ctx, kubeConfig, timeout); err != nil {
signals.RequestShutdown(pkgerrors.WithMessage(err, "failed to wait for API server to become ready"))
signals.RequestShutdown(errors.WithMessage(err, "failed to wait for API server to become ready"))
return
}
close(ready)

View File

@ -1,31 +0,0 @@
package util
import (
"errors"
"fmt"
"path"
"runtime"
"slices"
"strings"
)
// Sentinel errors shared across command handlers; compare with errors.Is.
var (
	// ErrCommandNoArgs indicates a command was invoked with unexpected positional arguments.
	ErrCommandNoArgs = errors.New("this command does not take any arguments")
	// ErrUnsupportedPlatform indicates the requested operation is not available on this OS.
	ErrUnsupportedPlatform = errors.New("unsupported platform")
)
// ErrWithStack returns a new error whose text is message followed by a compact
// one-line rendering of the caller's stack, e.g.
// "message at main.main(main.go:10)->pkg.fn(fn.go:42)", outermost frame first.
// Frames belonging to the runtime package are omitted.
func ErrWithStack(message string) error {
	pcs := make([]uintptr, 32)
	// Skip 2 frames: runtime.Callers itself and ErrWithStack.
	n := runtime.Callers(2, pcs)
	// Only resolve the n filled entries; passing the whole array would make
	// CallersFrames decode the trailing zero PCs into bogus "(.:0)" frames
	// whenever the call depth is shallower than 32.
	frames := runtime.CallersFrames(pcs[:n])
	stack := make([]string, 0, n)
	for {
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, "runtime.") {
			stack = append(stack, fmt.Sprintf("%s(%s:%d)", frame.Function, path.Base(frame.File), frame.Line))
		}
		if !more {
			break
		}
	}
	// Callers reports innermost-first; reverse so the trace reads caller->callee.
	slices.Reverse(stack)
	return errors.New(message + " at " + strings.Join(stack, "->"))
}

50
pkg/util/errors/errors.go Normal file
View File

@ -0,0 +1,50 @@
package errors
import (
"errors"
"fmt"
"path"
"runtime"
"slices"
"strings"
)
// Aliases for the stdlib errors functions, so callers can import this package
// as a drop-in replacement without also importing the standard library package.
var (
	As  = errors.As
	Is  = errors.Is
	New = errors.New
)

// Sentinel errors shared across command handlers; compare with Is.
var (
	ErrCommandNoArgs       = New("this command does not take any arguments")
	ErrUnsupportedPlatform = New("unsupported platform")
)
// WithStack annotates err with a compact one-line rendering of the caller's
// stack ("... at main.main(main.go:10)->pkg.fn(fn.go:42)", outermost frame
// first), wrapping err so errors.Is/As still work. Frames in the runtime
// package are omitted. A nil err returns nil, matching WithMessage/WithMessagef
// and the semantics of the archived github.com/pkg/errors package; without this
// guard, fmt.Errorf("%w", nil) would produce a malformed "%!w(<nil>)" error.
func WithStack(err error) error {
	if err == nil {
		return nil
	}
	pcs := make([]uintptr, 32)
	// Skip 2 frames: runtime.Callers itself and WithStack.
	n := runtime.Callers(2, pcs)
	// Only resolve the n filled entries; passing the whole array would make
	// CallersFrames decode the trailing zero PCs into bogus "(.:0)" frames
	// whenever the call depth is shallower than 32.
	frames := runtime.CallersFrames(pcs[:n])
	stack := make([]string, 0, n)
	for {
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, "runtime.") {
			stack = append(stack, fmt.Sprintf("%s(%s:%d)", frame.Function, path.Base(frame.File), frame.Line))
		}
		if !more {
			break
		}
	}
	// Callers reports innermost-first; reverse so the trace reads caller->callee.
	slices.Reverse(stack)
	return fmt.Errorf("%w at %s", err, strings.Join(stack, "->"))
}
// WithMessage annotates err with msg, producing "msg: err" while wrapping err
// so errors.Is/As still match. A nil err passes through unchanged, mirroring
// the semantics of the archived github.com/pkg/errors package.
func WithMessage(err error, msg string) error {
	if err != nil {
		return fmt.Errorf("%s: %w", msg, err)
	}
	return nil
}
// WithMessagef annotates err with the formatted message, producing "msg: err"
// while wrapping err so errors.Is/As still match. A nil err passes through
// unchanged, mirroring the archived github.com/pkg/errors package.
func WithMessagef(err error, format string, args ...any) error {
	if err != nil {
		return fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)
	}
	return nil
}

26
pkg/util/home/home.go Normal file
View File

@ -0,0 +1,26 @@
package home
import (
"os"
"strings"
"github.com/k3s-io/k3s/pkg/util/errors"
)
// Tokens that stand in for the current user's home directory in a path.
var (
	homes = []string{"$HOME", "${HOME}", "~"}
)

// Resolve expands any home-directory placeholder ($HOME, ${HOME}, or ~)
// found anywhere in s with the current user's home directory. The home
// directory is only looked up when a placeholder is actually present, so
// plain paths never fail. Note the tokens are replaced wherever they occur
// in the string, not just at the start.
func Resolve(s string) (string, error) {
	for _, token := range homes {
		if !strings.Contains(s, token) {
			continue
		}
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return "", errors.WithMessage(err, "determining current user")
		}
		s = strings.ReplaceAll(s, token, homeDir)
	}
	return s, nil
}

View File

@ -3,9 +3,7 @@
package permissions
import (
"errors"
pkgerrors "github.com/pkg/errors"
"github.com/k3s-io/k3s/pkg/util/errors"
"golang.org/x/sys/windows"
)
@ -26,7 +24,7 @@ func IsPrivileged() error {
0, 0, 0, 0, 0, 0,
&sid)
if err != nil {
return pkgerrors.WithMessage(err, "failed to create Windows SID")
return errors.WithMessage(err, "failed to create Windows SID")
}
defer windows.FreeSid(sid)
@ -35,7 +33,7 @@ func IsPrivileged() error {
member, err := token.IsMember(sid)
if err != nil {
return pkgerrors.WithMessage(err, "failed to check group membership")
return errors.WithMessage(err, "failed to check group membership")
}
if !member {

View File

@ -2,7 +2,6 @@ package vpn
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/netip"
@ -10,8 +9,8 @@ import (
"strings"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/errors"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -73,7 +72,7 @@ func StartVPN(vpnAuthConfigFile string) error {
logrus.Debugf("Flags passed to tailscale up: %v", args)
output, err := util.ExecCommand("tailscale", args)
if err != nil {
return pkgerrors.WithMessage(err, "tailscale up failed: "+output)
return errors.WithMessage(err, "tailscale up failed: "+output)
}
logrus.Debugf("Output from tailscale up: %v", output)
return nil

View File

@ -16,8 +16,8 @@ import (
"time"
"github.com/k3s-io/k3s/pkg/flock"
"github.com/k3s-io/k3s/pkg/util/errors"
"github.com/k3s-io/k3s/tests"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
@ -239,16 +239,16 @@ func K3sKillServer(server *K3sServer) error {
logrus.Warnf("Unable to kill k3s server: %v", err)
return nil
}
return errors.Wrap(err, "failed to find k3s process group")
return errors.WithMessage(err, "failed to find k3s process group")
}
if err := syscall.Kill(-pgid, syscall.SIGKILL); err != nil {
return errors.Wrap(err, "failed to kill k3s process group")
return errors.WithMessage(err, "failed to kill k3s process group")
}
if err := server.cmd.Process.Kill(); err != nil {
return errors.Wrap(err, "failed to kill k3s process")
return errors.WithMessage(err, "failed to kill k3s process")
}
if _, err = server.cmd.Process.Wait(); err != nil {
return errors.Wrap(err, "failed to wait for k3s process exit")
return errors.WithMessage(err, "failed to wait for k3s process exit")
}
//Unmount all the associated filesystems
unmountFolder("/run/k3s")

View File

@ -1,6 +1,7 @@
package kubeflags
import (
"errors"
"strings"
"testing"
@ -8,7 +9,6 @@ import (
testutil "github.com/k3s-io/k3s/tests/integration"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
)
var server *testutil.K3sServer

View File

@ -65,54 +65,62 @@ func (m *CoreMock) V1() corev1.Interface {
var _ corev1.Interface = &V1Mock{}
type V1Mock struct {
ConfigMapMock *fake.MockControllerInterface[*v1.ConfigMap, *v1.ConfigMapList]
ConfigMapCache *fake.MockCacheInterface[*v1.ConfigMap]
EndpointsMock *fake.MockControllerInterface[*v1.Endpoints, *v1.EndpointsList]
ConfigMapMock *fake.MockControllerInterface[*v1.ConfigMap, *v1.ConfigMapList]
EndpointsCache *fake.MockCacheInterface[*v1.Endpoints]
EventMock *fake.MockControllerInterface[*v1.Event, *v1.EventList]
EndpointsMock *fake.MockControllerInterface[*v1.Endpoints, *v1.EndpointsList]
EventCache *fake.MockCacheInterface[*v1.Event]
NamespaceMock *fake.MockNonNamespacedControllerInterface[*v1.Namespace, *v1.NamespaceList]
EventMock *fake.MockControllerInterface[*v1.Event, *v1.EventList]
LimitRangeCache *fake.MockCacheInterface[*v1.LimitRange]
LimitRangeMock *fake.MockControllerInterface[*v1.LimitRange, *v1.LimitRangeList]
NamespaceCache *fake.MockNonNamespacedCacheInterface[*v1.Namespace]
NodeMock *fake.MockNonNamespacedControllerInterface[*v1.Node, *v1.NodeList]
NamespaceMock *fake.MockNonNamespacedControllerInterface[*v1.Namespace, *v1.NamespaceList]
NodeCache *fake.MockNonNamespacedCacheInterface[*v1.Node]
PersistentVolumeMock *fake.MockNonNamespacedControllerInterface[*v1.PersistentVolume, *v1.PersistentVolumeList]
NodeMock *fake.MockNonNamespacedControllerInterface[*v1.Node, *v1.NodeList]
PersistentVolumeCache *fake.MockNonNamespacedCacheInterface[*v1.PersistentVolume]
PersistentVolumeClaimMock *fake.MockControllerInterface[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList]
PersistentVolumeClaimCache *fake.MockCacheInterface[*v1.PersistentVolumeClaim]
PodMock *fake.MockControllerInterface[*v1.Pod, *v1.PodList]
PersistentVolumeClaimMock *fake.MockControllerInterface[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList]
PersistentVolumeMock *fake.MockNonNamespacedControllerInterface[*v1.PersistentVolume, *v1.PersistentVolumeList]
PodCache *fake.MockCacheInterface[*v1.Pod]
SecretMock *fake.MockControllerInterface[*v1.Secret, *v1.SecretList]
PodMock *fake.MockControllerInterface[*v1.Pod, *v1.PodList]
ResourceQuotaCache *fake.MockCacheInterface[*v1.ResourceQuota]
ResourceQuotaMock *fake.MockControllerInterface[*v1.ResourceQuota, *v1.ResourceQuotaList]
SecretCache *fake.MockCacheInterface[*v1.Secret]
ServiceMock *fake.MockControllerInterface[*v1.Service, *v1.ServiceList]
ServiceCache *fake.MockCacheInterface[*v1.Service]
ServiceAccountMock *fake.MockControllerInterface[*v1.ServiceAccount, *v1.ServiceAccountList]
SecretMock *fake.MockControllerInterface[*v1.Secret, *v1.SecretList]
ServiceAccountCache *fake.MockCacheInterface[*v1.ServiceAccount]
ServiceAccountMock *fake.MockControllerInterface[*v1.ServiceAccount, *v1.ServiceAccountList]
ServiceCache *fake.MockCacheInterface[*v1.Service]
ServiceMock *fake.MockControllerInterface[*v1.Service, *v1.ServiceList]
}
func NewV1(c *gomock.Controller) *V1Mock {
return &V1Mock{
ConfigMapMock: fake.NewMockControllerInterface[*v1.ConfigMap, *v1.ConfigMapList](c),
ConfigMapCache: fake.NewMockCacheInterface[*v1.ConfigMap](c),
EndpointsMock: fake.NewMockControllerInterface[*v1.Endpoints, *v1.EndpointsList](c),
ConfigMapMock: fake.NewMockControllerInterface[*v1.ConfigMap, *v1.ConfigMapList](c),
EndpointsCache: fake.NewMockCacheInterface[*v1.Endpoints](c),
EventMock: fake.NewMockControllerInterface[*v1.Event, *v1.EventList](c),
EndpointsMock: fake.NewMockControllerInterface[*v1.Endpoints, *v1.EndpointsList](c),
EventCache: fake.NewMockCacheInterface[*v1.Event](c),
NamespaceMock: fake.NewMockNonNamespacedControllerInterface[*v1.Namespace, *v1.NamespaceList](c),
EventMock: fake.NewMockControllerInterface[*v1.Event, *v1.EventList](c),
LimitRangeCache: fake.NewMockCacheInterface[*v1.LimitRange](c),
LimitRangeMock: fake.NewMockControllerInterface[*v1.LimitRange, *v1.LimitRangeList](c),
NamespaceCache: fake.NewMockNonNamespacedCacheInterface[*v1.Namespace](c),
NodeMock: fake.NewMockNonNamespacedControllerInterface[*v1.Node, *v1.NodeList](c),
NamespaceMock: fake.NewMockNonNamespacedControllerInterface[*v1.Namespace, *v1.NamespaceList](c),
NodeCache: fake.NewMockNonNamespacedCacheInterface[*v1.Node](c),
PersistentVolumeMock: fake.NewMockNonNamespacedControllerInterface[*v1.PersistentVolume, *v1.PersistentVolumeList](c),
NodeMock: fake.NewMockNonNamespacedControllerInterface[*v1.Node, *v1.NodeList](c),
PersistentVolumeCache: fake.NewMockNonNamespacedCacheInterface[*v1.PersistentVolume](c),
PersistentVolumeClaimMock: fake.NewMockControllerInterface[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList](c),
PersistentVolumeClaimCache: fake.NewMockCacheInterface[*v1.PersistentVolumeClaim](c),
PodMock: fake.NewMockControllerInterface[*v1.Pod, *v1.PodList](c),
PersistentVolumeClaimMock: fake.NewMockControllerInterface[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList](c),
PersistentVolumeMock: fake.NewMockNonNamespacedControllerInterface[*v1.PersistentVolume, *v1.PersistentVolumeList](c),
PodCache: fake.NewMockCacheInterface[*v1.Pod](c),
SecretMock: fake.NewMockControllerInterface[*v1.Secret, *v1.SecretList](c),
PodMock: fake.NewMockControllerInterface[*v1.Pod, *v1.PodList](c),
ResourceQuotaCache: fake.NewMockCacheInterface[*v1.ResourceQuota](c),
ResourceQuotaMock: fake.NewMockControllerInterface[*v1.ResourceQuota, *v1.ResourceQuotaList](c),
SecretCache: fake.NewMockCacheInterface[*v1.Secret](c),
ServiceMock: fake.NewMockControllerInterface[*v1.Service, *v1.ServiceList](c),
ServiceCache: fake.NewMockCacheInterface[*v1.Service](c),
ServiceAccountMock: fake.NewMockControllerInterface[*v1.ServiceAccount, *v1.ServiceAccountList](c),
SecretMock: fake.NewMockControllerInterface[*v1.Secret, *v1.SecretList](c),
ServiceAccountCache: fake.NewMockCacheInterface[*v1.ServiceAccount](c),
ServiceAccountMock: fake.NewMockControllerInterface[*v1.ServiceAccount, *v1.ServiceAccountList](c),
ServiceCache: fake.NewMockCacheInterface[*v1.Service](c),
ServiceMock: fake.NewMockControllerInterface[*v1.Service, *v1.ServiceList](c),
}
}
@ -128,6 +136,10 @@ func (m *V1Mock) Event() corev1.EventController {
return m.EventMock
}
func (m *V1Mock) LimitRange() corev1.LimitRangeController {
return m.LimitRangeMock
}
func (m *V1Mock) Namespace() corev1.NamespaceController {
return m.NamespaceMock
}
@ -148,6 +160,10 @@ func (m *V1Mock) Pod() corev1.PodController {
return m.PodMock
}
func (m *V1Mock) ResourceQuota() corev1.ResourceQuotaController {
return m.ResourceQuotaMock
}
func (m *V1Mock) Secret() corev1.SecretController {
return m.SecretMock
}