feat(system): upgrade portainer on kubernetes [EE-4625] (#8448)

Chaim Lev-Ari 2023-03-08 04:34:55 +02:00 committed by GitHub
parent 0669ad77d3
commit 4c86be725d
8 changed files with 394 additions and 139 deletions

@@ -684,7 +684,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Msg("failed to fetch SSL settings from DB")
}
upgradeService, err := upgrade.NewService(*flags.Assets, composeDeployer)
upgradeService, err := upgrade.NewService(*flags.Assets, composeDeployer, kubernetesClientFactory)
if err != nil {
log.Fatal().Err(err).Msg("failed initializing upgrade service")
}

@@ -8,6 +8,8 @@ import (
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/platform"
)
type systemUpgradePayload struct {
@@ -28,13 +30,19 @@ func (payload *systemUpgradePayload) Validate(r *http.Request) error {
return nil
}
var platformToEndpointType = map[platform.ContainerPlatform]portainer.EndpointType{
platform.PlatformDockerStandalone: portainer.DockerEnvironment,
platform.PlatformDockerSwarm: portainer.DockerEnvironment,
platform.PlatformKubernetes: portainer.KubernetesLocalEnvironment,
}
// @id systemUpgrade
// @summary Upgrade Portainer to BE
// @description Upgrade Portainer to BE
// @description **Access policy**: administrator
// @tags system
// @produce json
// @success 200 {object} status "Success"
// @success 204 {object} status "Success"
// @router /system/upgrade [post]
func (handler *Handler) systemUpgrade(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
payload, err := request.GetPayload[systemUpgradePayload](r)
@@ -42,10 +50,40 @@ func (handler *Handler) systemUpgrade(w http.ResponseWriter, r *http.Request) *h
return httperror.BadRequest("Invalid request payload", err)
}
err = handler.upgradeService.Upgrade(payload.License)
environment, err := handler.guessLocalEndpoint()
if err != nil {
return httperror.InternalServerError("Failed to guess local endpoint", err)
}
err = handler.upgradeService.Upgrade(environment, payload.License)
if err != nil {
return httperror.InternalServerError("Failed to upgrade Portainer", err)
}
return response.Empty(w)
}
func (handler *Handler) guessLocalEndpoint() (*portainer.Endpoint, error) {
platform, err := platform.DetermineContainerPlatform()
if err != nil {
return nil, errors.Wrap(err, "failed to determine container platform")
}
endpointType, ok := platformToEndpointType[platform]
if !ok {
return nil, errors.New("failed to determine endpoint type")
}
endpoints, err := handler.dataStore.Endpoint().Endpoints()
if err != nil {
return nil, errors.Wrap(err, "failed to retrieve endpoints")
}
for _, endpoint := range endpoints {
if endpoint.Type == endpointType {
return &endpoint, nil
}
}
return nil, errors.New("failed to find local endpoint")
}
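For orientation, here is a minimal sketch of how the endpoint added above could be called from Go. The /api prefix, the "license" JSON field name, and the X-API-Key header are assumptions not confirmed by this diff; on success the handler returns an empty response (204).

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Hypothetical client call; the field name, URL prefix and auth header are assumptions.
    body, _ := json.Marshal(map[string]string{"license": "<your-BE-license-key>"})

    req, err := http.NewRequest(http.MethodPost,
        "https://portainer.example.com/api/system/upgrade", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("X-API-Key", "<admin-api-key>") // the route is admin-only per the access policy above

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    fmt.Println("status:", resp.Status) // expect 204 No Content on success
}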

@@ -1,23 +1,13 @@
package upgrade
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"time"
"github.com/cbroglie/mustache"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/pkg/errors"
libstack "github.com/portainer/docker-compose-wrapper"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/docker"
"github.com/portainer/portainer/api/filesystem"
"github.com/portainer/portainer/api/kubernetes/cli"
"github.com/portainer/portainer/api/platform"
"github.com/rs/zerolog/log"
)
const (
@@ -36,19 +26,23 @@
)
type Service interface {
Upgrade(licenseKey string) error
Upgrade(environment *portainer.Endpoint, licenseKey string) error
}
type service struct {
composeDeployer libstack.Deployer
isUpdating bool
platform platform.ContainerPlatform
assetsPath string
composeDeployer libstack.Deployer
kubernetesClientFactory *cli.ClientFactory
isUpdating bool
platform platform.ContainerPlatform
assetsPath string
}
func NewService(
assetsPath string,
composeDeployer libstack.Deployer,
kubernetesClientFactory *cli.ClientFactory,
) (Service, error) {
platform, err := platform.DetermineContainerPlatform()
if err != nil {
@@ -56,13 +50,14 @@ func NewService(
}
return &service{
assetsPath: assetsPath,
composeDeployer: composeDeployer,
platform: platform,
assetsPath: assetsPath,
composeDeployer: composeDeployer,
kubernetesClientFactory: kubernetesClientFactory,
platform: platform,
}, nil
}
func (service *service) Upgrade(licenseKey string) error {
func (service *service) Upgrade(environment *portainer.Endpoint, licenseKey string) error {
service.isUpdating = true
switch service.platform {
@@ -70,113 +65,9 @@ func (service *service) Upgrade(licenseKey string) error {
return service.upgradeDocker(licenseKey, portainer.APIVersion, "standalone")
case platform.PlatformDockerSwarm:
return service.upgradeDocker(licenseKey, portainer.APIVersion, "swarm")
// case platform.PlatformKubernetes:
// case platform.PlatformPodman:
// case platform.PlatformNomad:
// default:
case platform.PlatformKubernetes:
return service.upgradeKubernetes(environment, licenseKey, portainer.APIVersion)
}
return fmt.Errorf("unsupported platform %s", service.platform)
}
func (service *service) upgradeDocker(licenseKey, version, envType string) error {
ctx := context.TODO()
templateName := filesystem.JoinPaths(service.assetsPath, "mustache-templates", mustacheUpgradeDockerTemplateFile)
portainerImagePrefix := os.Getenv(portainerImagePrefixEnvVar)
if portainerImagePrefix == "" {
portainerImagePrefix = "portainer/portainer-ee"
}
image := fmt.Sprintf("%s:%s", portainerImagePrefix, version)
skipPullImage := os.Getenv(skipPullImageEnvVar)
if err := service.checkImage(ctx, image, skipPullImage != ""); err != nil {
return err
}
composeFile, err := mustache.RenderFile(templateName, map[string]string{
"image": image,
"skip_pull_image": skipPullImage,
"updater_image": os.Getenv(updaterImageEnvVar),
"license": licenseKey,
"envType": envType,
})
log.Debug().
Str("composeFile", composeFile).
Msg("Compose file for upgrade")
if err != nil {
return errors.Wrap(err, "failed to render upgrade template")
}
tmpDir := os.TempDir()
timeId := time.Now().Unix()
filePath := filesystem.JoinPaths(tmpDir, fmt.Sprintf("upgrade-%d.yml", timeId))
r := bytes.NewReader([]byte(composeFile))
err = filesystem.CreateFile(filePath, r)
if err != nil {
return errors.Wrap(err, "failed to create upgrade compose file")
}
projectName := fmt.Sprintf(
"portainer-upgrade-%d-%s",
timeId,
strings.Replace(version, ".", "-", -1))
err = service.composeDeployer.Deploy(
ctx,
[]string{filePath},
libstack.DeployOptions{
ForceRecreate: true,
AbortOnContainerExit: true,
Options: libstack.Options{
ProjectName: projectName,
},
},
)
// optimally, server was restarted by the updater, so we should not reach this point
if err != nil {
return errors.Wrap(err, "failed to deploy upgrade stack")
}
return errors.New("upgrade failed: server should have been restarted by the updater")
}
func (service *service) checkImage(ctx context.Context, image string, skipPullImage bool) error {
cli, err := docker.CreateClientFromEnv()
if err != nil {
return errors.Wrap(err, "failed to create docker client")
}
if skipPullImage {
filters := filters.NewArgs()
filters.Add("reference", image)
images, err := cli.ImageList(ctx, types.ImageListOptions{
Filters: filters,
})
if err != nil {
return errors.Wrap(err, "failed to list images")
}
if len(images) == 0 {
return errors.Errorf("image %s not found locally", image)
}
return nil
} else {
// check if available on registry
_, err := cli.DistributionInspect(ctx, image, "")
if err != nil {
return errors.Errorf("image %s not found on registry", image)
}
return nil
}
}

@@ -0,0 +1,121 @@
package upgrade
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"time"
"github.com/cbroglie/mustache"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/pkg/errors"
libstack "github.com/portainer/docker-compose-wrapper"
"github.com/portainer/portainer/api/filesystem"
"github.com/rs/zerolog/log"
)
func (service *service) upgradeDocker(licenseKey, version, envType string) error {
ctx := context.TODO()
templateName := filesystem.JoinPaths(service.assetsPath, "mustache-templates", mustacheUpgradeDockerTemplateFile)
portainerImagePrefix := os.Getenv(portainerImagePrefixEnvVar)
if portainerImagePrefix == "" {
portainerImagePrefix = "portainer/portainer-ee"
}
image := fmt.Sprintf("%s:%s", portainerImagePrefix, version)
skipPullImage := os.Getenv(skipPullImageEnvVar)
if err := service.checkImageForDocker(ctx, image, skipPullImage != ""); err != nil {
return err
}
composeFile, err := mustache.RenderFile(templateName, map[string]string{
"image": image,
"skip_pull_image": skipPullImage,
"updater_image": os.Getenv(updaterImageEnvVar),
"license": licenseKey,
"envType": envType,
})
log.Debug().
Str("composeFile", composeFile).
Msg("Compose file for upgrade")
if err != nil {
return errors.Wrap(err, "failed to render upgrade template")
}
tmpDir := os.TempDir()
timeId := time.Now().Unix()
filePath := filesystem.JoinPaths(tmpDir, fmt.Sprintf("upgrade-%d.yml", timeId))
r := bytes.NewReader([]byte(composeFile))
err = filesystem.CreateFile(filePath, r)
if err != nil {
return errors.Wrap(err, "failed to create upgrade compose file")
}
projectName := fmt.Sprintf(
"portainer-upgrade-%d-%s",
timeId,
strings.Replace(version, ".", "-", -1))
err = service.composeDeployer.Deploy(
ctx,
[]string{filePath},
libstack.DeployOptions{
ForceRecreate: true,
AbortOnContainerExit: true,
Options: libstack.Options{
ProjectName: projectName,
},
},
)
// optimally, server was restarted by the updater, so we should not reach this point
if err != nil {
return errors.Wrap(err, "failed to deploy upgrade stack")
}
return errors.New("upgrade failed: server should have been restarted by the updater")
}
func (service *service) checkImageForDocker(ctx context.Context, image string, skipPullImage bool) error {
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return errors.Wrap(err, "failed to create docker client")
}
if skipPullImage {
filters := filters.NewArgs()
filters.Add("reference", image)
images, err := cli.ImageList(ctx, types.ImageListOptions{
Filters: filters,
})
if err != nil {
return errors.Wrap(err, "failed to list images")
}
if len(images) == 0 {
return errors.Errorf("image %s not found locally", image)
}
return nil
} else {
// check if available on registry
_, err := cli.DistributionInspect(ctx, image, "")
if err != nil {
return errors.Errorf("image %s not found on registry", image)
}
return nil
}
}

@@ -0,0 +1,201 @@
package upgrade
import (
"context"
"fmt"
"os"
"time"
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
"github.com/rs/zerolog/log"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func ptr[T any](i T) *T { return &i }
func (service *service) upgradeKubernetes(environment *portainer.Endpoint, licenseKey, version string) error {
ctx := context.TODO()
kubeCLI, err := service.kubernetesClientFactory.CreateClient(environment)
if err != nil {
return errors.WithMessage(err, "failed to get kubernetes client")
}
namespace := "portainer"
taskName := fmt.Sprintf("portainer-upgrade-%d", time.Now().Unix())
jobsCli := kubeCLI.BatchV1().Jobs(namespace)
updaterImage := os.Getenv(updaterImageEnvVar)
if updaterImage == "" {
updaterImage = "portainer/portainer-updater:latest"
}
portainerImagePrefix := os.Getenv(portainerImagePrefixEnvVar)
if portainerImagePrefix == "" {
portainerImagePrefix = "portainer/portainer-ee"
}
image := fmt.Sprintf("%s:%s", portainerImagePrefix, version)
if err := service.checkImageForKubernetes(ctx, kubeCLI, namespace, image); err != nil {
return err
}
job, err := jobsCli.Create(ctx, &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: taskName,
Namespace: namespace,
},
Spec: batchv1.JobSpec{
TTLSecondsAfterFinished: ptr[int32](5 * 60), // cleanup after 5 minutes
BackoffLimit: ptr[int32](0),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: "Never",
ServiceAccountName: "portainer-sa-clusteradmin",
Containers: []corev1.Container{
{
Name: taskName,
Image: updaterImage,
Args: []string{
"--pretty-log",
"--log-level", "DEBUG",
"portainer",
"--env-type", "kubernetes",
"--image", image,
"--license", licenseKey,
},
},
},
},
},
},
}, metav1.CreateOptions{})
if err != nil {
return errors.WithMessage(err, "failed to create upgrade job")
}
watcher, err := jobsCli.Watch(ctx, metav1.ListOptions{
FieldSelector: "metadata.name=" + taskName,
TimeoutSeconds: ptr[int64](60),
})
if err != nil {
return errors.WithMessage(err, "failed to watch upgrade job")
}
for event := range watcher.ResultChan() {
job, ok := event.Object.(*batchv1.Job)
if !ok {
continue
}
for _, c := range job.Status.Conditions {
if c.Type == batchv1.JobComplete {
log.Debug().
Str("job", job.Name).
Msg("Upgrade job completed")
return nil
}
if c.Type == batchv1.JobFailed {
return fmt.Errorf("upgrade failed: %s", c.Message)
}
}
}
log.Debug().
Str("job", job.Name).
Msg("Upgrade job created")
return errors.New("upgrade failed: server should have been restarted by the updater")
}
func (service *service) checkImageForKubernetes(ctx context.Context, kubeCLI *kubernetes.Clientset, namespace, image string) error {
podsCli := kubeCLI.CoreV1().Pods(namespace)
log.Debug().
Str("image", image).
Msg("Checking image")
podName := fmt.Sprintf("portainer-image-check-%d", time.Now().Unix())
_, err := podsCli.Create(ctx, &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: corev1.PodSpec{
RestartPolicy: "Never",
Containers: []corev1.Container{
{
Name: fmt.Sprint(podName, "-container"),
Image: image,
},
},
},
}, metav1.CreateOptions{})
if err != nil {
log.Warn().Err(err).Msg("failed to create image check pod")
return errors.WithMessage(err, "failed to create image check pod")
}
defer func() {
log.Debug().
Str("pod", podName).
Msg("Deleting image check pod")
if err := podsCli.Delete(ctx, podName, metav1.DeleteOptions{}); err != nil {
log.Warn().Err(err).Msg("failed to delete image check pod")
}
}()
i := 0
for {
time.Sleep(2 * time.Second)
log.Debug().
Str("image", image).
Int("try", i).
Msg("Checking image")
i++
pod, err := podsCli.Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return errors.WithMessage(err, "failed to get image check pod")
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Ready {
log.Debug().
Str("image", image).
Str("pod", podName).
Msg("Image check container ready, assuming image is available")
return nil
}
if containerStatus.State.Waiting != nil {
if containerStatus.State.Waiting.Reason == "ErrImagePull" || containerStatus.State.Waiting.Reason == "ImagePullBackOff" {
log.Debug().
Str("image", image).
Str("pod", podName).
Str("reason", containerStatus.State.Waiting.Reason).
Str("message", containerStatus.State.Waiting.Message).
Str("container", containerStatus.Name).
Msg("Image check container failed because of missing image")
return fmt.Errorf("image %s not found", image)
}
}
}
}
}
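As a side note, the upgrade job created above can be observed from any in-cluster client. Below is a minimal, illustrative client-go sketch; the "portainer" namespace and the job-name pattern come from the code above, everything else is an assumption and not part of this commit.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    // Assumes this runs inside the same cluster as the Portainer deployment.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    clientset, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }

    // Upgrade jobs are named portainer-upgrade-<unix timestamp> (see taskName above).
    jobs, err := clientset.BatchV1().Jobs("portainer").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, job := range jobs.Items {
        fmt.Printf("%s: active=%d succeeded=%d failed=%d\n",
            job.Name, job.Status.Active, job.Status.Succeeded, job.Status.Failed)
    }
}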

@@ -41,10 +41,12 @@ func DetermineContainerPlatform() (ContainerPlatform, error) {
if podmanModeEnvVar == "1" {
return PlatformPodman, nil
}
serviceHostKubernetesEnvVar := os.Getenv(KubernetesServiceHost)
if serviceHostKubernetesEnvVar != "" {
return PlatformKubernetes, nil
}
nomadJobName := os.Getenv(NomadJobName)
if nomadJobName != "" {
return PlatformNomad, nil
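
Kubernetes injects KUBERNETES_SERVICE_HOST into every pod, which is what the new branch above keys off. A tiny illustrative test follows, assuming the KubernetesServiceHost constant resolves to that variable name and that no earlier branch in the function short-circuits first (neither is visible in this hunk):

package platform_test

import (
    "testing"

    "github.com/portainer/portainer/api/platform"
)

func TestDetectKubernetes(t *testing.T) {
    // Simulate the environment Kubernetes provides to every pod.
    t.Setenv("KUBERNETES_SERVICE_HOST", "10.96.0.1")

    p, err := platform.DetermineContainerPlatform()
    if err != nil {
        t.Fatal(err)
    }
    if p != platform.PlatformKubernetes {
        t.Fatalf("expected PlatformKubernetes, got %v", p)
    }
}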

@@ -24,6 +24,7 @@ export const UpgradeBEBannerWrapper = withHideOnExtension(
const enabledPlatforms: Array<ContainerPlatform> = [
'Docker Standalone',
'Docker Swarm',
'Kubernetes',
];
function UpgradeBEBanner() {

@@ -1,21 +1,22 @@
#!/bin/sh
set -x
DEBUG=${DEBUG:-""}
if [ -n "$DEBUG" ]; then
set -x
fi
mkdir -p dist
# populate tool versions
BUILDNUMBER="N/A"
CONTAINER_IMAGE_TAG="N/A"
NODE_VERSION="0"
YARN_VERSION="0"
WEBPACK_VERSION="0"
GO_VERSION="0"
BUILD_NUMBER=${BUILD_NUMBER:-"N/A"}
CONTAINER_IMAGE_TAG=${CONTAINER_IMAGE_TAG:-"N/A"}
NODE_VERSION=${NODE_VERSION:-"0"}
YARN_VERSION=${YARN_VERSION:-"0"}
WEBPACK_VERSION=${WEBPACK_VERSION:-"0"}
GO_VERSION=${GO_VERSION:-"0"}
# copy templates
cp -r "./mustache-templates" "./dist"
cd api
# the go get adds 8 seconds
go get -t -d -v ./...
@@ -25,7 +26,7 @@ GOOS=$1 GOARCH=$2 CGO_ENABLED=0 go build \
-trimpath \
--installsuffix cgo \
--ldflags "-s \
--X 'github.com/portainer/portainer/api/build.BuildNumber=${BUILDNUMBER}' \
--X 'github.com/portainer/portainer/api/build.BuildNumber=${BUILD_NUMBER}' \
--X 'github.com/portainer/portainer/api/build.ImageTag=${CONTAINER_IMAGE_TAG}' \
--X 'github.com/portainer/portainer/api/build.NodejsVersion=${NODE_VERSION}' \
--X 'github.com/portainer/portainer/api/build.YarnVersion=${YARN_VERSION}' \