feat(k8s): review the resource assignment when creating a Kubernetes application EE-437 (#5254)

* feat(nodes limits) Review the resource assignment when creating a Kubernetes application EE-437

* feat(nodes limits) review feedback EE-437

* feat(nodes limits) workaround for lodash cloneDeep not working in production mode EE-437

* feat(nodes limits) calculate the slider's max CPU with the floor function instead of the round function EE-437

* feat(nodes limits) another review feedback EE-437

* feat(nodes limits) cleanup code EE-437

* feat(nodes limits) EE-437 pr feedback update

* feat(nodes limits) EE-437 rebase onto develop branch

* feat(nodes limits) EE-437 another pr feedback update

Co-authored-by: Simon Meng <simon.meng@portainer.io>
cong meng 2021-09-01 09:08:01 +12:00 committed by GitHub
parent 0ffbe6a42e
commit c597ae96e2
10 changed files with 453 additions and 42 deletions

View File

@@ -39,6 +39,8 @@ func NewHandler(bouncer *security.RequestBouncer, authorizationService *authoriz
kubeRouter.PathPrefix("/config").Handler(
bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.getKubernetesConfig))).Methods(http.MethodGet)
kubeRouter.PathPrefix("/nodes_limits").Handler(
bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.getKubernetesNodesLimits))).Methods(http.MethodGet)
// namespaces
// in the future this piece of code might be in another package (or a few different packages - namespaces/namespace?)

View File

@@ -0,0 +1,52 @@
package kubernetes
import (
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
portainer "github.com/portainer/portainer/api"
bolterrors "github.com/portainer/portainer/api/bolt/errors"
"net/http"
)
// @id getKubernetesNodesLimits
// @summary Get the CPU and memory limits of all nodes within the k8s cluster
// @description Get the CPU and memory limits of all nodes within the k8s cluster
// @description **Access policy**: authorized
// @tags kubernetes
// @security jwt
// @accept json
// @produce json
// @param id path int true "Endpoint identifier"
// @success 200 {object} K8sNodesLimits "Success"
// @failure 400 "Invalid request"
// @failure 401 "Unauthorized"
// @failure 403 "Permission denied"
// @failure 404 "Endpoint not found"
// @failure 500 "Server error"
// @router /kubernetes/{id}/nodes_limits [get]
func (handler *Handler) getKubernetesNodesLimits(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
endpointID, err := request.RetrieveNumericRouteVariableValue(r, "id")
if err != nil {
return &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint identifier route variable", err}
}
endpoint, err := handler.dataStore.Endpoint().Endpoint(portainer.EndpointID(endpointID))
if err == bolterrors.ErrObjectNotFound {
return &httperror.HandlerError{http.StatusNotFound, "Unable to find an endpoint with the specified identifier inside the database", err}
} else if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
}
cli, err := handler.kubernetesClientFactory.GetKubeClient(endpoint)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to create Kubernetes client", err}
}
nodesLimits, err := cli.GetNodesLimits()
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve nodes limits", err}
}
return response.JSON(w, nodesLimits)
}
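For orientation, the route returns a JSON object keyed by node name, each entry holding the node's remaining (allocatable minus requested) CPU in millicores and memory in bytes. A purely illustrative payload, not taken from the commit:

// GET /kubernetes/{id}/nodes_limits
const exampleNodesLimits = {
  'node-a': { CPU: 1500, Memory: 3000000000 }, // 1.5 CPU and ~3 GB still unreserved
  'node-b': { CPU: 500, Memory: 1000000000 },
};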

View File

@@ -0,0 +1,42 @@
package cli
import (
portainer "github.com/portainer/portainer/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetNodesLimits gets the CPU and memory limits (i.e. the resources not yet reserved by pod requests) of all nodes in the current k8s endpoint connection
func (kcl *KubeClient) GetNodesLimits() (portainer.K8sNodesLimits, error) {
nodesLimits := make(portainer.K8sNodesLimits)
nodes, err := kcl.cli.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
pods, err := kcl.cli.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, item := range nodes.Items {
cpu := item.Status.Allocatable.Cpu().MilliValue()
memory := item.Status.Allocatable.Memory().Value()
nodesLimits[item.ObjectMeta.Name] = &portainer.K8sNodeLimits{
CPU: cpu,
Memory: memory,
}
}
for _, item := range pods.Items {
if nodeLimits, ok := nodesLimits[item.Spec.NodeName]; ok {
for _, container := range item.Spec.Containers {
nodeLimits.CPU -= container.Resources.Requests.Cpu().MilliValue()
nodeLimits.Memory -= container.Resources.Requests.Memory().Value()
}
}
}
return nodesLimits, nil
}

View File

@@ -0,0 +1,137 @@
package cli
import (
portainer "github.com/portainer/portainer/api"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
kfake "k8s.io/client-go/kubernetes/fake"
"reflect"
"testing"
)
func newNodes() *v1.NodeList {
return &v1.NodeList{
Items: []v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node-0",
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("4M"),
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node-1",
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("3"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("6M"),
},
},
},
},
}
}
func newPods() *v1.PodList {
return &v1.PodList{
Items: []v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-container-0",
Namespace: "test-namespace-0",
},
Spec: v1.PodSpec{
NodeName: "test-node-0",
Containers: []v1.Container{
{
Name: "test-container-0",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("2M"),
},
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-container-1",
Namespace: "test-namespace-1",
},
Spec: v1.PodSpec{
NodeName: "test-node-1",
Containers: []v1.Container{
{
Name: "test-container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("3M"),
},
},
},
},
},
},
},
}
}
func TestKubeClient_GetNodesLimits(t *testing.T) {
type fields struct {
cli kubernetes.Interface
}
fieldsInstance := fields{
cli: kfake.NewSimpleClientset(newNodes(), newPods()),
}
tests := []struct {
name string
fields fields
want portainer.K8sNodesLimits
wantErr bool
}{
{
name: "2 nodes 2 pods",
fields: fieldsInstance,
want: portainer.K8sNodesLimits{
"test-node-0": &portainer.K8sNodeLimits{
CPU: 1000,
Memory: 2000000,
},
"test-node-1": &portainer.K8sNodeLimits{
CPU: 1000,
Memory: 3000000,
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
kcl := &KubeClient{
cli: tt.fields.cli,
}
got, err := kcl.GetNodesLimits()
if (err != nil) != tt.wantErr {
t.Errorf("GetNodesLimits() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetNodesLimits() got = %v, want %v", got, tt.want)
}
})
}
}
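The expected values follow directly from the fixtures, since GetNodesLimits subtracts every pod's requests from its node's allocatable capacity. A quick arithmetic check (illustrative only, not part of the commit):

// test-node-0: 2 CPU = 2000m allocatable, pod requests 1000m; 4M = 4000000 bytes, pod requests 2000000
console.log(2000 - 1000, 4000000 - 2000000); // 1000 2000000
// test-node-1: 3000m - 2000m and 6000000 - 3000000
console.log(3000 - 2000, 6000000 - 3000000); // 1000 3000000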

View File

@@ -398,6 +398,13 @@ type (
// JobType represents a job type
JobType int
K8sNodeLimits struct {
CPU int64 `json:"CPU"`
Memory int64 `json:"Memory"`
}
K8sNodesLimits map[string]*K8sNodeLimits
K8sNamespaceAccessPolicy struct {
UserAccessPolicies UserAccessPolicies `json:"UserAccessPolicies"`
TeamAccessPolicies TeamAccessPolicies `json:"TeamAccessPolicies"`
@@ -1220,6 +1227,7 @@ type (
CreateUserShellPod(ctx context.Context, serviceAccountName string) (*KubernetesShellPod, error)
StartExecProcess(token string, useAdminToken bool, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) error
NamespaceAccessPoliciesDeleteNamespace(namespace string) error
GetNodesLimits() (K8sNodesLimits, error)
GetNamespaceAccessPolicies() (map[string]K8sNamespaceAccessPolicy, error)
UpdateNamespaceAccessPolicies(accessPolicies map[string]K8sNamespaceAccessPolicy) error
DeleteRegistrySecret(registry *Registry, namespace string) error

View File

@@ -0,0 +1,65 @@
import _ from 'lodash-es';
/**
* NodesLimits Model
*/
export class KubernetesNodesLimits {
constructor(nodesLimits) {
this.MaxCPU = 0;
this.MaxMemory = 0;
this.nodesLimits = this.convertCPU(nodesLimits);
this.calculateMaxCPUMemory();
}
convertCPU(nodesLimits) {
_.forEach(nodesLimits, (value) => {
if (value.CPU) {
value.CPU /= 1000.0;
}
});
return nodesLimits;
}
calculateMaxCPUMemory() {
const nodesLimitsArray = Object.values(this.nodesLimits);
this.MaxCPU = _.maxBy(nodesLimitsArray, 'CPU').CPU;
this.MaxMemory = _.maxBy(nodesLimitsArray, 'Memory').Memory;
}
// check if there is enough cpu and memory to allocate containers in replica mode
overflowForReplica(cpu, memory, instances) {
_.forEach(this.nodesLimits, (value) => {
instances -= Math.min(Math.floor(value.CPU / cpu), Math.floor(value.Memory / memory));
});
return instances > 0;
}
// check if there is enough cpu and memory to allocate containers in global mode
overflowForGlobal(cpu, memory) {
let overflow = false;
_.forEach(this.nodesLimits, (value) => {
if (cpu > value.CPU || memory > value.Memory) {
overflow = true;
}
});
return overflow;
}
excludesPods(pods, cpuLimit, memoryLimit) {
const nodesLimits = this.nodesLimits;
_.forEach(pods, (value) => {
const node = value.Node;
if (node && nodesLimits[node]) {
nodesLimits[node].CPU += cpuLimit;
nodesLimits[node].Memory += memoryLimit;
}
});
this.calculateMaxCPUMemory();
}
}
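To make the model's behaviour concrete, a hedged usage sketch (the payload shape mirrors the nodes_limits API above; the numbers are invented):

import { KubernetesNodesLimits } from 'Kubernetes/models/nodes-limits/models';
// Raw payload: CPU in millicores, memory in bytes
const limits = new KubernetesNodesLimits({
  'node-a': { CPU: 2000, Memory: 4000000000 },
  'node-b': { CPU: 1000, Memory: 2000000000 },
});
// convertCPU() has turned millicores into cores, so MaxCPU === 2 and MaxMemory === 4000000000
limits.overflowForReplica(1, 1000000000, 3); // false: node-a fits 2 such replicas, node-b fits 1
limits.overflowForGlobal(1.5, 1000000000); // true: node-b cannot host a 1.5 CPU pod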

View File

@@ -0,0 +1,21 @@
import angular from 'angular';
angular.module('portainer.kubernetes').factory('KubernetesNodesLimits', KubernetesNodesLimitsFactory);
/* @ngInject */
function KubernetesNodesLimitsFactory($resource, API_ENDPOINT_KUBERNETES, EndpointProvider) {
const url = API_ENDPOINT_KUBERNETES + '/:endpointId/nodes_limits';
return $resource(
url,
{
endpointId: EndpointProvider.endpointID,
},
{
get: {
method: 'GET',
ignoreLoadingBar: true,
transformResponse: (data) => ({ data: JSON.parse(data) }),
},
}
);
}

View File

@@ -0,0 +1,25 @@
import angular from 'angular';
import PortainerError from 'Portainer/error';
import { KubernetesNodesLimits } from 'Kubernetes/models/nodes-limits/models';
class KubernetesNodesLimitsService {
/* @ngInject */
constructor(KubernetesNodesLimits) {
this.KubernetesNodesLimits = KubernetesNodesLimits;
}
/**
* GET
*/
async get() {
try {
const nodesLimits = await this.KubernetesNodesLimits.get().$promise;
return new KubernetesNodesLimits(nodesLimits.data);
} catch (err) {
throw new PortainerError('Unable to retrieve nodes limits', err);
}
}
}
export default KubernetesNodesLimitsService;
angular.module('portainer.kubernetes').service('KubernetesNodesLimitsService', KubernetesNodesLimitsService);
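A minimal consumption sketch, mirroring how the create-application controller uses the service further down (the controller class here is hypothetical):

class ExampleController {
  /* @ngInject */
  constructor(KubernetesNodesLimitsService) {
    this.KubernetesNodesLimitsService = KubernetesNodesLimitsService;
  }
  async $onInit() {
    // Resolves to a KubernetesNodesLimits instance (MaxCPU in cores, MaxMemory in bytes)
    this.nodesLimits = await this.KubernetesNodesLimitsService.get();
  }
}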

View File

@@ -722,6 +722,13 @@
</p>
</div>
</div>
<div class="form-group" ng-if="ctrl.nodeLimitsOverflow()">
<div class="col-sm-12 small text-danger">
<i class="fa fa-exclamation-circle red-icon" aria-hidden="true" style="margin-right: 2px;"></i>
These reservations would exceed the resources currently available in the cluster.
</div>
</div>
<!-- !cpu-limit-input -->
<!-- #endregion -->

View File

@@ -49,7 +49,8 @@ class KubernetesCreateApplicationController {
KubernetesIngressService,
KubernetesPersistentVolumeClaimService,
KubernetesVolumeService,
RegistryService
RegistryService,
KubernetesNodesLimitsService
) {
this.$async = $async;
this.$state = $state;
@@ -65,6 +66,7 @@ class KubernetesCreateApplicationController {
this.KubernetesIngressService = KubernetesIngressService;
this.KubernetesPersistentVolumeClaimService = KubernetesPersistentVolumeClaimService;
this.RegistryService = RegistryService;
this.KubernetesNodesLimitsService = KubernetesNodesLimitsService;
this.ApplicationDeploymentTypes = KubernetesApplicationDeploymentTypes;
this.ApplicationDataAccessPolicies = KubernetesApplicationDataAccessPolicies;
@@ -92,6 +94,10 @@ class KubernetesCreateApplicationController {
memory: 0,
cpu: 0,
},
namespaceLimits: {
memory: 0,
cpu: 0,
},
resourcePoolHasQuota: false,
viewReady: false,
availableSizeUnits: ['MB', 'GB', 'TB'],
@@ -583,14 +589,28 @@ class KubernetesCreateApplicationController {
return !this.state.sliders.memory.max || !this.state.sliders.cpu.max;
}
resourceReservationsOverflow() {
const instances = this.formValues.ReplicaCount;
nodeLimitsOverflow() {
const cpu = this.formValues.CpuLimit;
const maxCpu = this.state.sliders.cpu.max;
const memory = this.formValues.MemoryLimit;
const maxMemory = this.state.sliders.memory.max;
const memory = KubernetesResourceReservationHelper.bytesValue(this.formValues.MemoryLimit);
if (cpu * instances > maxCpu) {
const overflow = this.nodesLimits.overflowForReplica(cpu, memory, 1);
return overflow;
}
effectiveInstances() {
return this.formValues.DeploymentType === this.ApplicationDeploymentTypes.GLOBAL ? this.nodeNumber : this.formValues.ReplicaCount;
}
resourceReservationsOverflow() {
const instances = this.effectiveInstances();
const cpu = this.formValues.CpuLimit;
const maxCpu = this.state.namespaceLimits.cpu;
const memory = KubernetesResourceReservationHelper.bytesValue(this.formValues.MemoryLimit);
const maxMemory = this.state.namespaceLimits.memory;
// compare in millicores (multiply both sides by 1000) to avoid floating point artifacts such as 0.1 * 3 > 0.3
if (cpu * 1000 * instances > maxCpu * 1000) {
return true;
}
@@ -598,17 +618,23 @@ class KubernetesCreateApplicationController {
return true;
}
return false;
if (this.formValues.DeploymentType === this.ApplicationDeploymentTypes.REPLICATED) {
return this.nodesLimits.overflowForReplica(cpu, memory, instances);
}
// DeploymentType == GLOBAL
return this.nodesLimits.overflowForGlobal(cpu, memory);
}
autoScalerOverflow() {
const instances = this.formValues.AutoScaler.MaxReplicas;
const cpu = this.formValues.CpuLimit;
const maxCpu = this.state.sliders.cpu.max;
const memory = this.formValues.MemoryLimit;
const maxMemory = this.state.sliders.memory.max;
const maxCpu = this.state.namespaceLimits.cpu;
const memory = KubernetesResourceReservationHelper.bytesValue(this.formValues.MemoryLimit);
const maxMemory = this.state.namespaceLimits.memory;
if (cpu * instances > maxCpu) {
// compare in millicores (multiply both sides by 1000) to avoid floating point artifacts such as 0.1 * 3 > 0.3
if (cpu * 1000 * instances > maxCpu * 1000) {
return true;
}
@@ -616,7 +642,7 @@ class KubernetesCreateApplicationController {
return true;
}
return false;
return this.nodesLimits.overflowForReplica(cpu, memory, instances);
}
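The scaled comparison used in the two overflow checks above sidesteps binary floating point rounding; a standalone JavaScript illustration with example values:

console.log(0.1 * 3 > 0.3); // true, because 0.1 * 3 === 0.30000000000000004
console.log(0.1 * 1000 * 3 > 0.3 * 1000); // false: comparing in millicores gives 300 > 300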
publishViaLoadBalancerEnabled() {
@@ -732,50 +758,66 @@ class KubernetesCreateApplicationController {
/* #region DATA AUTO REFRESH */
updateSliders() {
const quota = this.formValues.ResourcePool.Quota;
let minCpu = 0,
minMemory = 0,
maxCpu = this.state.namespaceLimits.cpu,
maxMemory = this.state.namespaceLimits.memory;
if (quota) {
if (quota.CpuLimit) {
minCpu = KubernetesApplicationQuotaDefaults.CpuLimit;
}
if (quota.MemoryLimit) {
minMemory = KubernetesResourceReservationHelper.bytesValue(KubernetesApplicationQuotaDefaults.MemoryLimit);
}
}
maxCpu = Math.min(maxCpu, this.nodesLimits.MaxCPU);
maxMemory = Math.min(maxMemory, this.nodesLimits.MaxMemory);
if (maxMemory < minMemory) {
minMemory = 0;
maxMemory = 0;
}
this.state.sliders.memory.min = KubernetesResourceReservationHelper.megaBytesValue(minMemory);
this.state.sliders.memory.max = KubernetesResourceReservationHelper.megaBytesValue(maxMemory);
this.state.sliders.cpu.min = minCpu;
this.state.sliders.cpu.max = _.floor(maxCpu, 2);
if (!this.state.isEdit) {
this.formValues.CpuLimit = minCpu;
this.formValues.MemoryLimit = KubernetesResourceReservationHelper.megaBytesValue(minMemory);
}
}
updateNamespaceLimits() {
let maxCpu = this.state.nodes.cpu;
let maxMemory = this.state.nodes.memory;
const quota = this.formValues.ResourcePool.Quota;
this.state.resourcePoolHasQuota = false;
const quota = this.formValues.ResourcePool.Quota;
let minCpu,
maxCpu,
minMemory,
maxMemory = 0;
if (quota) {
if (quota.CpuLimit) {
this.state.resourcePoolHasQuota = true;
minCpu = KubernetesApplicationQuotaDefaults.CpuLimit;
maxCpu = quota.CpuLimit - quota.CpuLimitUsed;
if (this.state.isEdit && this.savedFormValues.CpuLimit) {
maxCpu += this.savedFormValues.CpuLimit * this.savedFormValues.ReplicaCount;
maxCpu += this.savedFormValues.CpuLimit * this.effectiveInstances();
}
} else {
minCpu = 0;
maxCpu = this.state.nodes.cpu;
}
if (quota.MemoryLimit) {
this.state.resourcePoolHasQuota = true;
minMemory = KubernetesApplicationQuotaDefaults.MemoryLimit;
maxMemory = quota.MemoryLimit - quota.MemoryLimitUsed;
if (this.state.isEdit && this.savedFormValues.MemoryLimit) {
maxMemory += KubernetesResourceReservationHelper.bytesValue(this.savedFormValues.MemoryLimit) * this.savedFormValues.ReplicaCount;
maxMemory += KubernetesResourceReservationHelper.bytesValue(this.savedFormValues.MemoryLimit) * this.effectiveInstances();
}
} else {
minMemory = 0;
maxMemory = this.state.nodes.memory;
}
} else {
minCpu = 0;
maxCpu = this.state.nodes.cpu;
minMemory = 0;
maxMemory = this.state.nodes.memory;
}
this.state.sliders.memory.min = minMemory;
this.state.sliders.memory.max = KubernetesResourceReservationHelper.megaBytesValue(maxMemory);
this.state.sliders.cpu.min = minCpu;
this.state.sliders.cpu.max = _.round(maxCpu, 2);
if (!this.state.isEdit) {
this.formValues.CpuLimit = minCpu;
this.formValues.MemoryLimit = minMemory;
}
this.state.namespaceLimits.cpu = maxCpu;
this.state.namespaceLimits.memory = maxMemory;
}
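Put together, updateNamespaceLimits works out what the namespace quota still allows, and updateSliders then caps that by the largest free capacity on any single node (nodesLimits.MaxCPU / MaxMemory). A small numeric sketch with assumed figures:

const namespaceRemainder = { cpu: 2, memory: 4000000000 }; // quota limit minus current usage (assumed)
const largestNodeFree = { cpu: 1.5, memory: 3000000000 }; // nodesLimits.MaxCPU / MaxMemory (assumed)
const sliderMax = {
  cpu: Math.min(namespaceRemainder.cpu, largestNodeFree.cpu), // 1.5
  memory: Math.min(namespaceRemainder.memory, largestNodeFree.memory), // 3000000000
};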
refreshStacks(namespace) {
@@ -863,6 +905,7 @@ class KubernetesCreateApplicationController {
onResourcePoolSelectionChange() {
return this.$async(async () => {
const namespace = this.formValues.ResourcePool.Namespace.Name;
this.updateNamespaceLimits();
this.updateSliders();
await this.refreshNamespaceData(namespace);
this.resetFormValues();
@@ -947,12 +990,14 @@ class KubernetesCreateApplicationController {
this.state.useLoadBalancer = this.endpoint.Kubernetes.Configuration.UseLoadBalancer;
this.state.useServerMetrics = this.endpoint.Kubernetes.Configuration.UseServerMetrics;
const [resourcePools, nodes, ingresses] = await Promise.all([
const [resourcePools, nodes, ingresses, nodesLimits] = await Promise.all([
this.KubernetesResourcePoolService.get(),
this.KubernetesNodeService.get(),
this.KubernetesIngressService.get(),
this.KubernetesNodesLimitsService.get(),
]);
this.ingresses = ingresses;
this.nodesLimits = nodesLimits;
this.resourcePools = _.filter(resourcePools, (resourcePool) => !KubernetesNamespaceHelper.isSystemNamespace(resourcePool.Namespace.Name));
this.formValues.ResourcePool = this.resourcePools[0];
@@ -965,6 +1010,7 @@ class KubernetesCreateApplicationController {
this.state.nodes.cpu += item.CPU;
});
this.nodesLabels = KubernetesNodeHelper.generateNodeLabelsFromNodes(nodes);
this.nodeNumber = nodes.length;
const namespace = this.state.isEdit ? this.$state.params.namespace : this.formValues.ResourcePool.Namespace.Name;
await this.refreshNamespaceData(namespace);
@@ -998,6 +1044,12 @@ class KubernetesCreateApplicationController {
this.formValues.AutoScaler = KubernetesApplicationHelper.generateAutoScalerFormValueFromHorizontalPodAutoScaler(null, this.formValues.ReplicaCount);
this.formValues.OriginalIngressClasses = angular.copy(this.ingresses);
}
if (this.state.isEdit) {
this.nodesLimits.excludesPods(this.application.Pods, this.formValues.CpuLimit, KubernetesResourceReservationHelper.bytesValue(this.formValues.MemoryLimit));
}
this.updateNamespaceLimits();
this.updateSliders();
} catch (err) {
this.Notifications.error('Failure', err, 'Unable to load view data');