enhance: only use streaming service on e2e and nightly e2e (#38743)

issue: #38399

Signed-off-by: chyezh <chyezh@outlook.com>
pull/38767/head
Zhen Ye 2024-12-26 11:32:50 +08:00 committed by GitHub
parent 85f462be1a
commit 5001878b8a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 29 additions and 378 deletions

View File

@@ -96,7 +96,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed-pulsar', 'distributed-kafka', 'standalone-authentication', 'standalone-one-pod', 'distributed-streaming-service'
values 'standalone', 'distributed-pulsar', 'distributed-kafka', 'standalone-authentication', 'standalone-one-pod'
}
}
stages {

View File

@@ -89,7 +89,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed', 'standalone-kafka-mmap', 'distributed-streaming-service'
values 'standalone', 'distributed', 'standalone-kafka-mmap'
}
}
stages {
@@ -100,32 +100,15 @@ pipeline {
def helm_release_name = get_helm_release_name milvus_deployment_option
// pvc name would be <pod-name>-volume-0, used for pytest result archiving
def pvc = env.JENKINS_AGENT_NAME + '-volume-0'
if (milvus_deployment_option == 'distributed-streaming-service') {
try {
tekton.pytest helm_release_name: helm_release_name,
pvc: pvc,
milvus_helm_version: milvus_helm_chart_version,
ciMode: 'e2e',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
} catch (Exception e) {
println e
}
} else {
tekton.pytest helm_release_name: helm_release_name,
pvc: pvc,
milvus_helm_version: milvus_helm_chart_version,
ciMode: 'e2e',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
}
tekton.pytest helm_release_name: helm_release_name,
pvc: pvc,
milvus_helm_version: milvus_helm_chart_version,
ciMode: 'e2e',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
}
}
}

View File

@@ -8,6 +8,8 @@ affinity:
weight: 1
cluster:
enabled: true
streaming:
enabled: true
dataCoordinator:
resources:
limits:

View File

@@ -1,274 +0,0 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
streaming:
enabled: true
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
extraConfigFiles:
user.yaml: |+
indexCoord:
scheduler:
interval: 100
indexNode:
scheduler:
buildParallel: 4
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
replicaCount: 2
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

View File

@@ -8,6 +8,8 @@ affinity:
weight: 1
cluster:
enabled: false
streaming:
enabled: true
dataCoordinator:
resources:
limits:

View File

@@ -8,6 +8,8 @@ affinity:
weight: 1
cluster:
enabled: false
streaming:
enabled: true
dataCoordinator:
resources:
limits:

View File

@@ -8,6 +8,8 @@ affinity:
weight: 1
cluster:
enabled: false
streaming:
enabled: true
etcd:
enabled: false
metrics:

View File

@@ -1,5 +1,7 @@
cluster:
enabled: true
streaming:
enabled: true
common:
security:
authorizationEnabled: false

View File

@@ -1,5 +1,7 @@
cluster:
enabled: true
streaming:
enabled: true
common:
security:
authorizationEnabled: false

View File

@@ -1,76 +0,0 @@
cluster:
enabled: true
streaming:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: true
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

View File

@@ -1,5 +1,7 @@
cluster:
enabled: false
streaming:
enabled: true
common:
security:
authorizationEnabled: false

View File

@@ -1,5 +1,7 @@
cluster:
enabled: false
streaming:
enabled: true
common:
security:
authorizationEnabled: true

View File

@@ -8,6 +8,8 @@ affinity:
weight: 1
cluster:
enabled: false
streaming:
enabled: true
common:
security:
authorizationEnabled: false