chore: backport ci changes to 1.10 (#24776)

* chore: use 'packager' and 'slack' images (#24742)

* chore: refactor unit tests (#24774)
pull/24840/head
Brandon Pfeifer 2024-03-15 16:56:07 -04:00 committed by GitHub
parent 6d4b0e0e46
commit d043281c55
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 161 additions and 516 deletions

View File

@ -1,7 +1,7 @@
version: 2.1
orbs:
aws-cli: circleci/aws-cli@4.1.2
aws-s3: circleci/aws-s3@4.0.0
aws-s3: circleci/aws-s3@2.0.0
parameters:
cross-container-tag:
# when updating the go version, should also update the go version in go.mod
@ -13,15 +13,6 @@ parameters:
type: string
default: build
commands:
install_rust:
steps:
- run:
name: Install Rust toolchain
command: |
./scripts/ci/install-rust.sh
echo 'export PATH=${HOME}/.cargo/bin:${PATH}' >> $BASH_ENV
jobs:
build_binaries:
docker:
@ -108,44 +99,27 @@ jobs:
- /go/pkg/mod
- /root/.cargo
- /root/.cache/go-build
build_packages:
machine:
enabled: true
docker_layer_caching: true
image: ubuntu-2004:202107-02
docker:
- image: us-east1-docker.pkg.dev/influxdata-team-edge/ci-support/ci-packager:latest
auth:
username: _json_key
password: $CISUPPORT_GCS_AUTHORIZATION
steps:
- checkout
- attach_workspace:
at: /tmp/workspace
- checkout
- run: |
export DEBIAN_FRONTEND=noninteractive
sudo -E apt-get update
sudo -E apt-get install --no-install-recommends --yes \
asciidoc \
build-essential \
git \
python3 \
rpm \
ruby-dev \
xmlto
# The Ruby version in the Ubuntu 20.04 repositories is 2.x, so we need to install a compatible version of dotenv.
sudo gem install dotenv -v 2.8.1
sudo gem install fpm
( cd man ; make build ; gzip -9 ./*.1 )
python3 -m pip install -r .circleci/scripts/package/requirements.txt
# Unfortunately, this must be executed as root. This is so permission
# modifying commands (chown, chmod, etc.) succeed.
sudo --preserve-env=CIRCLE_TAG,CIRCLE_SHA1 .circleci/scripts/package/build.py
- store_artifacts:
path: packages/
packager .circleci/packages/config.yaml
- persist_to_workspace:
root: .
paths:
- packages
- store_artifacts:
path: packages
sign_packages:
circleci_ip_ranges: true
@ -177,9 +151,11 @@ jobs:
then
# Since all artifacts are present, sign them here. This saves Circle
# credits over spinning up another instance just to separate out the
# checksum job. Individual checksums are written by the
# "build_packages" script.
# checksum job.
sha256sum "${target}" >> "/tmp/workspace/packages/influxdb.${CIRCLE_TAG}.digests"
md5sum "${target}" >"${target}.md5"
sha256sum "${target}" >"${target}.sha256"
fi
done
- persist_to_workspace:
@ -219,83 +195,77 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- checkout
- run:
name: checkfmt
command: ./checkfmt.sh
- run:
name: codegen
command: ./generate.sh
- run:
name: go vet
command: go vet ./...
- run: ./checkfmt.sh
- run: ./generate.sh
- run: go vet ./...
unit_test:
docker:
- image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >>
steps:
- checkout
- restore_cache:
keys:
- influxdb-cache-v1-{{ checksum "go.mod" }}
- influxdb-cache-v1
- run:
name: Execute tests
command: |
set -x
mkdir -p junit
gotestsum --junitfile junit/influxdb.junit.xml -- ./...
no_output_timeout: 1500s
- store_test_results:
path: junit/
unit_test_tsi:
docker:
- image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >>
resource_class: large
steps:
- checkout
- restore_cache:
keys:
- influxdb-cache-v1-{{ checksum "go.mod" }}
- influxdb-cache-v1
- run:
name: Execute tests
command: |
set -x
mkdir -p junit-tsi
export INFLUXDB_DATA_INDEX_VERSION="tsi1"
gotestsum --junitfile junit-tsi/influxdb.junit.xml -- ./...
no_output_timeout: 1500s
- store_test_results:
path: junit-tsi/
unit_test_race:
docker:
- image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >>
resource_class: xlarge
parameters:
data:
type: string
default: inmem
race:
type: boolean
default: false
environment:
INFLUXDB_DATA_INDEX_VERSION: << parameters.data >>
GORACE: halt_on_error=1
steps:
- checkout
- restore_cache:
keys:
- influxdb-cache-v1-{{ checksum "go.mod" }}
- influxdb-cache-v1
- run:
name: Execute tests
command: |
set -x
mkdir -p junit-race/
export GORACE="halt_on_error=1"
# "resource_class: xlarge" creates a Docker container with eight
# virtual cpu cores. However, applications like "nproc" return
# the host machine's core count (which in this case is 36).
# When fewer cores are available than advertised, the
# race-tests fail.
#
# We'll manually reduce the number of available cores to what
# is specified by the CircleCI documentation:
# https://circleci.com/product/features/resource-classes/
taskset -c 0-7 \
gotestsum --junitfile junit-race/influxdb.junit.xml -- -race ./...
no_output_timeout: 1500s
- store_test_results:
path: junit-race/
- when:
condition: << parameters.race >>
steps:
- run:
name: Execute Tests
command: |
mkdir -p junit-race-<< parameters.data >>
# "resource_class: xlarge" creates a Docker container with eight
# virtual cpu cores. However, applications like "nproc" return
# the host machine's core count (which in this case is 36).
# When fewer cores are available than advertised, the tests
# sometimes fail.
#
# We'll manually reduce the number of available cores to what
# is specified by the CircleCI documentation:
# https://circleci.com/product/features/resource-classes/
taskset -c 0-7 \
gotestsum \
--format=standard-verbose \
--junitfile=junit-race-<< parameters.data >>/influxdb.junit.xml \
-- -race ./...
- store_test_results:
path: junit-race-<< parameters.data >>/
- when:
condition: { not: << parameters.race >> }
steps:
- run:
name: Execute Tests
command: |
mkdir -p junit-<< parameters.data >>
# "resource_class: xlarge" creates a Docker container with eight
# virtual cpu cores. However, applications like "nproc" return
# the host machine's core count (which in this case is 36).
# When fewer cores are available than advertised, the tests
# sometimes fail.
#
# We'll manually reduce the number of available cores to what
# is specified by the CircleCI documentation:
# https://circleci.com/product/features/resource-classes/
taskset -c 0-7 \
gotestsum \
--format=standard-verbose \
--junitfile=junit-<< parameters.data >>/influxdb.junit.xml \
-- ./...
- store_test_results:
path: junit-<< parameters.data >>/
fluxtest:
docker:
- image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >>
@ -323,47 +293,63 @@ jobs:
- changelog_artifacts
publish_changelog:
docker:
- image: cimg/base:current
parameters:
build_type:
workflow:
type: string
docker:
- image: cimg/python:3.6
steps:
- attach_workspace:
at: /tmp/workspace
- aws-cli/setup:
aws_access_key_id: INFLUXDB1X_AWS_ACCESS_KEY_ID
aws_secret_access_key: INFLUXDB1X_AWS_SECRET_ACCESS_KEY
region: us-east-1
- when:
condition:
equal: [ << parameters.build_type >>, nightly ]
equal: [ << parameters.workflow >>, release ]
steps:
- aws-s3/copy:
from: /tmp/workspace/changelog_artifacts/CHANGELOG.md
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/1.10/CHANGELOG.nightly.md
aws-region: INFLUXDB1X_AWS_REGION
aws-access-key-id: INFLUXDB1X_AWS_ACCESS_KEY_ID
aws-secret-access-key: INFLUXDB1X_AWS_SECRET_ACCESS_KEY
from: /tmp/workspace/changelog_artifacts/CHANGELOG.md
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/releases/<< pipeline.git.tag >>/CHANGELOG.<< pipeline.git.tag >>.md
- when:
condition:
equal: [ << parameters.build_type >>, release ]
equal: [ << parameters.workflow >>, nightly ]
steps:
- aws-s3/copy:
from: /tmp/workspace/changelog_artifacts/CHANGELOG.md
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/1.10/CHANGELOG.<< pipeline.git.tag >>.md
aws-region: INFLUXDB1X_AWS_REGION
aws-access-key-id: INFLUXDB1X_AWS_ACCESS_KEY_ID
aws-secret-access-key: INFLUXDB1X_AWS_SECRET_ACCESS_KEY
from: /tmp/workspace/changelog_artifacts/CHANGELOG.md
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/nightlies/<< pipeline.git.branch >>/CHANGELOG.md
publish_packages:
docker:
- image: cimg/base:current
- image: cimg/python:3.6
steps:
- attach_workspace:
at: /tmp/workspace
- checkout
- aws-cli/setup:
aws_access_key_id: INFLUXDB1X_AWS_ACCESS_KEY_ID
aws_secret_access_key: INFLUXDB1X_AWS_SECRET_ACCESS_KEY
region: us-east-1
- aws-s3/sync:
from: /tmp/workspace/packages
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/1.10/
aws-region: INFLUXDB1X_AWS_REGION
aws-access-key-id: INFLUXDB1X_AWS_ACCESS_KEY_ID
aws-secret-access-key: INFLUXDB1X_AWS_SECRET_ACCESS_KEY
from: /tmp/workspace/packages
to: s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/releases/<< pipeline.git.tag >>
slack:
docker:
- image: us-east1-docker.pkg.dev/influxdata-team-edge/ci-support/ci-slack:latest
auth:
username: _json_key
password: $CISUPPORT_GCS_AUTHORIZATION
steps:
- attach_workspace:
at: /tmp/workspace
- run:
command: |
SLACK_ARTIFACT_URL=s3://${INFLUXDB1X_ARTIFACTS_BUCKET}/influxdb/releases/<< pipeline.git.tag >> slack
environment:
SLACK_ARTIFACT_ROOT: /tmp/workspace/packages
SLACK_RELEASE_MESSAGE: New InfluxDB Release
release_filter: &release_filter
filters:
@ -400,23 +386,33 @@ workflows:
<<: *release_filter
- publish_changelog:
<<: *release_filter
build_type: release
workflow: release
requires:
- changelog
- publish_packages:
<<: *release_filter
requires:
- sign_packages
- slack:
<<: *release_filter
requires:
- publish_packages
- static_code_checks:
<<: *release_filter
- fluxtest:
<<: *release_filter
- unit_test:
<<: *release_filter
- unit_test_tsi:
name: unit_test_inmem
data: inmem
- unit_test:
<<: *release_filter
- unit_test_race:
name: unit_test_tsi1
data: tsi1
- unit_test:
<<: *release_filter
name: unit_test_race
race: true
on_push:
when:
equal: [ << pipeline.parameters.workflow >>, build ]
@ -430,9 +426,15 @@ workflows:
- build_packages
- static_code_checks
- fluxtest
- unit_test
- unit_test_tsi
- unit_test_race
- unit_test:
name: unit_test_inmem
data: inmem
- unit_test:
name: unit_test_tsi1
data: tsi1
- unit_test:
name: unit_test_race
race: true
nightly:
when:
and:
@ -448,22 +450,28 @@ workflows:
jobs:
- changelog
- publish_changelog:
build_type: nightly
workflow: nightly
requires:
- changelog
- static_code_checks
- fluxtest
- unit_test
- unit_test_tsi
- unit_test_race
- unit_test:
name: unit_test_inmem
data: inmem
- unit_test:
name: unit_test_tsi1
data: tsi1
- unit_test:
name: unit_test_race
race: true
- build_binaries:
requires:
- changelog
- static_code_checks
- fluxtest
- unit_test
- unit_test_tsi
- unit_test_inmem
- unit_test_race
- unit_test_tsi1
- build_packages:
requires:
- build_binaries

View File

@ -1,4 +1,11 @@
---
version:
release:
match: '^v[0-9]+.[0-9]+.[0-9]+'
value: '{{env.CIRCLE_TAG[1:]}}'
default:
value: '1.x-{{env.CIRCLE_SHA1[:8]}}'
sources:
- binary: /tmp/workspace/bins/influxdb_bin_linux_amd64-*.tar.gz
target: packages/
@ -6,7 +13,9 @@ sources:
plat: linux
packages:
- name: influxdb
- name: influxdb
description: Distributed time-series database.
license: MIT
binaries:
- influx
- influx_inspect
@ -48,4 +57,7 @@ packages:
group: root
perms: 0755
target: usr/lib/influxdb/scripts/influxd-systemd-start.sh
source: .circleci/scripts/package/influxdb
rpm_attributes:
- 750,influxdb,influxdb:/var/log/influxdb
- 750,influxdb,influxdb:/var/lib/influxdb
source: .circleci/packages/influxdb

View File

@ -1,369 +0,0 @@
#!/usr/bin/env python3
import glob
import os
import re
import shutil
import subprocess
import tempfile
import yaml
def build_linux_archive(source: dict, package: dict, version: str) -> None:
    """
    Builds a Linux Archive.

    This archive contains the binary artifacts, configuration, and scripts
    installed by the DEB and RPM packages. This mimics the file-system. So,
    binaries are installed into "/usr/bin", configuration into "/etc", and
    scripts into their relevant directories. Permissions match those of
    the DEB and RPM packages.

    Args:
        source:  source mapping from config.yaml; reads the "binary",
                 "target", "plat", and "arch" keys.
        package: package mapping from config.yaml; reads the "source",
                 "name", "extras", "binaries", and "perm_overrides" keys.
        version: version string embedded in the archive file name.

    Raises:
        subprocess.CalledProcessError: if any `tar` invocation fails.

    NOTE(review): `shutil.chown(..., user="root", group="root")` requires
    this script to run as root — confirmed by the CI step that invokes the
    packaging with `sudo`.
    """
    with tempfile.TemporaryDirectory() as workspace:
        # Seed the workspace with the package's static file-system image.
        # fmt: off
        shutil.copytree(os.path.join(package["source"], "fs"),
            workspace, dirs_exist_ok=True, ignore=shutil.ignore_patterns(".keepdir"))
        # fmt: on

        # Copy per-package extra files (e.g. scripts) to their targets.
        for extra in package["extras"]:
            shutil.copy(extra["source"], os.path.join(workspace, extra["target"]))

        for binary in package["binaries"]:
            # Since the binaries for different platforms and architectures
            # are named the same, the binaries are stored within archives.
            # The archive name specifies the platform and architecture.
            # Each binary must be extracted with `tar`.
            # fmt: off
            subprocess.check_call(
                [
                    # globbing is required as the archive name contains the
                    # release version or git commit of the repository. This
                    # allows the configuration to remain untouched between
                    # different builds.
                    "tar", "-xf", glob.glob(source["binary"])[0],
                    # binaries are copied to "usr/bin"
                    "-C", os.path.join(workspace, "usr/bin"),
                    binary,
                ]
            )
            # fmt: on

        # After the package contents are copied into the working directory,
        # the permissions must be updated. Since the CI executor may change
        # occasionally (images/ORBs deprecated over time), the umask may
        # not be what we expect. This allows this packaging script to be
        # agnostic to umask/system configuration.
        for root, dirs, files in os.walk(workspace):
            for target in [os.path.join(root, f) for f in files]:
                # files in "usr/bin" are executable
                if os.path.relpath(root, workspace) == "usr/bin":
                    os.chmod(target, 0o0755)
                else:
                    # standard file permissions
                    os.chmod(target, 0o0644)
                # fmt: off
                shutil.chown(
                    target,
                    user = "root",
                    group = "root")
                # fmt: on
            for target in [os.path.join(root, d) for d in dirs]:
                # standard directory permissions
                os.chmod(target, 0o0755)
                # fmt: off
                shutil.chown(
                    target,
                    user = "root",
                    group = "root")
                # fmt: on

        # Apply per-path ownership/permission overrides from the config.
        for override in package["perm_overrides"]:
            target = os.path.join(workspace, override["target"])
            os.chmod(target, override["perms"])
            # "owner" and "group" should be a system account and group with
            # a well-defined UID and GID. Otherwise, the UID/GID might vary
            # between systems. When the archive is extracted/package is
            # installed, things may not behave as we would expect.
            # fmt: off
            shutil.chown(
                target,
                user = override["owner"],
                group = override["group"])
            # fmt: on

        os.makedirs(source["target"], exist_ok=True)

        # fmt: off
        subprocess.check_call([
            "tar", "-czf",
            os.path.join(
                source["target"],
                "{:s}-{:s}_{:s}_{:s}.tar.gz".format(
                    package["name"],
                    version,
                    source["plat"],
                    source["arch"]
                )
            ),
            # ".keepdir" allows Git to track otherwise empty directories. The presence
            # of the directories allows `package["extras"]` and `package["binaries"]`
            # to be copied into the archive without requiring "mkdir". These
            # directories are excluded from the final archive.
            "--exclude", ".keepdir",
            # This re-parents the contents of the archive with `package["name"]-version`.
            # It is undocumented, however, when matching, "--transform" always removes
            # the trailing slash. This regex must handle "./" and "./<more components>".
            "--transform",
            "s#^.\(/\|$\)#{:s}-{:s}/#".format(
                package["name"],
                version
            ),
            # compress everything within `workspace`
            "-C", workspace, '.'
        ])
        # fmt: on
def build_darwin_archive(source: dict, package: dict, version: str) -> None:
    """
    Builds a Darwin Archive.

    This archive contains binary artifacts and configuration. Unlike the
    linux archive, which contains the configuration and matches the file-
    system of the DEB and RPM packages, everything is located within the
    root of the archive. However, permissions do match those of the DEB
    and RPM packages.

    Args:
        source:  source mapping from config.yaml; reads the "binary",
                 "target", "plat", and "arch" keys.
        package: package mapping from config.yaml; reads the "name",
                 "extras", and "binaries" keys.
        version: version string embedded in the archive file name.

    Raises:
        subprocess.CalledProcessError: if any `tar` invocation fails.

    NOTE(review): `shutil.chown(..., user="root", group="root")` requires
    running as root, same as the Linux builders.
    """
    with tempfile.TemporaryDirectory() as workspace:
        # Extras land flat in the archive root (no file-system mimicry).
        for extra in package["extras"]:
            target = os.path.join(workspace, os.path.basename(extra["target"]))
            shutil.copy(extra["source"], target)
            os.chmod(target, 0o0644)
            # fmt: off
            shutil.chown(
                target,
                user = "root",
                group = "root")
            # fmt: on
        for binary in package["binaries"]:
            # Since the binaries for different platforms and architectures
            # are named the same, the binaries are stored within archives.
            # The archive name specifies the platform and architecture.
            # Each binary must be extracted with `tar`.
            # fmt: off
            subprocess.check_call([
                # globbing is required as the archive name contains the
                # release version or git commit of the repository. This
                # allows the configuration to remain untouched between
                # different builds.
                "tar", "-xf", glob.glob(source["binary"])[0],
                # binaries are copied to "/"
                "-C", workspace,
                binary
            ])
            # fmt: on
            target = os.path.join(workspace, binary)
            os.chmod(target, 0o0755)
            # fmt: off
            shutil.chown(
                target,
                user = "root",
                group = "root")
            # fmt: on
        os.makedirs(source["target"], exist_ok=True)
        # fmt: off
        subprocess.check_call([
            "tar", "-czf",
            os.path.join(
                source["target"],
                "{:s}-{:s}_{:s}_{:s}.tar.gz".format(
                    package["name"],
                    version,
                    source["plat"],
                    source["arch"]
                )
            ),
            # This re-parents the contents of the archive with `package["name"]-version`.
            # It is undocumented, however, when matching, "--transform" always removes
            # the trailing slash. This regex must handle "./" and "./<more components>".
            "--transform",
            "s#^.\(/\|$\)#{:s}-{:s}/#".format(
                package["name"],
                version
            ),
            # compress everything within `workspace`
            "-C", workspace, '.'
        ])
        # fmt: on
def build_linux_package(source: dict, package: dict, version: str) -> None:
    """
    Constructs a DEB or RPM Package.

    Stages the package file-system image ("fs/") and maintainer scripts
    ("control/") into a temporary workspace, extracts the binaries into
    "fs/usr/bin", normalizes permissions/ownership, then invokes
    `fpm_wrapper` once for "rpm" and once for "deb".

    Args:
        source:  source mapping from config.yaml; reads the "binary" and
                 "target" keys (plus keys used by `fpm_wrapper`).
        package: package mapping from config.yaml; reads the "source",
                 "extras", "binaries", and "perm_overrides" keys.
        version: package version handed through to `fpm_wrapper`.

    Raises:
        subprocess.CalledProcessError: if `tar` or `fpm` fails.

    NOTE(review): the permission walk below also sets "control/" scripts
    to 0644; presumably `fpm` does not need them executable — confirm.
    """
    with tempfile.TemporaryDirectory() as workspace:
        # Copy the whole package source tree ("fs/" and "control/").
        # fmt: off
        shutil.copytree(package["source"], workspace,
            dirs_exist_ok=True, ignore=shutil.ignore_patterns(".keepdir"))
        # fmt: on
        # Extras are placed inside the staged file-system image.
        for extra in package["extras"]:
            shutil.copy(extra["source"], os.path.join(workspace, "fs", extra["target"]))
        for binary in package["binaries"]:
            # Since the binaries for different platforms and architectures
            # are named the same, the binaries are stored within archives.
            # The archive name specifies the platform and architecture.
            # Each binary must be extracted with `tar`.
            # fmt: off
            subprocess.check_call(
                [
                    # globbing is required as the archive name contains the
                    # release version or git commit of the repository. This
                    # allows the configuration to remain untouched between
                    # different builds.
                    "tar", "-xf", glob.glob(source["binary"])[0],
                    # binaries are copied to "usr/bin"
                    "-C", os.path.join(workspace, "fs/usr/bin"),
                    binary,
                ]
            )
            # fmt: on
        # After the package contents are copied into the working directory,
        # the permissions must be updated. Since the CI executor may change
        # occasionally (images/ORBs deprecated over time), the umask may
        # not be what we expect. This allows this packaging script to be
        # agnostic to umask/system configuration.
        for root, dirs, files in os.walk(workspace):
            for target in [os.path.join(root, f) for f in files]:
                # files in "fs/usr/bin" are executable
                if os.path.relpath(root, workspace) == "fs/usr/bin":
                    os.chmod(target, 0o0755)
                else:
                    # standard file permissions
                    os.chmod(target, 0o0644)
                # fmt: off
                shutil.chown(
                    target,
                    user = "root",
                    group = "root")
                # fmt: on
            for target in [os.path.join(root, d) for d in dirs]:
                # standard directory permissions
                os.chmod(target, 0o0755)
                # fmt: off
                shutil.chown(
                    target,
                    user = "root",
                    group = "root")
                # fmt: on
        # Overrides are relative to the staged file-system root "fs/".
        for override in package["perm_overrides"]:
            target = os.path.join(workspace, "fs", override["target"])
            os.chmod(target, override["perms"])
            # "owner" and "group" should be a system account and group with
            # a well-defined UID and GID. Otherwise, the UID/GID might vary
            # between systems. When the archive is extracted/package is
            # installed, things may not behave as we would expect.
            # fmt: off
            shutil.chown(
                target,
                user = override["owner"],
                group = override["group"])
            # fmt: on
        os.makedirs(source["target"], exist_ok=True)
        fpm_wrapper(source, package, version, workspace, "rpm")
        fpm_wrapper(source, package, version, workspace, "deb")
def fpm_wrapper(source, package, version, workspace, package_type):
    """
    Constructs either a DEB or RPM Package by shelling out to `fpm`.

    This wraps some configuration settings that are *only* relevant
    to `fpm`.

    Args:
        source:       source mapping; reads the "arch" and "target" keys.
        package:      package mapping; reads the "name" key.
        version:      version string passed to `fpm --version`.
        workspace:    staging directory containing "fs/" (the package
                      file-system image) and "control/" (maintainer scripts).
        package_type: "deb" or "rpm".

    Raises:
        subprocess.CalledProcessError: if `fpm` exits non-zero.
    """
    # Register everything under "fs/etc" as a conffile so that package
    # upgrades do not clobber administrator-edited configuration.
    conffiles = []
    for root, _dirs, files in os.walk(os.path.join(workspace, "fs/etc")):
        for file in files:
            # fmt: off
            conffiles.extend([
                "--config-files", os.path.join("/", os.path.relpath(root, os.path.join(workspace, "fs")), file)
            ])
            # fmt: on
    # `source["arch"]` matches DEB architecture names. When building RPMs, it must
    # be converted into RPM architecture names.
    architecture = source["arch"]
    if package_type == "rpm":
        if architecture == "amd64":
            architecture = "x86_64"
    # `check_call` raises on failure and otherwise returns 0, so its return
    # value is intentionally discarded (it was previously assigned to an
    # unused variable).
    # fmt: off
    subprocess.check_call([
        "fpm",
        "--log", "error",
        # package description
        "--name", package["name"],
        "--vendor", "InfluxData",
        "--description", "Distributed time-series database.",
        "--url", "https://influxdata.com",
        "--maintainer", "support@influxdb.com",
        "--license", "Proprietary",
        # package configuration
        "--input-type", "dir",
        "--output-type", package_type,
        "--architecture", architecture,
        "--version", version,
        "--iteration", "1",
        # maintainer scripts
        "--after-install", os.path.join(workspace, "control/post-install"),
        "--after-remove", os.path.join(workspace, "control/post-uninstall"),
        "--before-install", os.path.join(workspace, "control/pre-install"),
        # package conffiles
        "--rpm-attr", "750,influxdb,influxdb:/var/log/influxdb",
        "--rpm-attr", "750,influxdb,influxdb:/var/lib/influxdb",
        *conffiles,
        # package options
        "--chdir", os.path.join(workspace, "fs/"),
        "--package", source["target"]
    ])
    # fmt: on
# Resolve the package version from the CI environment: a release tag wins,
# otherwise fall back to a commit-SHA-derived development version.
circle_tag = os.getenv("CIRCLE_TAG", default="")
circle_sha = os.getenv("CIRCLE_SHA1", default="DEADBEEF")
# Determine if `circle_tag` matches the semantic version regex. Otherwise,
# assume that `circle_tag` is not intended to tag a release. The regex is
# permissive of what occurs after the semantic version. This allows for
# alphas, betas, and release candidates. The dots are escaped so only a
# literal "." separates the components (the previous pattern's bare "."
# matched any character, e.g. "v1x2y3").
if re.match(r"^v[0-9]+\.[0-9]+\.[0-9]+", circle_tag):
    version = circle_tag[1:]
else:
    # When `circle_tag` cannot be used to construct the package version,
    # use `circle_sha`. Since `circle_sha` can start with an alphabetic
    # (non-numeric) character, prefix it with "1.x-".
    version = "1.x-" + circle_sha[:8]
with open(".circleci/scripts/package/config.yaml") as file:
    # SafeLoader: the config is repository-controlled, but never execute
    # arbitrary tags regardless.
    document = yaml.load(file, Loader=yaml.SafeLoader)
# Build every (source, package) combination described by the configuration.
# fmt: off
for s, p in [
    (s, p)
    for s in document["sources" ]
    for p in document["packages"]
]:
    # fmt: on
    if s["plat"] == "linux":
        build_linux_archive(s, p, version)
        build_linux_package(s, p, version)
    if s["plat"] == "darwin":
        build_darwin_archive(s, p, version)

View File

@ -1,2 +0,0 @@
PyYAML==6.0
regex==2023.6.3

4
.gitignore vendored
View File

@ -44,10 +44,6 @@ gosym
gocode
inspect-raft
# dependencies
out_rpm/
packages/
# autoconf
autom4te.cache/
config.log