Compare commits

...

145 Commits

Author SHA1 Message Date
Ali 8b73ad3b6f chore(kubernetes): node view react migration [r8s-331] (#746) 2025-09-08 22:51:32 +12:00
Ali 6fc2a8234d fix(registry): allow trusted tls custom registries [r8s-489] (#1116) 2025-09-08 09:28:40 +12:00
Ali e2c2724e36 fix(helm): update helm repo validation to match helm cli [r8s-531] (#1141) 2025-09-08 08:58:04 +12:00
Malcolm Lockyer 6abfbe8553 fix(fips): encrypt the chisel private key file for fips [be-12132] (#1143) 2025-09-05 13:17:30 +12:00
andres-portainer 54f6add45d fix(compose): fix a data race in a test BE-12231 (#1148) 2025-09-04 17:31:57 -03:00
andres-portainer f8ae5368bf fix(git): add a minimum interval validation BE-12220 (#1144) 2025-09-04 15:11:12 -03:00
andres-portainer 2ba348551d fix(scheduler): fix a data race in the job scheduler BE-12229 (#1146) 2025-09-04 15:09:52 -03:00
andres-portainer 110f88f22d chore(endpointutils): remove unnecessary field BE-10415 (#1136) 2025-09-04 11:22:46 -03:00
James Player c90a15dd0f refactor(app/repository): migrate edit repository view to React [R8S-332] (#768) 2025-09-04 16:27:39 +12:00
andres-portainer f4335e1e72 fix(registries): clear sensitive fields in the update handler BE-12215 (#1128) 2025-09-02 15:44:09 -03:00
andres-portainer 8d9e1a0ad5 fix(csp): add object-src to the CSP header BE-12217 (#1126) 2025-09-02 11:39:46 -03:00
andres-portainer 48dcfcb08f fix(forbidigo): add more rules to avoid skipping TLS verifications BE-11973 (#1123) 2025-09-01 16:57:22 -03:00
andres-portainer def19be230 fix(depguard): mitigate improper usage of openpgp BE-11977 (#1122) 2025-09-01 14:44:45 -03:00
andres-portainer 36154e9d33 fix(depguard): add a rule against golang.org/x/crypto BE-11978 (#1119) 2025-09-01 10:54:24 -03:00
Oscar Zhou 7cf6bb78d6 fix(container): inaccurate healthy container count [BE-2290] (#1114) 2025-09-01 17:01:13 +12:00
Cara Ryan 541f281b29 fix(kubernetes): Namespace resource limits and requests display consistent value (#1055) 2025-09-01 10:25:53 +12:00
Viktor Pettersson 965ef5246b feat(autopatch): implement OCI registry patch finder BE-12111 (#1044) 2025-08-27 19:04:41 +12:00
James Carppe 9c88057bd1 Updates for release 2.33.1 (#1109) 2025-08-27 16:56:01 +12:00
andres-portainer 8c52e92705 chore(bbolt): upgrade bbolt to v1.4.3 BE-12193 (#1103) 2025-08-25 15:51:56 -03:00
Devon Steenberg 3a727d24ce fix(sslflags): Deprecate ssl flags [BE-12168] (#1075) 2025-08-25 14:35:55 +12:00
Malcolm Lockyer 185558a642 fix(standard): manual endpoint refresh fails to save new status [be-12188] (#1092) 2025-08-25 13:49:17 +12:00
Ali 35aa525bd2 fix(environments): create k8s specific edge agent before connecting [r8s-438] (#1088)
Merging because this change is unrelated to the failing kubernetes/tests/helm-oci.spec.ts tests
2025-08-25 09:32:10 +12:00
Oscar Zhou 2ce8788487 fix(autoupdate): update tooltips in edge stack gitops update [BE-12177] (#1084) 2025-08-23 10:56:04 +12:00
andres-portainer ec0e98a64b chore(linters): enable testifylint BE-12183 (#1091) 2025-08-22 15:31:10 -03:00
Steven Kang 121e9f03a4 fix: GHSA-2464-8j7c-4cjm - develop [R8S-495] (#1087) 2025-08-22 14:03:13 +12:00
andres-portainer a0295b1a39 chore(go): upgrade Go to v1.25.0 BE-12181 (#1071) 2025-08-20 12:55:06 -03:00
andres-portainer 30aba86380 chore(benchmarks): use b.Loop() BE-12182 (#1072) 2025-08-20 12:54:26 -03:00
James Carppe 89f5a20786 Updates for release 2.33.0 (#1067) 2025-08-20 15:35:58 +12:00
James Player ef7caa260b fix(UI): add experimental features back in [r8s-483] (#1061) 2025-08-19 16:55:24 +12:00
Steven Kang 39d50ef70e fix: cve-2025-55198 and cve-2025-55199 - develop [R8S-482] (#1057) 2025-08-19 16:22:52 +12:00
James Player 58a1392480 fix(helm): support http and custom tls helm registries, give help when misconfigured - develop [r8s-472] (#1050)
Co-authored-by: testA113 <aliharriss1995@gmail.com>
2025-08-19 13:32:32 +12:00
James Player 06f6bcc340 fix(ui): Fixed react-select TooManyResultsSelector filter and improved scrolling (#1024) 2025-08-19 09:35:00 +12:00
LP B c9d18b614b fix(api/edge-stacks): avoid overriding updates with old values (#1047) 2025-08-16 03:52:13 +02:00
andres-portainer 2035c42c3c fix(migrator): rewrite a migration so it is idempotent BE-12053 (#1042) 2025-08-15 09:26:10 -03:00
Malcolm Lockyer a760426b87 fix(fips): use standard lib pbkdf2 [be-12164] (#1038) 2025-08-15 11:44:35 +12:00
andres-portainer 10b129a02e fix(crypto): replace fips140 calls with fips calls BE-11979 (#1033) 2025-08-14 19:36:15 -03:00
Cara Ryan 129b9d5db9 fix(pending-actions): Small improvements to pending actions (R8S-350) (#949) 2025-08-15 10:07:51 +12:00
andres-portainer 2c08becf6c feat(openai): remove OpenAI BE-12018 (#873) 2025-08-14 10:42:21 -03:00
Ali a3bfe7cb0c fix(logs): improve log rendering performance [r8s-437] (#993) 2025-08-14 13:55:37 +12:00
andres-portainer 7049a8a2bb fix(linters): add many linters BE-12112 (#1009) 2025-08-13 19:42:24 -03:00
LP B 1197b1dd8d feat(api): Permissions-Policy header deny all (#1021) 2025-08-13 22:07:55 +02:00
andres-portainer 7f167ff2fc fix(auth): remove a nil pointer dereference BE-12149 (#1014) 2025-08-13 13:20:56 -03:00
Andrew Amesbury 3ade5cdf19 bump version to 2.33.0-rc1 (#1019) 2025-08-13 14:40:34 +12:00
LP B 5f6fa4d79f fix(app/update_schedule): create schedule performance issues at scale (#1002) 2025-08-12 16:50:11 +02:00
Ali 3ee20863d6 fix(editor): remove yaml specific highlighting [r8s-441] (#1010) 2025-08-12 11:53:31 +12:00
Steven Kang 8fe5eaee29 feat(ui): Kubernetes - Create from Manifest - tidy up [R8S-67] (#971) 2025-08-12 11:49:33 +12:00
Cara Ryan 208534c9d9 fix(helm): helm apps do not combine in applications view if different namespace [R8S-420] (#988) 2025-08-12 10:23:27 +12:00
Steven Kang 3f030394c6 fix(security): remediation of cve-2025-54338 and cve-2025-8556 (#989) 2025-08-12 09:08:29 +12:00
Devon Steenberg 6ca0085ec8 fix(stackbuilders): swarm and k8s deploys [BE-12138] (#1003) 2025-08-11 15:44:36 +12:00
Malcolm Lockyer 2cf1649c67 fix(encryption): in fips mode, use pbkdf2 for db password [be-11933] (#985)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
2025-08-11 12:03:38 +12:00
andres-portainer 64ed988169 fix(linters): upgrade golangci-lint to v2.3.1 BE-12136 (#997) 2025-08-08 21:39:21 -03:00
LP B 85b7e881eb docs(api/dashboard): docker/{envId}/dashboard incorrectly marked as POST instead of GET (#996) 2025-08-08 09:31:34 +02:00
andres-portainer 9325cb2872 fix(all): avoid using pointers to zero sized structs BE-12129 (#986) 2025-08-07 09:47:42 -03:00
Steven Kang e39dcc458b fix(security): ghsa-fv92-fjc5-jj9h [R8S-449] (#979) 2025-08-07 12:21:31 +12:00
Devon Steenberg 84b4b30f21 fix(rand): Use crypto/rand instead of math/rand in FIPS mode [BE-12071] (#961)
Co-authored-by: codecov-ai[bot] <156709835+codecov-ai[bot]@users.noreply.github.com>
2025-08-06 10:19:15 +12:00
andres-portainer 6c47598cd9 fix(apikey): use HMAC-SHA256 for FIPS mode API keys BE-11936 (#980) 2025-08-05 13:09:35 -03:00
andres-portainer d00d71ecbf fix(linter): add linter rules to reduce the chance for invalid FIPS settings BE-11979 (#975) 2025-08-05 09:23:07 -03:00
Ali dc273b2d63 fix(helm): don't block install with dry-run errors [r8s-454] (#976)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-05 18:53:41 +12:00
James Carppe 497b16e942 Chore update readme graphic (#963)
Co-authored-by: Phil Calder <4473109+predlac@users.noreply.github.com>
2025-08-05 17:14:54 +12:00
LP B a472de1919 fix(app/edge-jobs): edge job results page crash at scale (#954) 2025-08-04 17:10:46 +02:00
Malcolm Lockyer d306d7a983 fix(encryption): replace encryption related methods for fips mode [be-11933] (#919)
Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
2025-08-04 17:04:03 +12:00
andres-portainer 163aa57e5c fix(tls): centralize the TLS configuration to ensure FIPS compliance BE-11979 (#960) 2025-08-01 22:23:59 -03:00
andres-portainer 3eab294908 fix(linters): add the bodyclose linter BE-12112 (#959) 2025-07-30 11:35:30 -03:00
Viktor Pettersson da30780ac2 feat(autopatch): implement patch finder for retrieving latest patches from GitHub (#957) BE-12085 2025-07-30 15:57:32 +12:00
Ali ef53354193 fix(snapshot): show snapshot stats [r8s-432] (#952) 2025-07-29 22:51:05 +12:00
andres-portainer e9ce3d2213 fix(endpointedge): optimize buildSchedules() BE-12099 (#955) 2025-07-28 19:19:07 -03:00
andres-portainer a46db61c4c fix(endpointrelation): optimize updateEdgeStacksAfterRelationChange() BE-12092 (#941) 2025-07-28 13:19:05 -03:00
Steven Kang 5e271fd4a4 feat(ui): reordered kubernetes create from code options [R8S-429] (#951) 2025-07-28 15:41:12 +12:00
James Player 6481483074 fix(app/sidebar): Custom logo UI issue [r8s-435] (#939) 2025-07-25 15:29:06 +12:00
James Player 7bcb37c761 feat(app/kubernetes): Popout kubectl shell into new window [r8s-307] (#922) 2025-07-25 15:24:32 +12:00
LP B e7d97d7a2b fix(app/edge-configs): high numbers UI overlap (#931) 2025-07-24 16:37:07 +02:00
James Carppe 1afae99345 Updates for release 2.32.0 (#936) 2025-07-24 14:30:37 +12:00
Steven Kang bdb2e2f417 fix(transport): portainer generated kubeconfig causes kubectl exec fail [R8S-430] (#929) 2025-07-24 13:11:13 +12:00
andres-portainer bba3751268 fix(roar): return empty slices instead of nil for easier API compatibility BE-12053 (#932) 2025-07-23 14:06:20 -03:00
Ali 60bc04bc33 feat(helm): show manifest previews/changes when installing and upgrading a helm chart [r8s-405] (#898) 2025-07-23 10:52:58 +12:00
andres-portainer a4cff13531 fix(bouncer): add missing domain to CSP header BE-12067 (#916) 2025-07-21 21:32:50 -03:00
andres-portainer 937456596a fix(edgegroups): convert the related endpoint IDs to roaring bitmaps to increase performance BE-12053 (#903) 2025-07-21 21:31:13 -03:00
Devon Steenberg caf382b64c feat(git): support bearer token auth for git [BE-11770] (#879) 2025-07-22 08:36:08 +12:00
Ali 55cc250d2e fix(pods): represent pod container statuses correctly [r8s-416] (#910) 2025-07-21 15:05:08 +12:00
Ali eaa2be017d fix(helm): ensure the form is not 'dirty', when the values are unchanged [r8s-421] (#901) 2025-07-17 12:07:11 +12:00
James Player 4e4c5ffdb6 fix(app/kubernetes): Fix listing of secrets and configmaps with same name [r8s-288] (#897) 2025-07-16 16:37:59 +12:00
James Player 383bcc4113 fix(docker/images): Fix image detail actions icon colours [be-12044] (#892) 2025-07-15 13:57:43 +12:00
James Player 9f906b7417 refactor(app/tests): Make createMockUsers more deterministic [r8s-406] (#887) 2025-07-14 17:16:33 +12:00
Cara Ryan db2e168540 chore: bump version to 2.32.0 (#884) 2025-07-14 10:23:05 +12:00
Ali 2697d6c5d7 feat(oci): oci helm support [r8s-361] (#787) 2025-07-13 10:37:43 +12:00
andres-portainer b6a6ce9aaf fix(endpointedge): fix a deadlock in createAsyncEdgeAgentEndpoint() BE-12039 (#883) 2025-07-11 18:54:05 -03:00
Ali 89f6a94bd8 chore(select): show data-cy react select [r8s-402] (#881) 2025-07-11 20:06:41 +12:00
Steven Kang 96f2d69ae5 feat(observability): alerting experimental feature (#801)
Co-authored-by: JamesPlayer <james.player@portainer.io>
2025-07-11 16:55:23 +12:00
Cara Ryan b7e906701a fix(kubernetes): Namespace access permission changes role bindings not created [R8S-366] (#826) 2025-07-11 14:55:48 +12:00
Steven Kang 150d986179 fix: CVE-2025-53547 (#880) 2025-07-11 13:57:21 +12:00
James Player ef10ea2a7d fix(ui): Fixed TagsDatatable name column link (#847) 2025-07-11 11:01:37 +12:00
Viktor Pettersson 3bf84e8b0c fix(tags): reconcile edge relations prior to deletion [BE-11969] (#867) 2025-07-10 10:52:12 +12:00
andres-portainer ea4b334c7e feat(csp): enable CSP by default BE-11961 (#872) 2025-07-09 16:15:43 -03:00
Oscar Zhou 4d11aa8655 fix(tag): ignore "environment not found" when deleting tag [BE-11944] (#869) 2025-07-09 09:55:59 -03:00
andres-portainer 302deb8299 chore(dataservices): enhance ReadAll() so it takes predicates for filtering results BE-12016 (#866) 2025-07-07 14:29:56 -03:00
Viktor Pettersson 0c80b1067d fix(styles): update datetime picker styles for improved dark mode support [BE-11672] (#863) 2025-07-07 20:54:44 +12:00
Steven Kang 0a36d4fbfd fix: `kubectl` sdk - capture fatal error and return instead of exiting 1 [r7s-371] (#841) 2025-07-07 11:29:29 +12:00
Oscar Zhou c20a8b5a68 fix(template): app template v3 error [BE-11998] (#854) 2025-07-04 11:49:33 -03:00
Devon Steenberg 8ffe4e284a fix(tls): set insecureSkipVerify to false in FIPS mode [BE-11932] (#849) 2025-07-04 10:48:54 +12:00
Steven Kang 1332f718ae feat: add warning events count next to the status badge (#828) 2025-07-04 10:07:57 +12:00
James Player f4df51884c fix(tests): Fix ServicesDatatable tests - r8s-395 (#860) 2025-07-03 16:01:08 +12:00
James Carppe ce86129478 Updates for release 2.31.3 (#859) 2025-07-03 15:17:50 +12:00
andres-portainer 097b125e3a fix(boltdb): change some options to increase performance BE-12002 (#848) 2025-07-02 18:17:19 -03:00
andres-portainer 5c6b53922a feat(go): upgrade to Go v1.24.4 BE-11774 (#855) 2025-07-02 18:14:29 -03:00
James Carppe e1b9f23f73 Updates for release 2.27.9 (#853) 2025-07-02 17:45:59 +12:00
LP B e1c480d3c3 feat(app/edge-stacks): summarize the edge stack statuses in the backend (#818) 2025-07-01 15:04:10 +02:00
Steven Kang 363a62d885 fix: bump the docker binary version to v28.3.0 [r8s-390] (#837) 2025-07-01 20:10:39 +12:00
James Player c6ee9a5a52 feat(ui): Rebranding - r8s-374 (#840) 2025-07-01 12:58:31 +12:00
andres-portainer cf5990ccba fix(edgestackstatus): improve error handling BE-11963 (#844) 2025-06-30 20:54:16 -03:00
Oscar Zhou b6f3682a62 refactor(edge): init endpoint relation when endpoint is created [BE-11928] (#814) 2025-06-30 15:15:56 -03:00
LP B b43f864511 fix(api/endpoints): filter out waiting room environments for non admins (#810) 2025-06-30 15:35:51 +02:00
Oscar Zhou 0556ffb4a1 feat(csrf): add trusted origins cli flags [BE-11972] (#836) 2025-06-27 17:41:10 -03:00
Ali 303047656e fix(k8s-services): avoid rerendering services table [r8s-387] (#832) 2025-06-27 22:48:40 +12:00
Steven Kang 8d29b5ae71 fix: kubeconfig download button inconsistency between http and https (#829) 2025-06-27 09:38:04 +12:00
James Carppe 7d7ae24351 Updates for release 2.31.2 (#834) 2025-06-26 15:41:23 +12:00
James Carppe 97838e614d Updates for release 2.27.8 (#827) 2025-06-25 17:11:58 +12:00
Steven Kang c897baad20 fix: fetching values from both install and upgrade views - develop [R8S-368] (#820) 2025-06-24 15:46:10 +12:00
andres-portainer d51e9205d9 fix(endpointrelation): use a read-write transaction for mutations BE-11964 (#819) 2025-06-20 20:03:35 -03:00
James Carppe e051c86bb5 Updates for release 2.31.1 (#816) 2025-06-19 14:07:18 +12:00
Steven Kang c2b48cd003 feat(k8s): CloudNativePG in applications list and details - [R8S-357] (#777) 2025-06-19 09:03:52 +12:00
James Carppe a7009eb8d5 Update bug report template for 2.27.7 (#805) 2025-06-17 12:52:12 +12:00
andres-portainer 036b87b649 fix(middlewares): fix data race in WithEndpoint() BE-11949 (#803) 2025-06-16 12:56:51 -03:00
Steven Kang f07a3b1875 security: cve-2025-22874 & cve-2025-22871 bump go to 1.23.10 (#798) 2025-06-12 17:30:53 +12:00
Yajith Dayarathna 6e89ccc0ae fix(api-documentation): swagger document genration error (#795) 2025-06-12 13:39:34 +12:00
James Carppe cc67612432 Update bug report template for 2.31.0 (#793) 2025-06-12 13:26:25 +12:00
Malcolm Lockyer 17ebe221bb chore: bump version to 2.31.0 (#789) 2025-06-10 16:47:17 +12:00
Ali 1963edda66 feat(helm): add registry dropdown [r8s-340] (#779) 2025-06-09 20:08:50 +12:00
Cara Ryan c9e3717ce3 fix(kubernetes): Display more than 10 workloads under Helm expandable in the Applications view [R8S-339] (#781) 2025-06-09 15:12:24 +12:00
Oscar Zhou 9a85246631 fix(edgestack): display deploying status by default after creating edgestack [BE-11924] (#783) 2025-06-07 09:06:57 +12:00
andres-portainer 75f165d1ff feat(edgestackstatus): optimize the Edge Stack structures BE-11740 (#756) 2025-06-05 19:46:10 -03:00
Viktor Pettersson eaf0deb2f6 feat(update-schedules): new update schedules view [BE-11754, BE-11887] (#686) 2025-06-05 17:03:43 +12:00
Ali a9061e5258 feat(helm): enhance helm chart install [r8s-341] (#766) 2025-06-05 13:13:45 +12:00
James Player caac45b834 feat(UI): Add repository url to Helm chart installation list items (#769) 2025-06-05 10:14:39 +12:00
LP B 24ff7a7911 chore(deps): upgrade docker/cli to v28.2.1 | docker/docker to v28.2.1 | docker/compose to v2.36.2 (#758) 2025-05-30 09:12:27 +02:00
Devon Steenberg b767dcb27e fix(proxy): whitelist headers for proxy to forward [BE-11819] (#665) 2025-05-30 11:49:23 +12:00
Cara Ryan 731afbee46 feat(helm): filter on chart versions at API level [R8S-324] (#754) 2025-05-27 15:20:28 +12:00
Cara Ryan 07dfd981a2 fix(kubernetes): events api to call the backend [R8S-243] (#563) 2025-05-27 13:55:31 +12:00
Cara Ryan 32ef208278 Revert "feat(helm): filter on chart versions at API level [R8S-324]" (#753) 2025-05-26 16:58:53 +12:00
Cara Ryan a80b185e10 feat(helm): filter on chart versions at API level [R8S-324] (#747) 2025-05-26 14:10:38 +12:00
Malcolm Lockyer b96328e098 fix(async-perf): In async poll snapshot handling, reduce redundant json marshal [be-11861] (#726) 2025-05-23 12:42:45 +12:00
Devon Steenberg 45471ce86d fix(docker): check len of device capabilities [BE-11898] (#750) 2025-05-22 14:27:14 +12:00
Viktor Pettersson 1bc91d0c7c fix(edge-update): set edge stack status to EdgeStackStatusError to avoid redeployment of portainer-updater [BE-11855] (#714) 2025-05-20 08:28:40 +02:00
James Carppe 799325d9f8 Update bug report template for 2.30.1 (#749) 2025-05-20 14:40:43 +12:00
James Carppe b540709e03 Update bug report template for 2.30.0 (#737) 2025-05-15 12:09:28 +12:00
Oscar Zhou 44daab04ac fix(libclient): option to disable external http request [BE-11696] (#719) 2025-05-15 09:54:35 +12:00
732 changed files with 30629 additions and 7003 deletions

View File

@ -94,11 +94,23 @@ body:
description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [updating first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
multiple: false
options:
- '2.33.1'
- '2.33.0'
- '2.32.0'
- '2.31.3'
- '2.31.2'
- '2.31.1'
- '2.31.0'
- '2.30.1'
- '2.30.0'
- '2.29.2'
- '2.29.1'
- '2.29.0'
- '2.28.1'
- '2.28.0'
- '2.27.9'
- '2.27.8'
- '2.27.7'
- '2.27.6'
- '2.27.5'
- '2.27.4'

View File

@ -1,41 +1,73 @@
version: "2"
linters:
# Disable all linters, the defaults don't pass on our code yet
disable-all: true
# Enable these for now
default: none
enable:
- unused
- depguard
- gosimple
- govet
- errorlint
- bodyclose
- copyloopvar
- depguard
- errorlint
- forbidigo
- govet
- ineffassign
- intrange
- perfsprint
- ineffassign
linters-settings:
depguard:
rules:
main:
deny:
- pkg: 'encoding/json'
desc: 'use github.com/segmentio/encoding/json'
- pkg: 'golang.org/x/exp'
desc: 'exp is not allowed'
- pkg: 'github.com/portainer/libcrypto'
desc: 'use github.com/portainer/portainer/pkg/libcrypto'
- pkg: 'github.com/portainer/libhttp'
desc: 'use github.com/portainer/portainer/pkg/libhttp'
files:
- '!**/*_test.go'
- '!**/base.go'
- '!**/base_tx.go'
# errorlint is causing a typecheck error for some reason. The go compiler will report these
# anyway, so ignore them from the linter
issues:
exclude-rules:
- path: ./
linters:
- typecheck
- staticcheck
- unused
- mirror
- durationcheck
- errorlint
- govet
- zerologlint
- testifylint
settings:
staticcheck:
checks: ["all", "-ST1003", "-ST1005", "-ST1016", "-SA1019", "-QF1003"]
depguard:
rules:
main:
files:
- '!**/*_test.go'
- '!**/base.go'
- '!**/base_tx.go'
deny:
- pkg: encoding/json
desc: use github.com/segmentio/encoding/json
- pkg: golang.org/x/exp
desc: exp is not allowed
- pkg: github.com/portainer/libcrypto
desc: use github.com/portainer/portainer/pkg/libcrypto
- pkg: github.com/portainer/libhttp
desc: use github.com/portainer/portainer/pkg/libhttp
- pkg: golang.org/x/crypto
desc: golang.org/x/crypto is not allowed because of FIPS mode
- pkg: github.com/ProtonMail/go-crypto/openpgp
desc: github.com/ProtonMail/go-crypto/openpgp is not allowed because of FIPS mode
forbidigo:
forbid:
- pattern: ^tls\.Config$
msg: Use crypto.CreateTLSConfiguration() instead
- pattern: ^tls\.Config\.(InsecureSkipVerify|MinVersion|MaxVersion|CipherSuites|CurvePreferences)$
msg: Do not set this field directly, use crypto.CreateTLSConfiguration() instead
- pattern: ^object\.(Commit|Tag)\.Verify$
msg: "Not allowed because of FIPS mode"
- pattern: ^(types\.SystemContext\.)?(DockerDaemonInsecureSkipTLSVerify|DockerInsecureSkipTLSVerify|OCIInsecureSkipTLSVerify)$
msg: "Not allowed because of FIPS mode"
analyze-types: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
formatters:
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
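
For illustration, not part of the changes above: the forbidigo patterns reject direct construction of tls.Config and direct writes to its verification-related fields, pointing callers at crypto.CreateTLSConfiguration() instead, and the codebase opts out per declaration with a //nolint:forbidigo directive where a raw *tls.Config must still appear (as the GetAgentVersionAndPlatform change further down does). A minimal sketch of both sides of the rule; the function names here are hypothetical:

package example

import "crypto/tls"

// Flagged by forbidigo: matches the ^tls\.Config$ pattern configured above,
// whose message points to crypto.CreateTLSConfiguration() as the replacement.
func flaggedConfig() *tls.Config {
	return &tls.Config{InsecureSkipVerify: true}
}

// Deliberate exception: the directive suppresses the finding for this declaration,
// mirroring the annotation added to GetAgentVersionAndPlatform below.
func allowedException(cfg *tls.Config) *tls.Config { //nolint:forbidigo
	return cfg
}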

View File

@ -8,9 +8,9 @@ Portainer consists of a single container that can run on any cluster. It can be
**Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users.
- [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products)
- [Compare Portainer CE and Compare Portainer BE](https://www.portainer.io/features)
- [Take3 get 3 free nodes of Portainer Business for as long as you want them](https://www.portainer.io/take-3)
- [Portainer BE install guide](https://install.portainer.io)
- [Portainer BE install guide](https://academy.portainer.io/install/)
## Latest Version
@ -20,22 +20,19 @@ Portainer CE is updated regularly. We aim to do an update release every couple o
## Getting started
- [Deploy Portainer](https://docs.portainer.io/start/install)
- [Deploy Portainer](https://docs.portainer.io/start/install-ce)
- [Documentation](https://docs.portainer.io)
- [Contribute to the project](https://docs.portainer.io/contribute/contribute)
## Features & Functions
View [this](https://www.portainer.io/products) table to see all of the Portainer CE functionality and compare to Portainer Business.
- [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker)
- [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui)
View [this](https://www.portainer.io/features) table to see all of the Portainer CE functionality and compare to Portainer Business.
## Getting help
Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io
Learn more about Portainer's community support channels [here.](https://www.portainer.io/get-support-for-portainer)
Learn more about Portainer's community support channels [here.](https://www.portainer.io/resources/get-help/get-support)
- Issues: https://github.com/portainer/portainer/issues
- Slack (chat): [https://portainer.io/slack](https://portainer.io/slack)
@ -53,13 +50,13 @@ You can join the Portainer Community by visiting [https://www.portainer.io/join-
## Work for us
If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to info@portainer.io with your details and/or visit our [careers page](https://portainer.io/careers).
If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to success@portainer.io with your details and/or visit our [careers page](https://apply.workable.com/portainer/).
## Privacy
**To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.**
When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/legal/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
## Limitations

View File

@ -16,7 +16,7 @@ import (
// GetAgentVersionAndPlatform returns the agent version and platform
//
// it sends a ping to the agent and parses the version and platform from the headers
func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) {
func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) { //nolint:forbidigo
httpCli := &http.Client{
Timeout: 3 * time.Second,
}

View File

@ -10,31 +10,31 @@ func Test_generateRandomKey(t *testing.T) {
is := assert.New(t)
tests := []struct {
name string
wantLenth int
name string
wantLength int
}{
{
name: "Generate a random key of length 16",
wantLenth: 16,
name: "Generate a random key of length 16",
wantLength: 16,
},
{
name: "Generate a random key of length 32",
wantLenth: 32,
name: "Generate a random key of length 32",
wantLength: 32,
},
{
name: "Generate a random key of length 64",
wantLenth: 64,
name: "Generate a random key of length 64",
wantLength: 64,
},
{
name: "Generate a random key of length 128",
wantLenth: 128,
name: "Generate a random key of length 128",
wantLength: 128,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := GenerateRandomKey(tt.wantLenth)
is.Equal(tt.wantLenth, len(got))
got := GenerateRandomKey(tt.wantLength)
is.Len(got, tt.wantLength)
})
}
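
For illustration, not part of the changes above: the testifylint-driven edits in this and the following test files apply two recurring substitutions. require.NoError(t, err) replaces is.NoError(err) so a failed setup step aborts the subtest instead of letting later assertions run against bad state, and is.Len(got, n) replaces hand-written length comparisons so a mismatch reports the actual length. A minimal standalone sketch of both idioms; buildKey is a hypothetical stand-in for the code under test:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// buildKey is a hypothetical stand-in for the code under test.
func buildKey(n int) ([]byte, error) {
	return make([]byte, n), nil
}

func TestKeyLength(t *testing.T) {
	is := assert.New(t)

	got, err := buildKey(16)
	require.NoError(t, err) // fails fast: nothing below runs if setup broke
	is.Len(got, 16)         // on mismatch, reports the actual length of got
}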

View File

@ -10,9 +10,10 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/stretchr/testify/assert"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_SatisfiesAPIKeyServiceInterface(t *testing.T) {
@ -30,7 +31,7 @@ func Test_GenerateApiKey(t *testing.T) {
t.Run("Successfully generates API key", func(t *testing.T) {
desc := "test-1"
rawKey, apiKey, err := service.GenerateApiKey(portainer.User{ID: 1}, desc)
is.NoError(err)
require.NoError(t, err)
is.NotEmpty(rawKey)
is.NotEmpty(apiKey)
is.Equal(desc, apiKey.Description)
@ -38,7 +39,7 @@ func Test_GenerateApiKey(t *testing.T) {
t.Run("Api key prefix is 7 chars", func(t *testing.T) {
rawKey, apiKey, err := service.GenerateApiKey(portainer.User{ID: 1}, "test-2")
is.NoError(err)
require.NoError(t, err)
is.Equal(rawKey[:7], apiKey.Prefix)
is.Len(apiKey.Prefix, 7)
@ -46,7 +47,7 @@ func Test_GenerateApiKey(t *testing.T) {
t.Run("Api key has 'ptr_' as prefix", func(t *testing.T) {
rawKey, _, err := service.GenerateApiKey(portainer.User{ID: 1}, "test-x")
is.NoError(err)
require.NoError(t, err)
is.Equal(portainerAPIKeyPrefix, "ptr_")
is.True(strings.HasPrefix(rawKey, "ptr_"))
@ -55,7 +56,7 @@ func Test_GenerateApiKey(t *testing.T) {
t.Run("Successfully caches API key", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-3")
is.NoError(err)
require.NoError(t, err)
userFromCache, apiKeyFromCache, ok := service.cache.Get(apiKey.Digest)
is.True(ok)
@ -65,7 +66,7 @@ func Test_GenerateApiKey(t *testing.T) {
t.Run("Decoded raw api-key digest matches generated digest", func(t *testing.T) {
rawKey, apiKey, err := service.GenerateApiKey(portainer.User{ID: 1}, "test-4")
is.NoError(err)
require.NoError(t, err)
generatedDigest := sha256.Sum256([]byte(rawKey))
@ -83,10 +84,10 @@ func Test_GetAPIKey(t *testing.T) {
t.Run("Successfully returns all API keys", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
apiKeyGot, err := service.GetAPIKey(apiKey.ID)
is.NoError(err)
require.NoError(t, err)
is.Equal(apiKey, apiKeyGot)
})
@ -102,12 +103,12 @@ func Test_GetAPIKeys(t *testing.T) {
t.Run("Successfully returns all API keys", func(t *testing.T) {
user := portainer.User{ID: 1}
_, _, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
_, _, err = service.GenerateApiKey(user, "test-2")
is.NoError(err)
require.NoError(t, err)
keys, err := service.GetAPIKeys(user.ID)
is.NoError(err)
require.NoError(t, err)
is.Len(keys, 2)
})
}
@ -122,10 +123,10 @@ func Test_GetDigestUserAndKey(t *testing.T) {
t.Run("Successfully returns user and api key associated to digest", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
userGot, apiKeyGot, err := service.GetDigestUserAndKey(apiKey.Digest)
is.NoError(err)
require.NoError(t, err)
is.Equal(user, userGot)
is.Equal(*apiKey, apiKeyGot)
})
@ -133,10 +134,10 @@ func Test_GetDigestUserAndKey(t *testing.T) {
t.Run("Successfully caches user and api key associated to digest", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
userGot, apiKeyGot, err := service.GetDigestUserAndKey(apiKey.Digest)
is.NoError(err)
require.NoError(t, err)
is.Equal(user, userGot)
is.Equal(*apiKey, apiKeyGot)
@ -158,14 +159,14 @@ func Test_UpdateAPIKey(t *testing.T) {
user := portainer.User{ID: 1}
store.User().Create(&user)
_, apiKey, err := service.GenerateApiKey(user, "test-x")
is.NoError(err)
require.NoError(t, err)
apiKey.LastUsed = time.Now().UTC().Unix()
err = service.UpdateAPIKey(apiKey)
is.NoError(err)
require.NoError(t, err)
_, apiKeyGot, err := service.GetDigestUserAndKey(apiKey.Digest)
is.NoError(err)
require.NoError(t, err)
log.Debug().Str("wanted", fmt.Sprintf("%+v", apiKey)).Str("got", fmt.Sprintf("%+v", apiKeyGot)).Msg("")
@ -174,7 +175,7 @@ func Test_UpdateAPIKey(t *testing.T) {
t.Run("Successfully updates api-key in cache upon api-key update", func(t *testing.T) {
_, apiKey, err := service.GenerateApiKey(portainer.User{ID: 1}, "test-x2")
is.NoError(err)
require.NoError(t, err)
_, apiKeyFromCache, ok := service.cache.Get(apiKey.Digest)
is.True(ok)
@ -184,7 +185,7 @@ func Test_UpdateAPIKey(t *testing.T) {
is.NotEqual(*apiKey, apiKeyFromCache)
err = service.UpdateAPIKey(apiKey)
is.NoError(err)
require.NoError(t, err)
_, updatedAPIKeyFromCache, ok := service.cache.Get(apiKey.Digest)
is.True(ok)
@ -202,30 +203,30 @@ func Test_DeleteAPIKey(t *testing.T) {
t.Run("Successfully updates the api-key", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
_, apiKeyGot, err := service.GetDigestUserAndKey(apiKey.Digest)
is.NoError(err)
require.NoError(t, err)
is.Equal(*apiKey, apiKeyGot)
err = service.DeleteAPIKey(apiKey.ID)
is.NoError(err)
require.NoError(t, err)
_, _, err = service.GetDigestUserAndKey(apiKey.Digest)
is.Error(err)
require.Error(t, err)
})
t.Run("Successfully removes api-key from cache upon deletion", func(t *testing.T) {
user := portainer.User{ID: 1}
_, apiKey, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
_, apiKeyFromCache, ok := service.cache.Get(apiKey.Digest)
is.True(ok)
is.Equal(*apiKey, apiKeyFromCache)
err = service.DeleteAPIKey(apiKey.ID)
is.NoError(err)
require.NoError(t, err)
_, _, ok = service.cache.Get(apiKey.Digest)
is.False(ok)
@ -243,10 +244,10 @@ func Test_InvalidateUserKeyCache(t *testing.T) {
// generate api keys
user := portainer.User{ID: 1}
_, apiKey1, err := service.GenerateApiKey(user, "test-1")
is.NoError(err)
require.NoError(t, err)
_, apiKey2, err := service.GenerateApiKey(user, "test-2")
is.NoError(err)
require.NoError(t, err)
// verify api keys are present in cache
_, apiKeyFromCache, ok := service.cache.Get(apiKey1.Digest)
@ -273,11 +274,11 @@ func Test_InvalidateUserKeyCache(t *testing.T) {
// generate keys for 2 users
user1 := portainer.User{ID: 1}
_, apiKey1, err := service.GenerateApiKey(user1, "test-1")
is.NoError(err)
require.NoError(t, err)
user2 := portainer.User{ID: 2}
_, apiKey2, err := service.GenerateApiKey(user2, "test-2")
is.NoError(err)
require.NoError(t, err)
// verify keys in cache
_, apiKeyFromCache, ok := service.cache.Get(apiKey1.Digest)

View File

@ -8,15 +8,19 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func listFiles(dir string) []string {
items := make([]string, 0)
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if path == dir {
return nil
}
items = append(items, path)
return nil
})
@ -26,13 +30,21 @@ func listFiles(dir string) []string {
func Test_shouldCreateArchive(t *testing.T) {
tmpdir := t.TempDir()
content := []byte("content")
os.WriteFile(path.Join(tmpdir, "outer"), content, 0600)
err := os.WriteFile(path.Join(tmpdir, "outer"), content, 0600)
require.NoError(t, err)
os.MkdirAll(path.Join(tmpdir, "dir"), 0700)
os.WriteFile(path.Join(tmpdir, "dir", ".dotfile"), content, 0600)
os.WriteFile(path.Join(tmpdir, "dir", "inner"), content, 0600)
require.NoError(t, err)
err = os.WriteFile(path.Join(tmpdir, "dir", ".dotfile"), content, 0600)
require.NoError(t, err)
err = os.WriteFile(path.Join(tmpdir, "dir", "inner"), content, 0600)
require.NoError(t, err)
gzPath, err := TarGzDir(tmpdir)
assert.Nil(t, err)
require.NoError(t, err)
assert.Equal(t, filepath.Join(tmpdir, filepath.Base(tmpdir)+".tar.gz"), gzPath)
extractionDir := t.TempDir()
@ -45,7 +57,8 @@ func Test_shouldCreateArchive(t *testing.T) {
wasExtracted := func(p string) {
fullpath := path.Join(extractionDir, p)
assert.Contains(t, extractedFiles, fullpath)
copyContent, _ := os.ReadFile(fullpath)
copyContent, err := os.ReadFile(fullpath)
require.NoError(t, err)
assert.Equal(t, content, copyContent)
}
@ -57,13 +70,21 @@ func Test_shouldCreateArchive(t *testing.T) {
func Test_shouldCreateArchive2(t *testing.T) {
tmpdir := t.TempDir()
content := []byte("content")
os.WriteFile(path.Join(tmpdir, "outer"), content, 0600)
os.MkdirAll(path.Join(tmpdir, "dir"), 0700)
os.WriteFile(path.Join(tmpdir, "dir", ".dotfile"), content, 0600)
os.WriteFile(path.Join(tmpdir, "dir", "inner"), content, 0600)
err := os.WriteFile(path.Join(tmpdir, "outer"), content, 0600)
require.NoError(t, err)
err = os.MkdirAll(path.Join(tmpdir, "dir"), 0700)
require.NoError(t, err)
err = os.WriteFile(path.Join(tmpdir, "dir", ".dotfile"), content, 0600)
require.NoError(t, err)
err = os.WriteFile(path.Join(tmpdir, "dir", "inner"), content, 0600)
require.NoError(t, err)
gzPath, err := TarGzDir(tmpdir)
assert.Nil(t, err)
require.NoError(t, err)
assert.Equal(t, filepath.Join(tmpdir, filepath.Base(tmpdir)+".tar.gz"), gzPath)
extractionDir := t.TempDir()

View File

@ -5,6 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestUnzipFile(t *testing.T) {
@ -20,7 +21,7 @@ func TestUnzipFile(t *testing.T) {
err := UnzipFile("./testdata/sample_archive.zip", dir)
assert.NoError(t, err)
require.NoError(t, err)
archiveDir := dir + "/sample_archive"
assert.FileExists(t, filepath.Join(archiveDir, "0.txt"))
assert.FileExists(t, filepath.Join(archiveDir, "0", "1.txt"))

View File

@ -54,8 +54,8 @@ func ecdsaGenerateKey(c elliptic.Curve, rand io.Reader) (*ecdsa.PrivateKey, erro
}
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = c
priv.Curve = c
priv.D = k
priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
priv.X, priv.Y = c.ScalarBaseMult(k.Bytes())
return priv, nil
}
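
For illustration, not part of the changes above: the rewrite relies on Go field promotion. ecdsa.PrivateKey embeds ecdsa.PublicKey, which in turn embeds elliptic.Curve, so priv.Curve, priv.X and priv.Y name the same storage as the longer priv.PublicKey.* selectors they replace:

package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
)

// Both assignments below write the same field: PrivateKey embeds PublicKey,
// and PublicKey embeds elliptic.Curve, so the short selector is promoted.
func sameField(priv *ecdsa.PrivateKey, c elliptic.Curve) {
	priv.PublicKey.Curve = c // long form, removed by the change above
	priv.Curve = c           // promoted form the change switches to
}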

View File

@ -9,10 +9,15 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
func init() {
fips.InitFIPS(false)
}
func TestPingAgentPanic(t *testing.T) {
endpoint := &portainer.Endpoint{
ID: 1,

View File

@ -4,7 +4,6 @@ import (
"encoding/base64"
"errors"
"fmt"
"math/rand"
"net"
"strings"
"time"
@ -14,6 +13,7 @@ import (
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/pkg/libcrypto"
"github.com/portainer/portainer/pkg/librand"
"github.com/dchest/uniuri"
"github.com/rs/zerolog/log"
@ -200,7 +200,9 @@ func (service *Service) getUnusedPort() int {
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
conn.Close()
if err := conn.Close(); err != nil {
log.Warn().Msg("failed to close tcp connection that checks if port is free")
}
log.Debug().
Int("port", port).
@ -213,7 +215,7 @@ func (service *Service) getUnusedPort() int {
}
func randomInt(min, max int) int {
return min + rand.Intn(max-min)
return min + librand.Intn(max-min)
}
func generateRandomCredentials() (string, string) {
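
For illustration, not part of the changes above: the switch from math/rand to the pkg/librand import reflects the FIPS work in this commit range (crypto/rand instead of math/rand in FIPS mode). A minimal sketch of what an Intn built on crypto/rand can look like; this is not librand's actual implementation:

package example

import (
	"crypto/rand"
	"math/big"
)

// cryptoIntn returns a uniform value in [0, n) using crypto/rand.
// Sketch only; pkg/librand's real Intn may differ.
func cryptoIntn(n int) int {
	v, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		panic(err) // a failing system CSPRNG is not recoverable here
	}
	return int(v.Int64())
}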

api/chisel/tunnel_test.go (new file, 79 lines)
View File

@ -0,0 +1,79 @@
package chisel
import (
"net"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
type testSettingsService struct {
dataservices.SettingsService
}
func (s *testSettingsService) Settings() (*portainer.Settings, error) {
return &portainer.Settings{
EdgeAgentCheckinInterval: 1,
}, nil
}
type testStore struct {
dataservices.DataStore
}
func (s *testStore) Settings() dataservices.SettingsService {
return &testSettingsService{}
}
func TestGetUnusedPort(t *testing.T) {
testCases := []struct {
name string
existingTunnels map[portainer.EndpointID]*portainer.TunnelDetails
expectedError error
}{
{
name: "simple case",
},
{
name: "existing tunnels",
existingTunnels: map[portainer.EndpointID]*portainer.TunnelDetails{
portainer.EndpointID(1): {
Port: 53072,
},
portainer.EndpointID(2): {
Port: 63072,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
store := &testStore{}
s := NewService(store, nil, nil)
s.activeTunnels = tc.existingTunnels
port := s.getUnusedPort()
if port < 49152 || port > 65535 {
t.Fatalf("Expected port to be inbetween 49152 and 65535 but got %d", port)
}
for _, tun := range tc.existingTunnels {
if tun.Port == port {
t.Fatalf("returned port %d already has an existing tunnel", port)
}
}
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
// Ignore error
_ = conn.Close()
t.Fatalf("expected port %d to be unused", port)
} else if !strings.Contains(err.Error(), "connection refused") {
t.Fatalf("unexpected error: %v", err)
}
})
}
}

View File

@ -9,8 +9,8 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/alecthomas/kingpin/v2"
"github.com/rs/zerolog/log"
"gopkg.in/alecthomas/kingpin.v2"
)
// Service implements the CLIService interface
@ -35,16 +35,9 @@ func CLIFlags() *portainer.CLIFlags {
FeatureFlags: kingpin.Flag("feat", "List of feature flags").Strings(),
EnableEdgeComputeFeatures: kingpin.Flag("edge-compute", "Enable Edge Compute features").Bool(),
NoAnalytics: kingpin.Flag("no-analytics", "Disable Analytics in app (deprecated)").Bool(),
TLS: kingpin.Flag("tlsverify", "TLS support").Default(defaultTLS).Bool(),
TLSSkipVerify: kingpin.Flag("tlsskipverify", "Disable TLS server verification").Default(defaultTLSSkipVerify).Bool(),
TLSCacert: kingpin.Flag("tlscacert", "Path to the CA").Default(defaultTLSCACertPath).String(),
TLSCert: kingpin.Flag("tlscert", "Path to the TLS certificate file").Default(defaultTLSCertPath).String(),
TLSKey: kingpin.Flag("tlskey", "Path to the TLS key").Default(defaultTLSKeyPath).String(),
HTTPDisabled: kingpin.Flag("http-disabled", "Serve portainer only on https").Default(defaultHTTPDisabled).Bool(),
HTTPEnabled: kingpin.Flag("http-enabled", "Serve portainer on http").Default(defaultHTTPEnabled).Bool(),
SSL: kingpin.Flag("ssl", "Secure Portainer instance using SSL (deprecated)").Default(defaultSSL).Bool(),
SSLCert: kingpin.Flag("sslcert", "Path to the SSL certificate used to secure the Portainer instance").String(),
SSLKey: kingpin.Flag("sslkey", "Path to the SSL key used to secure the Portainer instance").String(),
Rollback: kingpin.Flag("rollback", "Rollback the database to the previous backup").Bool(),
SnapshotInterval: kingpin.Flag("snapshot-interval", "Duration between each environment snapshot job").String(),
AdminPassword: kingpin.Flag("admin-password", "Set admin password with provided hash").String(),
@ -61,15 +54,46 @@ func CLIFlags() *portainer.CLIFlags {
LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("NOCOLOR", "PRETTY", "JSON"),
KubectlShellImage: kingpin.Flag("kubectl-shell-image", "Kubectl shell image").Envar(portainer.KubectlShellImageEnvVar).Default(portainer.DefaultKubectlShellImage).String(),
PullLimitCheckDisabled: kingpin.Flag("pull-limit-check-disabled", "Pull limit check").Envar(portainer.PullLimitCheckDisabledEnvVar).Default(defaultPullLimitCheckDisabled).Bool(),
TrustedOrigins: kingpin.Flag("trusted-origins", "List of trusted origins for CSRF protection. Separate multiple origins with a comma.").Envar(portainer.TrustedOriginsEnvVar).String(),
CSP: kingpin.Flag("csp", "Content Security Policy (CSP) header").Envar(portainer.CSPEnvVar).Default("true").Bool(),
}
}
// ParseFlags parse the CLI flags and return a portainer.Flags struct
func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
func (Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
kingpin.Version(version)
var hasSSLFlag, hasSSLCertFlag, hasSSLKeyFlag bool
sslFlag := kingpin.Flag(
"ssl",
"Secure Portainer instance using SSL (deprecated)",
).Default(defaultSSL).IsSetByUser(&hasSSLFlag)
ssl := sslFlag.Bool()
sslCertFlag := kingpin.Flag(
"sslcert",
"Path to the SSL certificate used to secure the Portainer instance",
).IsSetByUser(&hasSSLCertFlag)
sslCert := sslCertFlag.String()
sslKeyFlag := kingpin.Flag(
"sslkey",
"Path to the SSL key used to secure the Portainer instance",
).IsSetByUser(&hasSSLKeyFlag)
sslKey := sslKeyFlag.String()
flags := CLIFlags()
var hasTLSFlag, hasTLSCertFlag, hasTLSKeyFlag bool
tlsFlag := kingpin.Flag("tlsverify", "TLS support").Default(defaultTLS).IsSetByUser(&hasTLSFlag)
flags.TLS = tlsFlag.Bool()
tlsCertFlag := kingpin.Flag(
"tlscert",
"Path to the TLS certificate file",
).Default(defaultTLSCertPath).IsSetByUser(&hasTLSCertFlag)
flags.TLSCert = tlsCertFlag.String()
tlsKeyFlag := kingpin.Flag("tlskey", "Path to the TLS key").Default(defaultTLSKeyPath).IsSetByUser(&hasTLSKeyFlag)
flags.TLSKey = tlsKeyFlag.String()
flags.TLSCacert = kingpin.Flag("tlscacert", "Path to the CA").Default(defaultTLSCACertPath).String()
kingpin.Parse()
if !filepath.IsAbs(*flags.Assets) {
@ -81,11 +105,46 @@ func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
*flags.Assets = filepath.Join(filepath.Dir(ex), *flags.Assets)
}
// If the user didn't provide a tls flag remove the defaults to match previous behaviour
if !hasTLSFlag {
if !hasTLSCertFlag {
*flags.TLSCert = ""
}
if !hasTLSKeyFlag {
*flags.TLSKey = ""
}
}
if hasSSLFlag {
log.Warn().Msgf("the %q flag is deprecated. use %q instead.", sslFlag.Model().Name, tlsFlag.Model().Name)
if !hasTLSFlag {
flags.TLS = ssl
}
}
if hasSSLCertFlag {
log.Warn().Msgf("the %q flag is deprecated. use %q instead.", sslCertFlag.Model().Name, tlsCertFlag.Model().Name)
if !hasTLSCertFlag {
flags.TLSCert = sslCert
}
}
if hasSSLKeyFlag {
log.Warn().Msgf("the %q flag is deprecated. use %q instead.", sslKeyFlag.Model().Name, tlsKeyFlag.Model().Name)
if !hasTLSKeyFlag {
flags.TLSKey = sslKey
}
}
return flags, nil
}
// ValidateFlags validates the values of the flags.
func (*Service) ValidateFlags(flags *portainer.CLIFlags) error {
func (Service) ValidateFlags(flags *portainer.CLIFlags) error {
displayDeprecationWarnings(flags)
if err := validateEndpointURL(*flags.EndpointURL); err != nil {
@ -107,10 +166,6 @@ func displayDeprecationWarnings(flags *portainer.CLIFlags) {
if *flags.NoAnalytics {
log.Warn().Msg("the --no-analytics flag has been kept to allow migration of instances running a previous version of Portainer with this flag enabled, to version 2.0 where enabling this flag will have no effect")
}
if *flags.SSL {
log.Warn().Msg("SSL is enabled by default and there is no need for the --ssl flag, it has been kept to allow migration of instances running a previous version of Portainer with this flag enabled")
}
}
func validateEndpointURL(endpointURL string) error {

api/cli/cli_test.go (new file, 209 lines)
View File

@ -0,0 +1,209 @@
package cli
import (
"io"
"os"
"strings"
"testing"
zerolog "github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
)
func TestOptionParser(t *testing.T) {
p := Service{}
require.NotNil(t, p)
a := os.Args
defer func() { os.Args = a }()
os.Args = []string{"portainer", "--edge-compute"}
opts, err := p.ParseFlags("2.34.5")
require.NoError(t, err)
require.False(t, *opts.HTTPDisabled)
require.True(t, *opts.EnableEdgeComputeFeatures)
}
func TestParseTLSFlags(t *testing.T) {
testCases := []struct {
name string
args []string
expectedTLSFlag bool
expectedTLSCertFlag string
expectedTLSKeyFlag string
expectedLogMessages []string
}{
{
name: "no flags",
expectedTLSFlag: false,
expectedTLSCertFlag: "",
expectedTLSKeyFlag: "",
},
{
name: "only ssl flag",
args: []string{
"portainer",
"--ssl",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "",
expectedTLSKeyFlag: "",
},
{
name: "only tls flag",
args: []string{
"portainer",
"--tlsverify",
},
expectedTLSFlag: true,
expectedTLSCertFlag: defaultTLSCertPath,
expectedTLSKeyFlag: defaultTLSKeyPath,
},
{
name: "partial ssl flags",
args: []string{
"portainer",
"--ssl",
"--sslcert=ssl-cert-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "ssl-cert-flag-value",
expectedTLSKeyFlag: "",
},
{
name: "partial tls flags",
args: []string{
"portainer",
"--tlsverify",
"--tlscert=tls-cert-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "tls-cert-flag-value",
expectedTLSKeyFlag: defaultTLSKeyPath,
},
{
name: "partial tls and ssl flags",
args: []string{
"portainer",
"--tlsverify",
"--tlscert=tls-cert-flag-value",
"--sslkey=ssl-key-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "tls-cert-flag-value",
expectedTLSKeyFlag: "ssl-key-flag-value",
},
{
name: "partial tls and ssl flags 2",
args: []string{
"portainer",
"--ssl",
"--tlscert=tls-cert-flag-value",
"--sslkey=ssl-key-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "tls-cert-flag-value",
expectedTLSKeyFlag: "ssl-key-flag-value",
},
{
name: "ssl flags",
args: []string{
"portainer",
"--ssl",
"--sslcert=ssl-cert-flag-value",
"--sslkey=ssl-key-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "ssl-cert-flag-value",
expectedTLSKeyFlag: "ssl-key-flag-value",
expectedLogMessages: []string{
"the \\\"ssl\\\" flag is deprecated. use \\\"tlsverify\\\" instead.",
"the \\\"sslcert\\\" flag is deprecated. use \\\"tlscert\\\" instead.",
"the \\\"sslkey\\\" flag is deprecated. use \\\"tlskey\\\" instead.",
},
},
{
name: "tls flags",
args: []string{
"portainer",
"--tlsverify",
"--tlscert=tls-cert-flag-value",
"--tlskey=tls-key-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "tls-cert-flag-value",
expectedTLSKeyFlag: "tls-key-flag-value",
},
{
name: "tls and ssl flags",
args: []string{
"portainer",
"--tlsverify",
"--tlscert=tls-cert-flag-value",
"--tlskey=tls-key-flag-value",
"--ssl",
"--sslcert=ssl-cert-flag-value",
"--sslkey=ssl-key-flag-value",
},
expectedTLSFlag: true,
expectedTLSCertFlag: "tls-cert-flag-value",
expectedTLSKeyFlag: "tls-key-flag-value",
expectedLogMessages: []string{
"the \\\"ssl\\\" flag is deprecated. use \\\"tlsverify\\\" instead.",
"the \\\"sslcert\\\" flag is deprecated. use \\\"tlscert\\\" instead.",
"the \\\"sslkey\\\" flag is deprecated. use \\\"tlskey\\\" instead.",
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var logOutput strings.Builder
setupLogOutput(t, &logOutput)
if tc.args == nil {
tc.args = []string{"portainer"}
}
setOsArgs(t, tc.args)
s := Service{}
flags, err := s.ParseFlags("test-version")
if err != nil {
t.Fatalf("error parsing flags: %v", err)
}
if flags.TLS == nil {
t.Fatal("TLS flag was nil")
}
require.Equal(t, tc.expectedTLSFlag, *flags.TLS, "tlsverify flag didn't match")
require.Equal(t, tc.expectedTLSCertFlag, *flags.TLSCert, "tlscert flag didn't match")
require.Equal(t, tc.expectedTLSKeyFlag, *flags.TLSKey, "tlskey flag didn't match")
for _, expectedLogMessage := range tc.expectedLogMessages {
require.Contains(t, logOutput.String(), expectedLogMessage, "Log didn't contain expected message")
}
})
}
}
func setOsArgs(t *testing.T, args []string) {
t.Helper()
previousArgs := os.Args
os.Args = args
t.Cleanup(func() {
os.Args = previousArgs
})
}
func setupLogOutput(t *testing.T, w io.Writer) {
t.Helper()
oldLogger := zerolog.Logger
zerolog.Logger = zerolog.Output(w)
t.Cleanup(func() {
zerolog.Logger = oldLogger
})
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"strings"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/alecthomas/kingpin/v2"
)
type pairList []portainer.Pair

View File

@ -49,16 +49,18 @@ import (
"github.com/portainer/portainer/api/stacks/deployments"
"github.com/portainer/portainer/pkg/build"
"github.com/portainer/portainer/pkg/featureflags"
"github.com/portainer/portainer/pkg/fips"
"github.com/portainer/portainer/pkg/libhelm"
libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types"
"github.com/portainer/portainer/pkg/libstack/compose"
"github.com/portainer/portainer/pkg/validate"
"github.com/gofrs/uuid"
"github.com/rs/zerolog/log"
)
func initCLI() *portainer.CLIFlags {
cliService := &cli.Service{}
cliService := cli.Service{}
flags, err := cliService.ParseFlags(portainer.APIVersion)
if err != nil {
@ -305,8 +307,19 @@ func initKeyPair(fileService portainer.FileService, signatureService portainer.D
return generateAndStoreKeyPair(fileService, signatureService)
}
// dbSecretPath build the path to the file that contains the db encryption
// secret. Normally in Docker this is built from the static path inside
// /run/portainer for example: /run/portainer/<keyFilenameFlag> but for ease of
// use outside Docker it also accepts an absolute path
func dbSecretPath(keyFilenameFlag string) string {
if path.IsAbs(keyFilenameFlag) {
return keyFilenameFlag
}
return path.Join("/run/portainer", keyFilenameFlag)
}
func loadEncryptionSecretKey(keyfilename string) []byte {
content, err := os.ReadFile(path.Join("/run/secrets", keyfilename))
content, err := os.ReadFile(keyfilename)
if err != nil {
if os.IsNotExist(err) {
log.Info().Str("filename", keyfilename).Msg("encryption key file not present")
@ -318,6 +331,7 @@ func loadEncryptionSecretKey(keyfilename string) []byte {
}
// return a 32 byte hash of the secret (required for AES)
// fips compliant version of this is not implemented in -ce
hash := sha256.Sum256(content)
return hash[:]
@ -330,8 +344,23 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
featureflags.Parse(*flags.FeatureFlags, portainer.SupportedFeatureFlags)
}
trustedOrigins := []string{}
if *flags.TrustedOrigins != "" {
// validate if the trusted origins are valid urls
for _, origin := range strings.Split(*flags.TrustedOrigins, ",") {
if !validate.IsTrustedOrigin(origin) {
log.Fatal().Str("trusted_origin", origin).Msg("invalid url for trusted origin. Please check the trusted origins flag.")
}
trustedOrigins = append(trustedOrigins, origin)
}
}
// -ce can not ever be run in FIPS mode
fips.InitFIPS(false)
fileService := initFileService(*flags.Data)
encryptionKey := loadEncryptionSecretKey(*flags.SecretKeyName)
encryptionKey := loadEncryptionSecretKey(dbSecretPath(*flags.SecretKeyName))
if encryptionKey == nil {
log.Info().Msg("proceeding without encryption key")
}
@ -364,21 +393,22 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing JWT service")
}
ldapService := &ldap.Service{}
ldapService := ldap.Service{}
oauthService := oauth.NewService()
gitService := git.NewService(shutdownCtx)
openAMTService := openamt.NewService()
// Setting insecureSkipVerify to true to preserve the old behaviour.
openAMTService := openamt.NewService(true)
cryptoService := &crypto.Service{}
cryptoService := crypto.Service{}
signatureService := initDigitalSignatureService()
edgeStacksService := edgestacks.NewService(dataStore)
sslService, err := initSSLService(*flags.AddrHTTPS, *flags.SSLCert, *flags.SSLKey, fileService, dataStore, shutdownTrigger)
sslService, err := initSSLService(*flags.AddrHTTPS, *flags.TLSCert, *flags.TLSKey, fileService, dataStore, shutdownTrigger)
if err != nil {
log.Fatal().Err(err).Msg("")
}
@ -437,7 +467,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
snapshotService.Start()
proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService)
proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService, jwtService)
helmPackageManager, err := initHelmPackageManager()
if err != nil {
@ -545,6 +575,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
Status: applicationStatus,
BindAddress: *flags.Addr,
BindAddressHTTPS: *flags.AddrHTTPS,
CSP: *flags.CSP,
HTTPEnabled: sslDBSettings.HTTPEnabled,
AssetsPath: *flags.Assets,
DataStore: dataStore,
@ -578,6 +609,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
PendingActionsService: pendingActionsService,
PlatformService: platformService,
PullLimitCheckDisabled: *flags.PullLimitCheckDisabled,
TrustedOrigins: trustedOrigins,
}
}
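
For illustration, not part of the changes above: the trusted-origins handling added to buildServer() pairs with the new --trusted-origins flag defined in the cli package earlier in this compare. A minimal sketch of that flow in the style of api/cli/cli_test.go; the origin URLs and the helper name are hypothetical, and the import path is inferred from the other api/* imports shown:

package example

import (
	"os"
	"strings"

	"github.com/portainer/portainer/api/cli" // path inferred from the api/cli files above
)

// exampleTrustedOrigins shows the flag-to-slice flow: ParseFlags captures the raw
// comma-separated string, and buildServer() splits it, validating each entry with
// validate.IsTrustedOrigin before trusting it.
func exampleTrustedOrigins() ([]string, error) {
	os.Args = []string{"portainer", "--trusted-origins=https://portainer.example.com,https://admin.example.com"}

	flags, err := cli.Service{}.ParseFlags("dev")
	if err != nil {
		return nil, err
	}

	if *flags.TrustedOrigins == "" {
		return nil, nil
	}

	return strings.Split(*flags.TrustedOrigins, ","), nil
}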

View File

@ -0,0 +1,57 @@
package main
import (
"os"
"path"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const secretFileName = "secret.txt"
func createPasswordFile(t *testing.T, secretPath, password string) string {
err := os.WriteFile(secretPath, []byte(password), 0600)
require.NoError(t, err)
return secretPath
}
func TestLoadEncryptionSecretKey(t *testing.T) {
tempDir := t.TempDir()
secretPath := path.Join(tempDir, secretFileName)
// first pointing to file that does not exist, gives nil hash (no encryption)
encryptionKey := loadEncryptionSecretKey(secretPath)
require.Nil(t, encryptionKey)
// point to a directory instead of a file
encryptionKey = loadEncryptionSecretKey(tempDir)
require.Nil(t, encryptionKey)
password := "portainer@1234"
createPasswordFile(t, secretPath, password)
encryptionKey = loadEncryptionSecretKey(secretPath)
require.NotNil(t, encryptionKey)
// should be 32 bytes for aes256 encryption
require.Len(t, encryptionKey, 32)
}
func TestDBSecretPath(t *testing.T) {
tests := []struct {
keyFilenameFlag string
expected string
}{
{keyFilenameFlag: "secret.txt", expected: "/run/portainer/secret.txt"},
{keyFilenameFlag: "/tmp/secret.txt", expected: "/tmp/secret.txt"},
{keyFilenameFlag: "/run/portainer/secret.txt", expected: "/run/portainer/secret.txt"},
{keyFilenameFlag: "./secret.txt", expected: "/run/portainer/secret.txt"},
{keyFilenameFlag: "../secret.txt", expected: "/run/secret.txt"},
{keyFilenameFlag: "foo/bar/secret.txt", expected: "/run/portainer/foo/bar/secret.txt"},
}
for _, test := range tests {
assert.Equal(t, test.expected, dbSecretPath(test.keyFilenameFlag))
}
}

View File

@ -5,13 +5,19 @@ import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/pbkdf2"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"io"
"strings"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/scrypt"
"github.com/portainer/portainer/pkg/fips"
// Not allowed in FIPS mode
"golang.org/x/crypto/argon2" //nolint:depguard
"golang.org/x/crypto/scrypt" //nolint:depguard
)
const (
@ -19,20 +25,32 @@ const (
aesGcmHeader = "AES256-GCM" // The encrypted file header
aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm
aesGcmFIPSHeader = "FIPS-AES256-GCM"
aesGcmFIPSBlockSize = 16 * 1024 * 1024 // 16MB block for aes gcm
// Argon2 settings
// Recommded settings lower memory hardware according to current OWASP recommendations
// Recommended settings lower memory hardware according to current OWASP recommendations
// Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware
// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id
argon2MemoryCost = 12 * 1024
argon2TimeCost = 3
argon2Threads = 1
argon2KeyLength = 32
pbkdf2Iterations = 600_000 // use recommended iterations from https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 a little overkill for this use
pbkdf2SaltLength = 32
)
// AesEncrypt reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key
func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
if err := aesEncryptGCM(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
if fips.FIPSMode() {
if err := aesEncryptGCMFIPS(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
} else {
if err := aesEncryptGCM(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
}
return nil
@ -40,14 +58,35 @@ func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
// AesDecrypt reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from
func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, fips.FIPSMode())
}
func aesDecrypt(input io.Reader, passphrase []byte, fipsMode bool) (io.Reader, error) {
// Read file header to determine how it was encrypted
inputReader := bufio.NewReader(input)
header, err := inputReader.Peek(len(aesGcmHeader))
header, err := inputReader.Peek(len(aesGcmFIPSHeader))
if err != nil {
return nil, fmt.Errorf("error reading encrypted backup file header: %w", err)
}
if string(header) == aesGcmHeader {
if strings.HasPrefix(string(header), aesGcmFIPSHeader) {
if !fipsMode {
return nil, errors.New("fips encrypted file detected but fips mode is not enabled")
}
reader, err := aesDecryptGCMFIPS(inputReader, passphrase)
if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err)
}
return reader, nil
}
if strings.HasPrefix(string(header), aesGcmHeader) {
if fipsMode {
return nil, errors.New("fips mode is enabled but non-fips encrypted file detected")
}
reader, err := aesDecryptGCM(inputReader, passphrase)
if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err)
@ -114,15 +153,14 @@ func aesEncryptGCM(input io.Reader, output io.Writer, passphrase []byte) error {
break // end of plaintext input
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
return err
}
// Seal encrypts the plaintext using the nonce returning the updated slice.
ciphertext = aesgcm.Seal(ciphertext[:0], nonce.Value(), buf[:n], nil)
_, err = output.Write(ciphertext)
if err != nil {
if _, err := output.Write(ciphertext); err != nil {
return err
}
@ -183,7 +221,7 @@ func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) {
break // end of ciphertext
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
return nil, err
}
@ -203,15 +241,138 @@ func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) {
return &buf, nil
}
// aesEncryptGCMFIPS reads from input, encrypts with AES-256 in a fips compliant
// way and writes to output. passphrase is used to generate an encryption key.
func aesEncryptGCMFIPS(input io.Reader, output io.Writer, passphrase []byte) error {
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
return err
}
key, err := pbkdf2.Key(sha256.New, string(passphrase), salt, pbkdf2Iterations, 32)
if err != nil {
return fmt.Errorf("error deriving key: %w", err)
}
block, err := aes.NewCipher(key)
if err != nil {
return err
}
// write the header
if _, err := output.Write([]byte(aesGcmFIPSHeader)); err != nil {
return err
}
// Write nonce and salt to the output file
if _, err := output.Write(salt); err != nil {
return err
}
// Buffer for reading plaintext blocks
buf := make([]byte, aesGcmFIPSBlockSize)
// Encrypt plaintext in blocks
for {
// new random nonce for each block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return fmt.Errorf("error creating gcm: %w", err)
}
n, err := io.ReadFull(input, buf)
if n == 0 {
break // end of plaintext input
}
if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
return err
}
// Seal encrypts the plaintext
ciphertext := aesgcm.Seal(nil, nil, buf[:n], nil)
if _, err := output.Write(ciphertext); err != nil {
return err
}
}
return nil
}
// aesDecryptGCMFIPS reads from input, decrypts with AES-256 in a fips compliant
// way and returns the reader to read the decrypted content from.
func aesDecryptGCMFIPS(input io.Reader, passphrase []byte) (io.Reader, error) {
// Read & verify header
header := make([]byte, len(aesGcmFIPSHeader))
if _, err := io.ReadFull(input, header); err != nil {
return nil, err
}
if string(header) != aesGcmFIPSHeader {
return nil, errors.New("invalid header")
}
// Read salt
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(input, salt); err != nil {
return nil, err
}
key, err := pbkdf2.Key(sha256.New, string(passphrase), salt, pbkdf2Iterations, 32)
if err != nil {
return nil, fmt.Errorf("error deriving key: %w", err)
}
// Initialize AES cipher block
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
// Initialize a buffer to store decrypted data
buf := bytes.Buffer{}
// Decrypt the ciphertext in blocks
for {
// Create GCM mode with the cipher block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return nil, err
}
// Read a block of ciphertext from the input reader
ciphertextBlock := make([]byte, aesGcmFIPSBlockSize+aesgcm.Overhead())
n, err := io.ReadFull(input, ciphertextBlock)
if n == 0 {
break // end of ciphertext
}
if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
return nil, err
}
// Decrypt the block of ciphertext
plaintext, err := aesgcm.Open(nil, nil, ciphertextBlock[:n], nil)
if err != nil {
return nil, err
}
if _, err := buf.Write(plaintext); err != nil {
return nil, err
}
}
return &buf, nil
}
// aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from.
// passphrase is used to generate an encryption key.
// note: This function used to decrypt files that were encrypted without a header i.e. old archives
func aesDecryptOFB(input io.Reader, passphrase []byte) (io.Reader, error) {
var emptySalt []byte = make([]byte, 0)
// making a 32 bytes key that would correspond to AES-256
// don't necessarily need a salt, so it's just kept empty
key, err := scrypt.Key(passphrase, emptySalt, 32768, 8, 1, 32)
key, err := scrypt.Key(passphrase, nil, 32768, 8, 1, 32)
if err != nil {
return nil, err
}
@ -228,3 +389,18 @@ func aesDecryptOFB(input io.Reader, passphrase []byte) (io.Reader, error) {
return reader, nil
}
// HasEncryptedHeader checks if the data has an encrypted header, note that fips
// mode changes this behavior and so will only recognize data encrypted by the
// same mode (fips enabled or disabled)
func HasEncryptedHeader(data []byte) bool {
return hasEncryptedHeader(data, fips.FIPSMode())
}
func hasEncryptedHeader(data []byte, fipsMode bool) bool {
if fipsMode {
return bytes.HasPrefix(data, []byte(aesGcmFIPSHeader))
}
return bytes.HasPrefix(data, []byte(aesGcmHeader))
}
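
The exported entry points keep their original signatures; only the header and key derivation depend on FIPS mode. A minimal round-trip sketch using the functions above — the import path, file name, and passphrase are illustrative:

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/portainer/portainer/api/crypto"
)

func main() {
	passphrase := []byte("example-passphrase") // illustrative only

	// Encrypt: AesEncrypt writes either the FIPS or the non-FIPS header
	// depending on the mode the binary was started in.
	in := bytes.NewReader([]byte("backup payload"))
	out, err := os.Create("backup.enc") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	if err := crypto.AesEncrypt(in, out, passphrase); err != nil {
		log.Fatal(err)
	}
	out.Close()

	// Decrypt: AesDecrypt inspects the header and rejects files produced
	// under the other FIPS setting, as enforced by aesDecrypt above.
	enc, err := os.Open("backup.enc")
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	r, err := crypto.AesDecrypt(enc, passphrase)
	if err != nil {
		log.Fatal(err)
	}
	plain, _ := io.ReadAll(r)
	log.Printf("decrypted %d bytes", len(plain))
}
```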

View File

@ -1,15 +1,25 @@
package crypto
import (
"crypto/aes"
"crypto/cipher"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/scrypt"
)
func init() {
fips.InitFIPS(false)
}
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func randBytes(n int) []byte {
@ -17,201 +27,399 @@ func randBytes(n int) []byte {
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return b
}
type encryptFunc func(input io.Reader, output io.Writer, passphrase []byte) error
type decryptFunc func(input io.Reader, passphrase []byte) (io.Reader, error)
func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) {
const passphrase = "passphrase"
tmpdir := t.TempDir()
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc, decryptShouldSucceed bool) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1024*1024*100 + 523)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1024*1024*100 + 523)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
require.NoError(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.NoError(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, err := os.Open(encryptedFilePath)
require.NoError(t, err)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, err := os.Create(decryptedFilePath)
require.NoError(t, err)
defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
if !decryptShouldSucceed {
require.Error(t, err, "Failed to decrypt file as indicated by decryptShouldSucceed")
} else {
require.NoError(t, err, "Failed to decrypt file indicated by decryptShouldSucceed")
io.Copy(decryptedFileWriter, decryptedReader)
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS, true)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM, true)
})
t.Run("system_fips_mode_public_entry_points", func(t *testing.T) {
// use the init mode, public entry points
testFunc(t, AesEncrypt, AesDecrypt, true)
})
t.Run("fips_encrypted_file_header_fails_in_non_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, false)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, false)
})
t.Run("non_fips_encrypted_file_header_fails_in_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCM, decrypt, false)
})
t.Run("fips_encrypted_file_fails_in_non_fips_mode", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCM, false)
})
t.Run("non_fips_encrypted_file_with_fips_mode_should_fail", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCMFIPS, false)
})
t.Run("fips_with_base_aesDecrypt", func(t *testing.T) {
// maximize coverage, use the base aesDecrypt function with valid fips mode
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, true)
})
t.Run("legacy", func(t *testing.T) {
testFunc(t, legacyAesEncrypt, aesDecryptOFB, true)
})
}
func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) {
const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+"
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
require.NoError(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.NoError(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
io.Copy(decryptedFileWriter, decryptedReader)
decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
require.NoError(t, err, "Failed to decrypt file")
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
}
func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) {
tmpdir := t.TempDir()
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
require.NoError(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.NoError(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, err := os.Open(encryptedFilePath)
require.NoError(t, err)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, err := os.Create(decryptedFilePath)
require.NoError(t, err)
defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("passphrase"))
assert.Nil(t, err, "Failed to decrypt file")
decryptedReader, err := decrypt(encryptedFileReader, []byte("passphrase"))
require.NoError(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
_, err = io.Copy(decryptedFileWriter, decryptedReader)
require.NoError(t, err)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
decryptedContent, err := os.ReadFile(decryptedFilePath)
require.NoError(t, err)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
}
func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) {
tmpdir := t.TempDir()
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1024 * 50)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1024 * 50)
err := os.WriteFile(originFilePath, content, 0600)
require.NoError(t, err)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, err := os.Open(originFilePath)
require.NoError(t, err)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
encryptedFileWriter, err := os.Create(encryptedFilePath)
require.NoError(t, err)
defer encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte(""))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
err = encrypt(originFile, encryptedFileWriter, []byte(""))
require.NoError(t, err, "Failed to encrypt a file")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.NoError(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
encryptedFileReader, err := os.Open(encryptedFilePath)
require.NoError(t, err)
defer encryptedFileReader.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(""))
assert.Nil(t, err, "Failed to decrypt file")
decryptedFileWriter, err := os.Create(decryptedFilePath)
require.NoError(t, err)
defer decryptedFileWriter.Close()
io.Copy(decryptedFileWriter, decryptedReader)
decryptedReader, err := decrypt(encryptedFileReader, []byte(""))
require.NoError(t, err, "Failed to decrypt file")
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
_, err = io.Copy(decryptedFileWriter, decryptedReader)
require.NoError(t, err)
decryptedContent, err := os.ReadFile(decryptedFilePath)
require.NoError(t, err)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
}
func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T) {
tmpdir := t.TempDir()
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1034)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1034)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
require.NoError(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.NoError(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
_, err = AesDecrypt(encryptedFileReader, []byte("garbage"))
assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase")
_, err = decrypt(encryptedFileReader, []byte("garbage"))
require.Error(t, err, "Should not allow decrypt with wrong passphrase")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
}
func legacyAesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
key, err := scrypt.Key(passphrase, nil, 32768, 8, 1, 32)
if err != nil {
return err
}
block, err := aes.NewCipher(key)
if err != nil {
return err
}
var iv [aes.BlockSize]byte
stream := cipher.NewOFB(block, iv[:])
writer := &cipher.StreamWriter{S: stream, W: output}
if _, err := io.Copy(writer, input); err != nil {
return err
}
return nil
}
func Test_hasEncryptedHeader(t *testing.T) {
tests := []struct {
name string
data []byte
fipsMode bool
want bool
}{
{
name: "non-FIPS mode with valid header",
data: []byte("AES256-GCM" + "some encrypted data"),
fipsMode: false,
want: true,
},
{
name: "non-FIPS mode with FIPS header",
data: []byte("FIPS-AES256-GCM" + "some encrypted data"),
fipsMode: false,
want: false,
},
{
name: "FIPS mode with valid header",
data: []byte("FIPS-AES256-GCM" + "some encrypted data"),
fipsMode: true,
want: true,
},
{
name: "FIPS mode with non-FIPS header",
data: []byte("AES256-GCM" + "some encrypted data"),
fipsMode: true,
want: false,
},
{
name: "invalid header",
data: []byte("INVALID-HEADER" + "some data"),
fipsMode: false,
want: false,
},
{
name: "empty data",
data: []byte{},
fipsMode: false,
want: false,
},
{
name: "nil data",
data: nil,
fipsMode: false,
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := hasEncryptedHeader(tt.data, tt.fipsMode)
assert.Equal(t, tt.want, got)
})
}
}

View File

@ -112,7 +112,7 @@ func (service *ECDSAService) CreateSignature(message string) (string, error) {
message = service.secret
}
hash := libcrypto.HashFromBytes([]byte(message))
hash := libcrypto.InsecureHashFromBytes([]byte(message))
r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash)
if err != nil {

22
api/crypto/ecdsa_test.go Normal file
View File

@ -0,0 +1,22 @@
package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCreateSignature(t *testing.T) {
var s = NewECDSAService("secret")
privKey, pubKey, err := s.GenerateKeyPair()
require.NoError(t, err)
require.NotEmpty(t, privKey)
require.NotEmpty(t, pubKey)
m := "test message"
r, err := s.CreateSignature(m)
require.NoError(t, err)
require.NotEqual(t, r, m)
require.NotEmpty(t, r)
}

View File

@ -1,22 +1,24 @@
package crypto
import (
"golang.org/x/crypto/bcrypt"
// Not allowed in FIPS mode
"golang.org/x/crypto/bcrypt" //nolint:depguard
)
// Service represents a service for encrypting/hashing data.
type Service struct{}
// Hash hashes a string using the bcrypt algorithm
func (*Service) Hash(data string) (string, error) {
func (Service) Hash(data string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(data), bcrypt.DefaultCost)
if err != nil {
return "", err
}
return string(bytes), err
}
// CompareHashAndData compares a hash to clear data and returns an error if the comparison fails.
func (*Service) CompareHashAndData(hash string, data string) error {
func (Service) CompareHashAndData(hash string, data string) error {
return bcrypt.CompareHashAndPassword([]byte(hash), []byte(data))
}
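
The receiver change from *Service to Service is behaviour-preserving. A short usage sketch of the two methods:

```go
package main

import (
	"log"

	"github.com/portainer/portainer/api/crypto"
)

func main() {
	var s crypto.Service

	// Hash the password with bcrypt at the default cost.
	hash, err := s.Hash("Passw0rd!")
	if err != nil {
		log.Fatal(err)
	}

	// CompareHashAndData returns nil when the clear-text data matches the hash.
	if err := s.CompareHashAndData(hash, "Passw0rd!"); err != nil {
		log.Fatal("password should have matched")
	}
}
```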

View File

@ -2,10 +2,12 @@ package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestService_Hash(t *testing.T) {
var s = &Service{}
var s = Service{}
type args struct {
hash string
@ -51,3 +53,11 @@ func TestService_Hash(t *testing.T) {
})
}
}
func TestHash(t *testing.T) {
s := Service{}
hash, err := s.Hash("Passw0rd!")
require.NoError(t, err)
require.NotEmpty(t, hash)
}

View File

@ -15,7 +15,7 @@ func NewNonce(size int) *Nonce {
}
// NewRandomNonce generates a new initial nonce with the lower byte set to a random value
// This ensures there are plenty of nonce values availble before rolling over
// This ensures there are plenty of nonce values available before rolling over
// Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier
// https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html
func NewRandomNonce(size int) (*Nonce, error) {

View File

@ -4,11 +4,32 @@ import (
"crypto/tls"
"crypto/x509"
"os"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/pkg/fips"
)
// CreateTLSConfiguration creates a basic tls.Config with recommended TLS settings
func CreateTLSConfiguration() *tls.Config {
return &tls.Config{
func CreateTLSConfiguration(insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
return createTLSConfiguration(fips.FIPSMode(), insecureSkipVerify)
}
func createTLSConfiguration(fipsEnabled bool, insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
if fipsEnabled {
return &tls.Config{ //nolint:forbidigo
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
CurvePreferences: []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521},
}
}
return &tls.Config{ //nolint:forbidigo
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_AES_128_GCM_SHA256,
@ -29,24 +50,33 @@ func CreateTLSConfiguration() *tls.Config {
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
},
InsecureSkipVerify: insecureSkipVerify, //nolint:forbidigo
}
}
// CreateTLSConfigurationFromBytes initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from memory.
func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) {
config := CreateTLSConfiguration()
config.InsecureSkipVerify = skipServerVerification
func CreateTLSConfigurationFromBytes(useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
return createTLSConfigurationFromBytes(fips.FIPSMode(), useTLS, caCert, cert, key, skipClientVerification, skipServerVerification)
}
if !skipClientVerification {
func createTLSConfigurationFromBytes(fipsEnabled, useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
if !useTLS {
return nil, nil
}
config := createTLSConfiguration(fipsEnabled, skipServerVerification)
if !skipClientVerification || fipsEnabled {
certificate, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, err
}
config.Certificates = []tls.Certificate{certificate}
}
if !skipServerVerification {
if !skipServerVerification || fipsEnabled {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
config.RootCAs = caCertPool
@ -57,29 +87,36 @@ func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerific
// CreateTLSConfigurationFromDisk initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from disk.
func CreateTLSConfigurationFromDisk(caCertPath, certPath, keyPath string, skipServerVerification bool) (*tls.Config, error) {
config := CreateTLSConfiguration()
config.InsecureSkipVerify = skipServerVerification
func CreateTLSConfigurationFromDisk(config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
return createTLSConfigurationFromDisk(fips.FIPSMode(), config)
}
if certPath != "" && keyPath != "" {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
func createTLSConfigurationFromDisk(fipsEnabled bool, config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
if !config.TLS {
return nil, nil
}
tlsConfig := createTLSConfiguration(fipsEnabled, config.TLSSkipVerify)
if config.TLSCertPath != "" && config.TLSKeyPath != "" {
cert, err := tls.LoadX509KeyPair(config.TLSCertPath, config.TLSKeyPath)
if err != nil {
return nil, err
}
config.Certificates = []tls.Certificate{cert}
tlsConfig.Certificates = []tls.Certificate{cert}
}
if !skipServerVerification && caCertPath != "" {
caCert, err := os.ReadFile(caCertPath)
if !tlsConfig.InsecureSkipVerify && config.TLSCACertPath != "" { //nolint:forbidigo
caCert, err := os.ReadFile(config.TLSCACertPath)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
config.RootCAs = caCertPool
tlsConfig.RootCAs = caCertPool
}
return config, nil
return tlsConfig, nil
}
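
Callers now pass the whole portainer.TLSConfiguration (or a useTLS flag) instead of individual paths, and nil is returned when TLS is disabled. A hedged sketch of the disk-based variant; the certificate paths are placeholders:

```go
package main

import (
	"log"
	"net/http"

	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/crypto"
)

func main() {
	tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{
		TLS:           true,
		TLSSkipVerify: false,
		TLSCACertPath: "/certs/ca.pem", // placeholder paths
		TLSCertPath:   "/certs/cert.pem",
		TLSKeyPath:    "/certs/key.pem",
	})
	if err != nil {
		log.Fatal(err)
	}
	if tlsConfig == nil {
		return // TLS disabled: the function now returns nil, nil
	}

	client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
	_ = client
}
```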

87
api/crypto/tls_test.go Normal file
View File

@ -0,0 +1,87 @@
package crypto
import (
"crypto/tls"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestCreateTLSConfiguration(t *testing.T) {
// InsecureSkipVerify = false
config := CreateTLSConfiguration(false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
// InsecureSkipVerify = true
config = CreateTLSConfiguration(true)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.True(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFIPS(t *testing.T) {
fips := true
fipsCipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
fipsCurvePreferences := []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521}
config := createTLSConfiguration(fips, false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.Equal(t, config.MaxVersion, uint16(tls.VersionTLS13)) //nolint:forbidigo
require.Equal(t, config.CipherSuites, fipsCipherSuites) //nolint:forbidigo
require.Equal(t, config.CurvePreferences, fipsCurvePreferences) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFromBytes(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromBytes(false, nil, nil, nil, false, false)
require.NoError(t, err)
require.Nil(t, config)
// Skip TLS client/server verifications
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, true, true)
require.NoError(t, err)
require.NotNil(t, config)
// Empty TLS
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, false, false)
require.Error(t, err)
require.Nil(t, config)
}
func TestCreateTLSConfigurationFromDisk(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{})
require.NoError(t, err)
require.Nil(t, config)
// Skip TLS verifications
config, err = CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
}
func TestCreateTLSConfigurationFromDiskFIPS(t *testing.T) {
fips := true
// Skipping TLS verifications cannot be done in FIPS mode
config, err := createTLSConfigurationFromDisk(fips, portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}

View File

@ -138,6 +138,8 @@ func (connection *DbConnection) Open() error {
db, err := bolt.Open(databasePath, 0600, &bolt.Options{
Timeout: 1 * time.Second,
InitialMmapSize: connection.InitialMmapSize,
FreelistType: bolt.FreelistMapType,
NoFreelistSync: true,
})
if err != nil {
return err
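
For context, both options are standard bbolt settings: the map-based freelist is faster to update for large free lists, and skipping the freelist sync speeds up writes at the cost of rebuilding the freelist when the database is reopened. A minimal sketch of opening a database with the same options (the path is illustrative):

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("portainer.db", 0600, &bolt.Options{ // illustrative path
		Timeout:        1 * time.Second,
		FreelistType:   bolt.FreelistMapType, // keep the freelist as an in-memory map
		NoFreelistSync: true,                 // don't persist it; rebuild on open
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```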

View File

@ -4,8 +4,6 @@ import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"io"
"github.com/pkg/errors"
"github.com/segmentio/encoding/json"
@ -65,18 +63,18 @@ func (connection *DbConnection) UnmarshalObject(data []byte, object any) error {
// https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption
func encrypt(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, _ := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
block, err := aes.NewCipher(passphrase)
if err != nil {
return encrypted, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
// NewGCMWithRandomNonce in go 1.24 handles setting up the nonce and adding it to the encrypted output
gcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return encrypted, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
return gcm.Seal(nil, nil, plaintext, nil), nil
}
func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err error) {
@ -89,19 +87,17 @@ func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err err
return encrypted, errors.Wrap(err, "Error creating cypher block")
}
gcm, err := cipher.NewGCM(block)
// NewGCMWithRandomNonce in go 1.24 handles reading the nonce from the encrypted input for us
gcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM")
}
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
if len(encrypted) < gcm.NonceSize() {
return encrypted, errEncryptedStringTooShort
}
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:]
plaintextByte, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
plaintextByte, err = gcm.Open(nil, nil, encrypted, nil)
if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text")
}
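
cipher.NewGCMWithRandomNonce, added in Go 1.24, generates the nonce inside Seal and stores it at the start of the returned ciphertext, and Open reads it back from there, which is why the manual nonce handling could be dropped. A standalone round-trip sketch:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"
)

func main() {
	key := make([]byte, 32) // AES-256 key
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	// Requires Go 1.24+: the nonce is generated by Seal and prepended to the
	// ciphertext, so both nonce arguments stay nil.
	gcm, err := cipher.NewGCMWithRandomNonce(block)
	if err != nil {
		log.Fatal(err)
	}

	ciphertext := gcm.Seal(nil, nil, []byte("hello"), nil)
	plaintext, err := gcm.Open(nil, nil, ciphertext, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```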

View File

@ -1,12 +1,19 @@
package boltdb
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"testing"
"github.com/gofrs/uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -87,7 +94,7 @@ func Test_MarshalObjectUnencrypted(t *testing.T) {
for _, test := range tests {
t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) {
data, err := conn.MarshalObject(test.object)
is.NoError(err)
require.NoError(t, err)
is.Equal(test.expected, string(data))
})
}
@ -128,7 +135,7 @@ func Test_UnMarshalObjectUnencrypted(t *testing.T) {
t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) {
var object string
err := conn.UnmarshalObject(test.object, &object)
is.NoError(err)
require.NoError(t, err)
is.Equal(test.expected, object)
})
}
@ -160,18 +167,109 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) {
}
key := secretToEncryptionKey(passphrase)
conn := DbConnection{EncryptionKey: key}
conn := DbConnection{EncryptionKey: key, isEncrypted: true}
for _, test := range tests {
t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) {
data, err := conn.MarshalObject(test.object)
is.NoError(err)
require.NoError(t, err)
var object []byte
err = conn.UnmarshalObject(data, &object)
is.NoError(err)
require.NoError(t, err)
is.Equal(test.object, object)
})
}
}
func Test_NonceSources(t *testing.T) {
// ensure that the new go 1.24 NewGCMWithRandomNonce works correctly with
// the old way of creating and including the nonce
encryptOldFn := func(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, _ := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
return encrypted, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
decryptOldFn := func(encrypted []byte, passphrase []byte) (plaintext []byte, err error) {
block, err := aes.NewCipher(passphrase)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating cypher block")
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM")
}
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
return encrypted, errEncryptedStringTooShort
}
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:]
plaintext, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text")
}
return plaintext, err
}
encryptNewFn := encrypt
decryptNewFn := decrypt
passphrase := make([]byte, 32)
_, err := io.ReadFull(rand.Reader, passphrase)
require.NoError(t, err)
junk := make([]byte, 1024)
_, err = io.ReadFull(rand.Reader, junk)
require.NoError(t, err)
junkEnc := make([]byte, base64.StdEncoding.EncodedLen(len(junk)))
base64.StdEncoding.Encode(junkEnc, junk)
cases := [][]byte{
[]byte("test"),
[]byte("35"),
[]byte("9ca4a1dd-a439-4593-b386-a7dfdc2e9fc6"),
[]byte(jsonobject),
passphrase,
junk,
junkEnc,
}
for _, plain := range cases {
var enc, dec []byte
var err error
enc, err = encryptOldFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptNewFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
enc, err = encryptNewFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptOldFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
}
}

View File

@ -10,7 +10,7 @@ type BaseCRUD[T any, I constraints.Integer] interface {
Create(element *T) error
Read(ID I) (*T, error)
Exists(ID I) (bool, error)
ReadAll() ([]T, error)
ReadAll(predicates ...func(T) bool) ([]T, error)
Update(ID I, element *T) error
Delete(ID I) error
}
@ -56,12 +56,13 @@ func (service BaseDataService[T, I]) Exists(ID I) (bool, error) {
return exists, err
}
func (service BaseDataService[T, I]) ReadAll() ([]T, error) {
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataService[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
var collection = make([]T, 0)
return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
collection, err = service.Tx(tx).ReadAll()
collection, err = service.Tx(tx).ReadAll(predicates...)
return err
})

View File

@ -0,0 +1,92 @@
package dataservices
import (
"strconv"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/slicesx"
"github.com/stretchr/testify/require"
)
type testObject struct {
ID int
Value int
}
type mockConnection struct {
store map[int]testObject
portainer.Connection
}
func (m mockConnection) UpdateObject(bucket string, key []byte, value interface{}) error {
obj := value.(*testObject)
m.store[obj.ID] = *obj
return nil
}
func (m mockConnection) GetAll(bucketName string, obj any, appendFn func(o any) (any, error)) error {
for _, v := range m.store {
if _, err := appendFn(&v); err != nil {
return err
}
}
return nil
}
func (m mockConnection) UpdateTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ViewTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ConvertToKey(v int) []byte {
return []byte(strconv.Itoa(v))
}
func TestReadAll(t *testing.T) {
service := BaseDataService[testObject, int]{
Bucket: "testBucket",
Connection: mockConnection{store: make(map[int]testObject)},
}
data := []testObject{
{ID: 1, Value: 1},
{ID: 2, Value: 2},
{ID: 3, Value: 3},
{ID: 4, Value: 4},
{ID: 5, Value: 5},
}
for _, item := range data {
err := service.Update(item.ID, &item)
require.NoError(t, err)
}
// ReadAll without predicates
result, err := service.ReadAll()
require.NoError(t, err)
expected := append([]testObject{}, data...)
require.ElementsMatch(t, expected, result)
// ReadAll with predicates
hasLowID := func(obj testObject) bool { return obj.ID < 3 }
isEven := func(obj testObject) bool { return obj.Value%2 == 0 }
result, err = service.ReadAll(hasLowID, isEven)
require.NoError(t, err)
expected = slicesx.Filter(expected, hasLowID)
expected = slicesx.Filter(expected, isEven)
require.ElementsMatch(t, expected, result)
}

View File

@ -34,13 +34,32 @@ func (service BaseDataServiceTx[T, I]) Exists(ID I) (bool, error) {
return service.Tx.KeyExists(service.Bucket, identifier)
}
func (service BaseDataServiceTx[T, I]) ReadAll() ([]T, error) {
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataServiceTx[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
var collection = make([]T, 0)
if len(predicates) == 0 {
return collection, service.Tx.GetAll(
service.Bucket,
new(T),
AppendFn(&collection),
)
}
filterFn := func(element T) bool {
for _, p := range predicates {
if !p(element) {
return false
}
}
return true
}
return collection, service.Tx.GetAll(
service.Bucket,
new(T),
AppendFn(&collection),
FilterFn(&collection, filterFn),
)
}
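
With either variant, every predicate must hold for an element to be returned, and calling ReadAll with no predicates behaves exactly as before. A hedged usage sketch against a hypothetical endpoint store backed by this interface (the field and constant names come from the public portainer API):

```go
package example

import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)

// edgeEndpointsInGroup is an illustrative sketch: filtering happens at read time,
// and only elements satisfying all predicates are returned.
func edgeEndpointsInGroup(
	store dataservices.BaseCRUD[portainer.Endpoint, portainer.EndpointID],
	groupID portainer.EndpointGroupID,
) ([]portainer.Endpoint, error) {
	return store.ReadAll(
		func(e portainer.Endpoint) bool { return e.Type == portainer.EdgeAgentOnDockerEnvironment },
		func(e portainer.Endpoint) bool { return e.GroupID == groupID },
	)
}
```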

View File

@ -17,11 +17,29 @@ func (service ServiceTx) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFun
}
func (service ServiceTx) Create(group *portainer.EdgeGroup) error {
return service.Tx.CreateObject(
es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.Tx.CreateObject(
BucketName,
func(id uint64) (int, any) {
group.ID = portainer.EdgeGroupID(id)
return int(group.ID), group
},
)
group.Endpoints = es // Restore endpoints after create
return err
}
func (service ServiceTx) Update(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error {
es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.BaseDataServiceTx.Update(ID, group)
group.Endpoints = es // Restore endpoints after update
return err
}

View File

@ -0,0 +1,50 @@
package edgestack
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/stretchr/testify/require"
)
func TestUpdate(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn, func(portainer.Transaction, portainer.EdgeStackID) {})
require.NoError(t, err)
const edgeStackID = 1
edgeStack := &portainer.EdgeStack{
ID: edgeStackID,
Name: "Test Stack",
}
err = service.Create(edgeStackID, edgeStack)
require.NoError(t, err)
err = service.UpdateEdgeStackFunc(edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack"
})
require.NoError(t, err)
updatedStack, err := service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack", updatedStack.Name)
err = conn.UpdateTx(func(tx portainer.Transaction) error {
return service.UpdateEdgeStackFuncTx(tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack Again"
})
})
require.NoError(t, err)
updatedStack, err = service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack Again", updatedStack.Name)
}

View File

@ -0,0 +1,89 @@
package edgestackstatus
import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
const BucketName = "edge_stack_status"
type Service struct {
conn portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
}
func NewService(connection portainer.Connection) (*Service, error) {
if err := connection.SetServiceName(BucketName); err != nil {
return nil, err
}
return &Service{conn: connection}, nil
}
func (s *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{
service: s,
tx: tx,
}
}
func (s *Service) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Create(edgeStackID, endpointID, status)
})
}
func (s *Service) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var element *portainer.EdgeStackStatusForEnv
return element, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
element, err = s.Tx(tx).Read(edgeStackID, endpointID)
return err
})
}
func (s *Service) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
var collection = make([]portainer.EdgeStackStatusForEnv, 0)
return collection, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
collection, err = s.Tx(tx).ReadAll(edgeStackID)
return err
})
}
func (s *Service) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Update(edgeStackID, endpointID, status)
})
}
func (s *Service) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Delete(edgeStackID, endpointID)
})
}
func (s *Service) DeleteAll(edgeStackID portainer.EdgeStackID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).DeleteAll(edgeStackID)
})
}
func (s *Service) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Clear(edgeStackID, relatedEnvironmentsIDs)
})
}
func (s *Service) key(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) []byte {
return append(s.conn.ConvertToKey(int(edgeStackID)), s.conn.ConvertToKey(int(endpointID))...)
}

View File

@ -0,0 +1,95 @@
package edgestackstatus
import (
"fmt"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
type ServiceTx struct {
service *Service
tx portainer.Transaction
}
func (service ServiceTx) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := service.service.key(edgeStackID, endpointID)
return service.tx.CreateObjectWithStringId(BucketName, identifier, status)
}
func (s ServiceTx) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var status portainer.EdgeStackStatusForEnv
identifier := s.service.key(edgeStackID, endpointID)
if err := s.tx.GetObject(BucketName, identifier, &status); err != nil {
return nil, err
}
return &status, nil
}
func (s ServiceTx) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return nil, fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
return statuses, nil
}
func (s ServiceTx) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.UpdateObject(BucketName, identifier, status)
}
func (s ServiceTx) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.DeleteObject(BucketName, identifier)
}
func (s ServiceTx) DeleteAll(edgeStackID portainer.EdgeStackID) error {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
for _, status := range statuses {
if err := s.tx.DeleteObject(BucketName, s.service.key(edgeStackID, status.EndpointID)); err != nil {
return fmt.Errorf("unable to delete EdgeStackStatus for EdgeStack %d and Endpoint %d: %w", edgeStackID, status.EndpointID, err)
}
}
return nil
}
func (s ServiceTx) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
for _, envID := range relatedEnvironmentsIDs {
existingStatus, err := s.Read(edgeStackID, envID)
if err != nil && !dataservices.IsErrObjectNotFound(err) {
return fmt.Errorf("unable to retrieve status for environment %d: %w", envID, err)
}
var deploymentInfo portainer.StackDeploymentInfo
if existingStatus != nil {
deploymentInfo = existingStatus.DeploymentInfo
}
if err := s.Update(edgeStackID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: []portainer.EdgeStackDeploymentStatus{},
DeploymentInfo: deploymentInfo,
}); err != nil {
return err
}
}
return nil
}
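
Each status is keyed by the edge stack ID concatenated with the endpoint ID, which is what lets ReadAll and DeleteAll enumerate one stack's statuses with a key-prefix scan. A sketch of that key layout, assuming ConvertToKey produces a fixed-width big-endian encoding as the boltdb connection does:

```go
package example

import "encoding/binary"

// compositeKey sketches the layout used by Service.key above: the stack ID first,
// then the endpoint ID, each fixed-width so all statuses of one stack share a prefix.
func compositeKey(edgeStackID, endpointID int) []byte {
	key := make([]byte, 16)
	binary.BigEndian.PutUint64(key[:8], uint64(edgeStackID))
	binary.BigEndian.PutUint64(key[8:], uint64(endpointID))
	return key
}

// stackPrefix is the prefix passed to GetAllWithKeyPrefix to scan one stack's statuses.
func stackPrefix(edgeStackID int) []byte {
	p := make([]byte, 8)
	binary.BigEndian.PutUint64(p, uint64(edgeStackID))
	return p
}
```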

View File

@ -6,8 +6,6 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log"
)
// BucketName represents the name of the bucket where this service stores data.
@ -16,7 +14,6 @@ const BucketName = "endpoint_relations"
// Service represents a service for managing environment(endpoint) relation data.
type Service struct {
connection portainer.Connection
updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
endpointRelationsCache []portainer.EndpointRelation
mu sync.Mutex
@ -29,10 +26,8 @@ func (service *Service) BucketName() string {
}
func (service *Service) RegisterUpdateStackFunction(
updateFunc func(portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
) {
service.updateStackFn = updateFunc
service.updateStackFnTx = updateFuncTx
}
@ -91,106 +86,26 @@ func (service *Service) Create(endpointRelation *portainer.EndpointRelation) err
// UpdateEndpointRelation updates an Environment(Endpoint) relation object
func (service *Service) UpdateEndpointRelation(endpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error {
previousRelationState, _ := service.EndpointRelation(endpointID)
identifier := service.connection.ConvertToKey(int(endpointID))
err := service.connection.UpdateObject(BucketName, identifier, endpointRelation)
cache.Del(endpointID)
if err != nil {
return err
}
updatedRelationState, _ := service.EndpointRelation(endpointID)
service.mu.Lock()
service.endpointRelationsCache = nil
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(previousRelationState, updatedRelationState)
return nil
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).UpdateEndpointRelation(endpointID, endpointRelation)
})
}
func (service *Service) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
return service.connection.ViewTx(func(tx portainer.Transaction) error {
return service.Tx(tx).AddEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
func (service *Service) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStack *portainer.EdgeStack) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).AddEndpointRelationsForEdgeStack(endpointIDs, edgeStack)
})
}
func (service *Service) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
return service.connection.ViewTx(func(tx portainer.Transaction) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).RemoveEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
})
}
// DeleteEndpointRelation deletes an Environment(Endpoint) relation object
func (service *Service) DeleteEndpointRelation(endpointID portainer.EndpointID) error {
deletedRelation, _ := service.EndpointRelation(endpointID)
identifier := service.connection.ConvertToKey(int(endpointID))
err := service.connection.DeleteObject(BucketName, identifier)
cache.Del(endpointID)
if err != nil {
return err
}
service.mu.Lock()
service.endpointRelationsCache = nil
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(deletedRelation, nil)
return nil
}
func (service *Service) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation
// or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
if updatedRelationState != nil {
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
// for each stack referenced by the updated relation
// list how many time this stack is referenced in all relations
// in order to update the stack deployments count
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
}
}
}
if err := service.updateStackFn(refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
}
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).DeleteEndpointRelation(endpointID)
})
}
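
The deployment bookkeeping removed here previously recomputed NumDeployments whenever a relation changed; that responsibility now sits behind the transactional service, but the count itself is simply the number of relations referencing a given stack. A sketch grounded in the removed loop above:

```go
package example

import portainer "github.com/portainer/portainer/api"

// countDeployments mirrors the removed bookkeeping: a stack's NumDeployments is
// the number of endpoint relations that reference it with a true flag.
func countDeployments(relations []portainer.EndpointRelation, stackID portainer.EdgeStackID) int {
	n := 0
	for _, r := range relations {
		if r.EdgeStacks[stackID] {
			n++
		}
	}
	return n
}
```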

View File

@ -0,0 +1,140 @@
package endpointrelation
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/stretchr/testify/require"
)
func TestUpdateRelation(t *testing.T) {
const endpointID = 1
const edgeStackID1 = 1
const edgeStackID2 = 2
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn)
require.NoError(t, err)
updateStackFnTxCalled := false
edgeStacks := make(map[portainer.EdgeStackID]portainer.EdgeStack)
edgeStacks[edgeStackID1] = portainer.EdgeStack{ID: edgeStackID1}
edgeStacks[edgeStackID2] = portainer.EdgeStack{ID: edgeStackID2}
service.RegisterUpdateStackFunction(func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error {
updateStackFnTxCalled = true
s, ok := edgeStacks[ID]
require.True(t, ok)
updateFunc(&s)
edgeStacks[ID] = s
return nil
})
// Nil relation
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, nil)
_, cacheKeyExists := cache.Get(endpointID)
require.NoError(t, err)
require.False(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
// Add a relation to two edge stacks
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
edgeStackID1: true,
edgeStackID2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 1, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Remove a relation to one edge stack
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Delete the relation
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.DeleteEndpointRelation(endpointID)
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 0, edgeStacks[edgeStackID2].NumDeployments)
}
func TestAddEndpointRelationsForEdgeStack(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn)
require.NoError(t, err)
edgeStackService, err := edgestack.NewService(conn, func(t portainer.Transaction, esi portainer.EdgeStackID) {})
require.NoError(t, err)
service.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFuncTx)
require.NoError(t, edgeStackService.Create(1, &portainer.EdgeStack{}))
require.NoError(t, service.Create(&portainer.EndpointRelation{EndpointID: 1, EdgeStacks: map[portainer.EdgeStackID]bool{}}))
require.NoError(t, service.AddEndpointRelationsForEdgeStack([]portainer.EndpointID{1}, &portainer.EdgeStack{ID: 1}))
}
func TestEndpointRelations(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn)
require.NoError(t, err)
require.NoError(t, service.Create(&portainer.EndpointRelation{EndpointID: 1}))
rels, err := service.EndpointRelations()
require.NoError(t, err)
require.Len(t, rels, 1)
}

View File

@ -76,14 +76,14 @@ func (service ServiceTx) UpdateEndpointRelation(endpointID portainer.EndpointID,
return nil
}
func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStack *portainer.EdgeStack) error {
for _, endpointID := range endpointIDs {
rel, err := service.EndpointRelation(endpointID)
if err != nil {
return err
}
rel.EdgeStacks[edgeStackID] = true
rel.EdgeStacks[edgeStack.ID] = true
identifier := service.service.connection.ConvertToKey(int(endpointID))
err = service.tx.UpdateObject(BucketName, identifier, rel)
@ -97,8 +97,12 @@ func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portaine
service.service.endpointRelationsCache = nil
service.service.mu.Unlock()
if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments += len(endpointIDs)
if err := service.service.updateStackFnTx(service.tx, edgeStack.ID, func(es *portainer.EdgeStack) {
es.NumDeployments += len(endpointIDs)
// sync the changes back into `edgeStack` in case it is re-persisted after the `AddEndpointRelationsForEdgeStack` call
// to avoid overwriting the new count with the previous values
edgeStack.NumDeployments = es.NumDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
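The new comment marks the behavioural core of the signature change: because the caller now passes its own *portainer.EdgeStack, the refreshed NumDeployments is copied back onto that value, so re-persisting the same struct later does not reset the counter. A hedged caller-side sketch; the function and variable names are illustrative and not part of this changeset:

package example

import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)

// deployToEndpoints is a hypothetical caller: it adds the relations inside the
// transaction, then reads the deployment count straight off the caller-owned
// edgeStack value, which AddEndpointRelationsForEdgeStack keeps in sync.
func deployToEndpoints(tx dataservices.DataStoreTx, edgeStack *portainer.EdgeStack, endpointIDs []portainer.EndpointID) (int, error) {
	if err := tx.EndpointRelation().AddEndpointRelationsForEdgeStack(endpointIDs, edgeStack); err != nil {
		return 0, err
	}
	// Safe to re-persist edgeStack after this point: NumDeployments was synced back.
	return edgeStack.NumDeployments, nil
}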
@ -186,53 +190,49 @@ func (service ServiceTx) cachedEndpointRelations() ([]portainer.EndpointRelation
}
func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation
// or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
// Sanity check
if edgeStack.NumDeployments <= 0 {
log.Error().
Int("edgestack_id", int(edgeStack.ID)).
Int("endpoint_id", int(previousRelationState.EndpointID)).
Int("num_deployments", edgeStack.NumDeployments).
Msg("cannot decrement the number of deployments for an edge stack with zero deployments")
if updatedRelationState != nil {
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
return
}
// for each stack referenced by the updated relation
// list how many times this stack is referenced in all relations
// in order to update the stack deployments count
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
edgeStack.NumDeployments--
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
cache.Del(previousRelationState.EndpointID)
}
}
}
if err := service.service.updateStackFnTx(service.tx, refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
if updatedRelationState == nil {
return
}
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments++
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
cache.Del(updatedRelationState.EndpointID)
}
}
}

View File

@ -12,6 +12,7 @@ type (
EdgeGroup() EdgeGroupService
EdgeJob() EdgeJobService
EdgeStack() EdgeStackService
EdgeStackStatus() EdgeStackStatusService
Endpoint() EndpointService
EndpointGroup() EndpointGroupService
EndpointRelation() EndpointRelationService
@ -39,8 +40,8 @@ type (
Open() (newStore bool, err error)
Init() error
Close() error
UpdateTx(func(DataStoreTx) error) error
ViewTx(func(DataStoreTx) error) error
UpdateTx(func(tx DataStoreTx) error) error
ViewTx(func(tx DataStoreTx) error) error
MigrateData() error
Rollback(force bool) error
CheckCurrentEdition() error
@ -89,6 +90,16 @@ type (
BucketName() string
}
EdgeStackStatusService interface {
Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error)
ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error)
Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error
DeleteAll(edgeStackID portainer.EdgeStackID) error
Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error
}
// EndpointService represents a service for managing environment(endpoint) data
EndpointService interface {
Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error)
@ -115,7 +126,7 @@ type (
EndpointRelation(EndpointID portainer.EndpointID) (*portainer.EndpointRelation, error)
Create(endpointRelation *portainer.EndpointRelation) error
UpdateEndpointRelation(EndpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error
AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStack *portainer.EdgeStack) error
RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
DeleteEndpointRelation(EndpointID portainer.EndpointID) error
BucketName() string
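The new EdgeStackStatusService keeps one EdgeStackStatusForEnv record per edge stack and environment pair instead of a status map on the edge stack itself (the 2.31.0 migration further down moves the existing data over). A hedged sketch of writing and reading these records through a transaction; the wrapper function and names are illustrative:

package example

import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)

// recordStatus creates a per-environment status record for an edge stack and
// reads back all records for that stack, within a single write transaction.
func recordStatus(store dataservices.DataStore, edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
	return store.UpdateTx(func(tx dataservices.DataStoreTx) error {
		status := &portainer.EdgeStackStatusForEnv{EndpointID: endpointID}
		if err := tx.EdgeStackStatus().Create(edgeStackID, endpointID, status); err != nil {
			return err
		}
		statuses, err := tx.EdgeStackStatus().ReadAll(edgeStackID)
		if err != nil {
			return err
		}
		_ = statuses // one entry per environment the stack is deployed to
		return nil
	})
}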

View File

@ -6,12 +6,11 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/rs/zerolog/log"
)
const (
BucketName = "pending_actions"
)
const BucketName = "pending_actions"
type Service struct {
dataservices.BaseDataService[portainer.PendingAction, portainer.PendingActionID]
@ -78,15 +77,14 @@ func (s ServiceTx) Update(ID portainer.PendingActionID, config *portainer.Pendin
func (s ServiceTx) DeleteByEndpointID(ID portainer.EndpointID) error {
log.Debug().Int("endpointId", int(ID)).Msg("deleting pending actions for endpoint")
pendingActions, err := s.BaseDataServiceTx.ReadAll()
pendingActions, err := s.ReadAll()
if err != nil {
return fmt.Errorf("failed to retrieve pending-actions for endpoint (%d): %w", ID, err)
}
for _, pendingAction := range pendingActions {
if pendingAction.EndpointID == ID {
err := s.BaseDataServiceTx.Delete(pendingAction.ID)
if err != nil {
if err := s.Delete(pendingAction.ID); err != nil {
log.Debug().Int("endpointId", int(ID)).Msgf("failed to delete pending action: %v", err)
}
}

View File

@ -0,0 +1,32 @@
package pendingactions_test
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/stretchr/testify/require"
)
func TestDeleteByEndpoint(t *testing.T) {
_, store := datastore.MustNewTestStore(t, false, false)
// Create Endpoint 1
err := store.PendingActions().Create(&portainer.PendingAction{EndpointID: 1})
require.NoError(t, err)
// Create Endpoint 2
err = store.PendingActions().Create(&portainer.PendingAction{EndpointID: 2})
require.NoError(t, err)
// Delete Endpoint 1
err = store.PendingActions().DeleteByEndpointID(1)
require.NoError(t, err)
// Check that only Endpoint 2 remains
pendingActions, err := store.PendingActions().ReadAll()
require.NoError(t, err)
require.Len(t, pendingActions, 1)
require.Equal(t, portainer.EndpointID(2), pendingActions[0].EndpointID)
}

View File

@ -51,3 +51,20 @@ func (service *Service) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portai
return snapshot, err
}
func (service *Service) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot *portainer.SnapshotRawMessage
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
snapshot, err = service.Tx(tx).ReadRawMessage(ID)
return err
})
return snapshot, err
}
func (service *Service) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}

View File

@ -35,3 +35,19 @@ func (service ServiceTx) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*porta
return &snapshot.Snapshot, nil
}
func (service ServiceTx) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot = portainer.SnapshotRawMessage{}
identifier := service.Connection.ConvertToKey(int(ID))
if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil {
return nil, err
}
return &snapshot, nil
}
func (service ServiceTx) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}

View File

@ -4,17 +4,18 @@ import (
"testing"
"time"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/filesystem"
"github.com/gofrs/uuid"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/filesystem"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func newGuidString(t *testing.T) string {
uuid, err := uuid.NewV4()
assert.NoError(t, err)
require.NoError(t, err)
return uuid.String()
}
@ -41,7 +42,7 @@ func TestService_StackByWebhookID(t *testing.T) {
// can find a stack by webhook ID
got, err := store.StackService.StackByWebhookID(webhookID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, stack, *got)
// returns nil and object not found error if there's no stack associated with the webhook
@ -94,10 +95,10 @@ func Test_RefreshableStacks(t *testing.T) {
for _, stack := range []*portainer.Stack{&staticStack, &stackWithWebhook, &refreshableStack} {
err := store.Stack().Create(stack)
assert.NoError(t, err)
require.NoError(t, err)
}
stacks, err := store.Stack().RefreshableStacks()
assert.NoError(t, err)
require.NoError(t, err)
assert.ElementsMatch(t, []portainer.Stack{refreshableStack}, stacks)
}

View File

@ -5,7 +5,9 @@ import (
"github.com/portainer/portainer/api/dataservices/errors"
"github.com/portainer/portainer/api/datastore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_teamByName(t *testing.T) {
@ -13,7 +15,7 @@ func Test_teamByName(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
_, err := store.Team().TeamByName("name")
assert.ErrorIs(t, err, errors.ErrObjectNotFound)
require.ErrorIs(t, err, errors.ErrObjectNotFound)
})
@ -29,7 +31,7 @@ func Test_teamByName(t *testing.T) {
teamBuilder.createNew("name1")
_, err := store.Team().TeamByName("name")
assert.ErrorIs(t, err, errors.ErrObjectNotFound)
require.ErrorIs(t, err, errors.ErrObjectNotFound)
})
t.Run("When there is an object with the same name should return the object", func(t *testing.T) {
@ -44,7 +46,7 @@ func Test_teamByName(t *testing.T) {
expectedTeam := teamBuilder.createNew("name1")
team, err := store.Team().TeamByName("name1")
assert.NoError(t, err, "TeamByName should succeed")
require.NoError(t, err, "TeamByName should succeed")
assert.Equal(t, expectedTeam, team)
})
}

View File

@ -6,12 +6,14 @@ import (
"strings"
"testing"
"github.com/dchest/uniuri"
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/chisel"
"github.com/portainer/portainer/api/crypto"
"github.com/dchest/uniuri"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -30,45 +32,21 @@ func TestStoreFull(t *testing.T) {
_, store := MustNewTestStore(t, true, true)
testCases := map[string]func(t *testing.T){
"User Accounts": func(t *testing.T) {
store.testUserAccounts(t)
},
"Environments": func(t *testing.T) {
store.testEnvironments(t)
},
"Settings": func(t *testing.T) {
store.testSettings(t)
},
"SSL Settings": func(t *testing.T) {
store.testSSLSettings(t)
},
"Tunnel Server": func(t *testing.T) {
store.testTunnelServer(t)
},
"Custom Templates": func(t *testing.T) {
store.testCustomTemplates(t)
},
"Registries": func(t *testing.T) {
store.testRegistries(t)
},
"Resource Control": func(t *testing.T) {
store.testResourceControl(t)
},
"Schedules": func(t *testing.T) {
store.testSchedules(t)
},
"Tags": func(t *testing.T) {
store.testTags(t)
},
// "Test Title": func(t *testing.T) {
// },
"User Accounts": store.testUserAccounts,
"Environments": store.testEnvironments,
"Settings": store.testSettings,
"SSL Settings": store.testSSLSettings,
"Tunnel Server": store.testTunnelServer,
"Custom Templates": store.testCustomTemplates,
"Registries": store.testRegistries,
"Resource Control": store.testResourceControl,
"Schedules": store.testSchedules,
"Tags": store.testTags,
}
for name, test := range testCases {
t.Run(name, test)
}
}
func (store *Store) testEnvironments(t *testing.T) {
@ -167,7 +145,7 @@ func (store *Store) CreateEndpoint(t *testing.T, name string, endpointType porta
store.Endpoint().Create(expectedEndpoint)
endpoint, err := store.Endpoint().Endpoint(id)
is.NoError(err, "Endpoint() should not return an error")
require.NoError(t, err, "Endpoint() should not return an error")
is.Equal(expectedEndpoint, endpoint, "endpoint should be the same")
return endpoint.ID
@ -194,7 +172,7 @@ func (store *Store) testSSLSettings(t *testing.T) {
store.SSLSettings().UpdateSettings(ssl)
settings, err := store.SSLSettings().Settings()
is.NoError(err, "Get sslsettings should succeed")
require.NoError(t, err, "Get sslsettings should succeed")
is.Equal(ssl, settings, "Stored SSLSettings should be the same as what is read out")
}
@ -203,27 +181,27 @@ func (store *Store) testTunnelServer(t *testing.T) {
expectPrivateKeySeed := uniuri.NewLen(16)
err := store.TunnelServer().UpdateInfo(&portainer.TunnelServerInfo{PrivateKeySeed: expectPrivateKeySeed})
is.NoError(err, "UpdateInfo should have succeeded")
require.NoError(t, err, "UpdateInfo should have succeeded")
serverInfo, err := store.TunnelServer().Info()
is.NoError(err, "Info should have succeeded")
require.NoError(t, err, "Info should have succeeded")
is.Equal(expectPrivateKeySeed, serverInfo.PrivateKeySeed, "hashed passwords should not differ")
}
// add users, read them back and check the details are unchanged
func (store *Store) testUserAccounts(t *testing.T) {
is := assert.New(t)
err := store.createAccount(adminUsername, adminPassword, portainer.AdministratorRole)
is.NoError(err, "CreateAccount should succeed")
store.checkAccount(adminUsername, adminPassword, portainer.AdministratorRole)
is.NoError(err, "Account failure")
require.NoError(t, err, "CreateAccount should succeed")
err = store.checkAccount(adminUsername, adminPassword, portainer.AdministratorRole)
require.NoError(t, err, "Account failure")
err = store.createAccount(standardUsername, standardPassword, portainer.StandardUserRole)
is.NoError(err, "CreateAccount should succeed")
store.checkAccount(standardUsername, standardPassword, portainer.StandardUserRole)
is.NoError(err, "Account failure")
require.NoError(t, err, "CreateAccount should succeed")
err = store.checkAccount(standardUsername, standardPassword, portainer.StandardUserRole)
require.NoError(t, err, "Account failure")
}
// create an account with the provided details
@ -232,18 +210,13 @@ func (store *Store) createAccount(username, password string, role portainer.User
user := &portainer.User{Username: username, Role: role}
// encrypt the password
cs := &crypto.Service{}
cs := crypto.Service{}
user.Password, err = cs.Hash(password)
if err != nil {
return err
}
err = store.User().Create(user)
if err != nil {
return err
}
return nil
return store.User().Create(user)
}
func (store *Store) checkAccount(username, expectPassword string, expectRole portainer.UserRole) error {
@ -259,13 +232,8 @@ func (store *Store) checkAccount(username, expectPassword string, expectRole por
}
// Check the password
cs := &crypto.Service{}
expectPasswordHash, err := cs.Hash(expectPassword)
if err != nil {
return errors.Wrap(err, "hash failed")
}
if user.Password != expectPasswordHash {
cs := crypto.Service{}
if cs.CompareHashAndData(user.Password, expectPassword) != nil {
return fmt.Errorf("%s user password hash failure", user.Username)
}
@ -277,7 +245,7 @@ func (store *Store) testSettings(t *testing.T) {
// since many settings are default and basically nil, I'm going to update some and read them back
expectedSettings, err := store.Settings().Settings()
is.NoError(err, "Settings() should not return an error")
require.NoError(t, err, "Settings() should not return an error")
expectedSettings.TemplatesURL = "http://portainer.io/application-templates"
expectedSettings.HelmRepositoryURL = "http://portainer.io/helm-repository"
expectedSettings.EdgeAgentCheckinInterval = 60
@ -291,10 +259,10 @@ func (store *Store) testSettings(t *testing.T) {
expectedSettings.SnapshotInterval = "10m"
err = store.Settings().UpdateSettings(expectedSettings)
is.NoError(err, "UpdateSettings() should succeed")
require.NoError(t, err, "UpdateSettings() should succeed")
settings, err := store.Settings().Settings()
is.NoError(err, "Settings() should not return an error")
require.NoError(t, err, "Settings() should not return an error")
is.Equal(expectedSettings, settings, "stored settings should match")
}
@ -317,7 +285,7 @@ func (store *Store) testCustomTemplates(t *testing.T) {
customTemplate.Create(expectedTemplate)
actualTemplate, err := customTemplate.Read(expectedTemplate.ID)
is.NoError(err, "CustomTemplate should not return an error")
require.NoError(t, err, "CustomTemplate should not return an error")
is.Equal(expectedTemplate, actualTemplate, "expected and actual template do not match")
}
@ -345,17 +313,17 @@ func (store *Store) testRegistries(t *testing.T) {
}
err := regService.Create(reg1)
is.NoError(err)
require.NoError(t, err)
err = regService.Create(reg2)
is.NoError(err)
require.NoError(t, err)
actualReg1, err := regService.Read(reg1.ID)
is.NoError(err)
require.NoError(t, err)
is.Equal(reg1, actualReg1, "registries differ")
actualReg2, err := regService.Read(reg2.ID)
is.NoError(err)
require.NoError(t, err)
is.Equal(reg2, actualReg2, "registries differ")
}
@ -378,10 +346,10 @@ func (store *Store) testSchedules(t *testing.T) {
}
err := schedule.CreateSchedule(s)
is.NoError(err, "CreateSchedule should succeed")
require.NoError(t, err, "CreateSchedule should succeed")
actual, err := schedule.Schedule(s.ID)
is.NoError(err, "schedule should be found")
require.NoError(t, err, "schedule should be found")
is.Equal(s, actual, "schedules differ")
}
@ -401,16 +369,16 @@ func (store *Store) testTags(t *testing.T) {
}
err := tags.Create(tag1)
is.NoError(err, "Tags.Create should succeed")
require.NoError(t, err, "Tags.Create should succeed")
err = tags.Create(tag2)
is.NoError(err, "Tags.Create should succeed")
require.NoError(t, err, "Tags.Create should succeed")
actual, err := tags.Read(tag1.ID)
is.NoError(err, "tag1 should be found")
require.NoError(t, err, "tag1 should be found")
is.Equal(tag1, actual, "tags differ")
actual, err = tags.Read(tag2.ID)
is.NoError(err, "tag2 should be found")
require.NoError(t, err, "tag2 should be found")
is.Equal(tag2, actual, "tags differ")
}

View File

@ -40,13 +40,11 @@ func (store *Store) MigrateData() error {
}
// before we alter anything in the DB, create a backup
_, err = store.Backup("")
if err != nil {
if _, err := store.Backup(""); err != nil {
return errors.Wrap(err, "while backing up database")
}
err = store.FailSafeMigrate(migrator, version)
if err != nil {
if err := store.FailSafeMigrate(migrator, version); err != nil {
err = errors.Wrap(err, "failed to migrate database")
log.Warn().Err(err).Msg("migration failed, restoring database to previous version")
@ -85,7 +83,9 @@ func (store *Store) newMigratorParameters(version *models.Version, flags *portai
DockerhubService: store.DockerHubService,
AuthorizationService: authorization.NewService(store),
EdgeStackService: store.EdgeStackService,
EdgeStackStatusService: store.EdgeStackStatusService,
EdgeJobService: store.EdgeJobService,
EdgeGroupService: store.EdgeGroupService,
TunnelServerService: store.TunnelServerService,
PendingActionsService: store.PendingActionsService,
}
@ -140,8 +140,7 @@ func (store *Store) connectionRollback(force bool) error {
}
}
err := store.Restore()
if err != nil {
if err := store.Restore(); err != nil {
return err
}

View File

@ -6,7 +6,9 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore/migrator"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMigrateStackEntryPoint(t *testing.T) {
@ -28,25 +30,25 @@ func TestMigrateStackEntryPoint(t *testing.T) {
for _, s := range stacks {
err := stackService.Create(s)
assert.NoError(t, err, "failed to create stack")
require.NoError(t, err, "failed to create stack")
}
s, err := stackService.Read(1)
assert.NoError(t, err)
require.NoError(t, err)
assert.Nil(t, s.GitConfig, "first stack should not have git config")
s, err = stackService.Read(2)
assert.NoError(t, err)
assert.Equal(t, "", s.GitConfig.ConfigFilePath, "not migrated yet migrated")
require.NoError(t, err)
assert.Empty(t, s.GitConfig.ConfigFilePath, "not yet migrated")
err = migrator.MigrateStackEntryPoint(stackService)
assert.NoError(t, err, "failed to migrate entry point to Git ConfigFilePath")
require.NoError(t, err, "failed to migrate entry point to Git ConfigFilePath")
s, err = stackService.Read(1)
assert.NoError(t, err)
require.NoError(t, err)
assert.Nil(t, s.GitConfig, "first stack should not have git config")
s, err = stackService.Read(2)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, "dir/sub/compose.yml", s.GitConfig.ConfigFilePath, "second stack should have config file path migrated")
}

View File

@ -0,0 +1,31 @@
package migrator
import portainer "github.com/portainer/portainer/api"
func (m *Migrator) migrateEdgeStacksStatuses_2_31_0() error {
edgeStacks, err := m.edgeStackService.EdgeStacks()
if err != nil {
return err
}
for _, edgeStack := range edgeStacks {
for envID, status := range edgeStack.Status {
if err := m.edgeStackStatusService.Create(edgeStack.ID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: status.Status,
DeploymentInfo: status.DeploymentInfo,
ReadyRePullImage: status.ReadyRePullImage,
}); err != nil {
return err
}
}
edgeStack.Status = nil
if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,33 @@
package migrator
import (
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
perrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/portainer/portainer/api/internal/endpointutils"
)
func (m *Migrator) addEndpointRelationForEdgeAgents_2_32_0() error {
endpoints, err := m.endpointService.Endpoints()
if err != nil {
return err
}
for _, endpoint := range endpoints {
if endpointutils.IsEdgeEndpoint(&endpoint) {
_, err := m.endpointRelationService.EndpointRelation(endpoint.ID)
if err != nil && errors.Is(err, perrors.ErrObjectNotFound) {
relation := &portainer.EndpointRelation{
EndpointID: endpoint.ID,
EdgeStacks: make(map[portainer.EdgeStackID]bool),
}
if err := m.endpointRelationService.Create(relation); err != nil {
return err
}
}
}
}
return nil
}

View File

@ -0,0 +1,25 @@
package migrator
import (
"github.com/portainer/portainer/api/roar"
)
func (m *Migrator) migrateEdgeGroupEndpointsToRoars_2_33_0() error {
egs, err := m.edgeGroupService.ReadAll()
if err != nil {
return err
}
for _, eg := range egs {
if eg.EndpointIDs.Len() == 0 {
eg.EndpointIDs = roar.FromSlice(eg.Endpoints)
eg.Endpoints = nil
}
if err := m.edgeGroupService.Update(eg.ID, &eg); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,55 @@
package migrator
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/stretchr/testify/require"
)
func TestMigrateEdgeGroupEndpointsToRoars_2_33_0Idempotency(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
edgeGroupService, err := edgegroup.NewService(conn)
require.NoError(t, err)
edgeGroup := &portainer.EdgeGroup{
ID: 1,
Name: "test-edge-group",
Endpoints: []portainer.EndpointID{1, 2, 3},
}
err = conn.CreateObjectWithId(edgegroup.BucketName, int(edgeGroup.ID), edgeGroup)
require.NoError(t, err)
m := NewMigrator(&MigratorParameters{EdgeGroupService: edgeGroupService})
// Run migration once
err = m.migrateEdgeGroupEndpointsToRoars_2_33_0()
require.NoError(t, err)
migratedEdgeGroup, err := edgeGroupService.Read(edgeGroup.ID)
require.NoError(t, err)
require.Empty(t, migratedEdgeGroup.Endpoints)
require.Equal(t, len(edgeGroup.Endpoints), migratedEdgeGroup.EndpointIDs.Len())
// Run migration again to ensure the results didn't change
err = m.migrateEdgeGroupEndpointsToRoars_2_33_0()
require.NoError(t, err)
migratedEdgeGroup, err = edgeGroupService.Read(edgeGroup.ID)
require.NoError(t, err)
require.Empty(t, migratedEdgeGroup.Endpoints)
require.Equal(t, len(edgeGroup.Endpoints), migratedEdgeGroup.EndpointIDs.Len())
}

View File

@ -3,12 +3,13 @@ package migrator
import (
"errors"
"github.com/Masterminds/semver"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/models"
"github.com/portainer/portainer/api/dataservices/dockerhub"
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -27,6 +28,8 @@ import (
"github.com/portainer/portainer/api/dataservices/user"
"github.com/portainer/portainer/api/dataservices/version"
"github.com/portainer/portainer/api/internal/authorization"
"github.com/Masterminds/semver"
"github.com/rs/zerolog/log"
)
@ -56,7 +59,9 @@ type (
authorizationService *authorization.Service
dockerhubService *dockerhub.Service
edgeStackService *edgestack.Service
edgeStackStatusService *edgestackstatus.Service
edgeJobService *edgejob.Service
edgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service
pendingActionsService *pendingactions.Service
}
@ -84,7 +89,9 @@ type (
AuthorizationService *authorization.Service
DockerhubService *dockerhub.Service
EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EdgeJobService *edgejob.Service
EdgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service
PendingActionsService *pendingactions.Service
}
@ -114,12 +121,15 @@ func NewMigrator(parameters *MigratorParameters) *Migrator {
authorizationService: parameters.AuthorizationService,
dockerhubService: parameters.DockerhubService,
edgeStackService: parameters.EdgeStackService,
edgeStackStatusService: parameters.EdgeStackStatusService,
edgeJobService: parameters.EdgeJobService,
edgeGroupService: parameters.EdgeGroupService,
TunnelServerService: parameters.TunnelServerService,
pendingActionsService: parameters.PendingActionsService,
}
migrator.initMigrations()
return migrator
}
@ -242,6 +252,12 @@ func (m *Migrator) initMigrations() {
m.migratePendingActionsDataForDB130,
)
m.addMigrations("2.31.0", m.migrateEdgeStacksStatuses_2_31_0)
m.addMigrations("2.32.0", m.addEndpointRelationForEdgeAgents_2_32_0)
m.addMigrations("2.33.0-rc1", m.migrateEdgeGroupEndpointsToRoars_2_33_0)
// Add new migrations above...
// One function per migration, each version's migration funcs in the same file.
}

View File

@ -2,6 +2,7 @@ package postinit
import (
"context"
"fmt"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
@ -83,17 +84,27 @@ func (postInitMigrator *PostInitMigrator) PostInitMigrate() error {
// try to create a post init migration pending action. If it already exists, do nothing
// this function exists for readability, not reusability
// TODO: This should be moved into pending actions as part of the pending action migration
func (postInitMigrator *PostInitMigrator) createPostInitMigrationPendingAction(environmentID portainer.EndpointID) error {
// If there are no pending actions for the given endpoint, create one
err := postInitMigrator.dataStore.PendingActions().Create(&portainer.PendingAction{
action := portainer.PendingAction{
EndpointID: environmentID,
Action: actions.PostInitMigrateEnvironment,
})
if err != nil {
log.Error().Err(err).Msgf("Error creating pending action for environment %d", environmentID)
}
return nil
pendingActions, err := postInitMigrator.dataStore.PendingActions().ReadAll()
if err != nil {
return fmt.Errorf("failed to retrieve pending actions: %w", err)
}
for _, dba := range pendingActions {
if dba.EndpointID == action.EndpointID && dba.Action == action.Action {
log.Debug().
Str("action", action.Action).
Int("endpoint_id", int(action.EndpointID)).
Msg("pending action already exists for environment, skipping...")
return nil
}
}
return postInitMigrator.dataStore.PendingActions().Create(&action)
}
// MigrateEnvironment runs migrations on a single environment
@ -149,7 +160,7 @@ func (migrator *PostInitMigrator) MigrateGPUs(e portainer.Endpoint, dockerClient
return migrator.dataStore.UpdateTx(func(tx dataservices.DataStoreTx) error {
environment, err := tx.Endpoint().Endpoint(e.ID)
if err != nil {
log.Error().Err(err).Msgf("Error getting environment %d", environment.ID)
log.Error().Err(err).Msgf("Error getting environment %d", e.ID)
return err
}
// Early exit if we do not need to migrate!
@ -175,10 +186,11 @@ func (migrator *PostInitMigrator) MigrateGPUs(e portainer.Endpoint, dockerClient
continue
}
deviceRequests := containerDetails.HostConfig.Resources.DeviceRequests
deviceRequests := containerDetails.HostConfig.DeviceRequests
for _, deviceRequest := range deviceRequests {
if deviceRequest.Driver == "nvidia" {
environment.EnableGPUManagement = true
break containersLoop
}
}
@ -186,8 +198,7 @@ func (migrator *PostInitMigrator) MigrateGPUs(e portainer.Endpoint, dockerClient
// set the MigrateGPUs flag to false so we don't run this again
environment.PostInitMigrations.MigrateGPUs = false
err = tx.Endpoint().UpdateEndpoint(environment.ID, environment)
if err != nil {
if err := tx.Endpoint().UpdateEndpoint(environment.ID, environment); err != nil {
log.Error().Err(err).Msgf("Error updating EnableGPUManagement flag for environment %d", environment.ID)
return err
}
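Two small but meaningful fixes in MigrateGPUs: the error log now uses e.ID rather than environment.ID, which would dereference a nil pointer when the endpoint lookup fails, and the nvidia check now stops scanning once a matching device request is found by breaking out of the outer container loop via the containersLoop label (declared on that loop, outside this hunk). A generic sketch of the labeled-break shape, for reference:

package example

// hasNvidiaDriver mirrors the control flow above: the label on the outer loop
// lets the inner check stop scanning the remaining containers as soon as one
// nvidia device request is found.
func hasNvidiaDriver(requestsPerContainer [][]string) bool {
	found := false
containersLoop:
	for _, drivers := range requestsPerContainer {
		for _, driver := range drivers {
			if driver == "nvidia" {
				found = true
				break containersLoop
			}
		}
	}
	return found
}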

View File

@ -0,0 +1,174 @@
package postinit
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/pendingactions/actions"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMigrateGPUs(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasSuffix(r.URL.Path, "/containers/json") {
containerSummary := []container.Summary{{ID: "container1"}}
if err := json.NewEncoder(w).Encode(containerSummary); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
return
}
container := container.InspectResponse{
ContainerJSONBase: &container.ContainerJSONBase{
ID: "container1",
HostConfig: &container.HostConfig{
Resources: container.Resources{
DeviceRequests: []container.DeviceRequest{
{Driver: "nvidia"},
},
},
},
},
}
if err := json.NewEncoder(w).Encode(container); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
}))
defer srv.Close()
_, store := datastore.MustNewTestStore(t, true, false)
migrator := &PostInitMigrator{dataStore: store}
dockerCli, err := client.NewClientWithOpts(client.WithHost(srv.URL), client.WithHTTPClient(http.DefaultClient))
require.NoError(t, err)
// Nonexistent endpoint
err = migrator.MigrateGPUs(portainer.Endpoint{}, dockerCli)
require.Error(t, err)
// Valid endpoint
endpoint := portainer.Endpoint{ID: 1, PostInitMigrations: portainer.EndpointPostInitMigrations{MigrateGPUs: true}}
err = store.Endpoint().Create(&endpoint)
require.NoError(t, err)
err = migrator.MigrateGPUs(endpoint, dockerCli)
require.NoError(t, err)
migratedEndpoint, err := store.Endpoint().Endpoint(endpoint.ID)
require.NoError(t, err)
require.Equal(t, endpoint.ID, migratedEndpoint.ID)
require.False(t, migratedEndpoint.PostInitMigrations.MigrateGPUs)
require.True(t, migratedEndpoint.EnableGPUManagement)
}
func TestPostInitMigrate_PendingActionsCreated(t *testing.T) {
tests := []struct {
name string
existingPendingActions []*portainer.PendingAction
expectedPendingActions int
expectedAction string
}{
{
name: "when existing non-matching action exists, should add migration action",
existingPendingActions: []*portainer.PendingAction{
{
EndpointID: 7,
Action: "some-other-action",
},
},
expectedPendingActions: 2,
expectedAction: actions.PostInitMigrateEnvironment,
},
{
name: "when matching action exists, should not add duplicate",
existingPendingActions: []*portainer.PendingAction{
{
EndpointID: 7,
Action: actions.PostInitMigrateEnvironment,
},
},
expectedPendingActions: 1,
expectedAction: actions.PostInitMigrateEnvironment,
},
{
name: "when no actions exist, should add migration action",
existingPendingActions: []*portainer.PendingAction{},
expectedPendingActions: 1,
expectedAction: actions.PostInitMigrateEnvironment,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
is := assert.New(t)
_, store := datastore.MustNewTestStore(t, true, true)
// Create test endpoint
endpoint := &portainer.Endpoint{
ID: 7,
UserTrusted: true,
Type: portainer.EdgeAgentOnDockerEnvironment,
Edge: portainer.EnvironmentEdgeSettings{
AsyncMode: false,
},
EdgeID: "edgeID",
}
err := store.Endpoint().Create(endpoint)
require.NoError(t, err, "error creating endpoint")
// Create any existing pending actions
for _, action := range tt.existingPendingActions {
err = store.PendingActions().Create(action)
require.NoError(t, err, "error creating pending action")
}
migrator := NewPostInitMigrator(
nil, // kubeFactory not needed for this test
nil, // dockerFactory not needed for this test
store,
"", // assetsPath not needed for this test
nil, // kubernetesDeployer not needed for this test
)
err = migrator.PostInitMigrate()
require.NoError(t, err, "PostInitMigrate should not return error")
// Verify the results
pendingActions, err := store.PendingActions().ReadAll()
require.NoError(t, err, "error reading pending actions")
is.Len(pendingActions, tt.expectedPendingActions, "unexpected number of pending actions")
// If we expect any actions, verify at least one has the expected action type
if tt.expectedPendingActions > 0 {
hasExpectedAction := false
for _, action := range pendingActions {
if action.Action == tt.expectedAction {
hasExpectedAction = true
is.Equal(endpoint.ID, action.EndpointID, "action should reference correct endpoint")
break
}
}
is.True(hasExpectedAction, "should have found action of expected type")
}
})
}
}

View File

@ -13,6 +13,7 @@ import (
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -39,6 +40,8 @@ import (
"github.com/segmentio/encoding/json"
)
var _ dataservices.DataStore = &Store{}
// Store defines the implementation of portainer.DataStore using
// BoltDB as the storage system.
type Store struct {
@ -51,6 +54,7 @@ type Store struct {
EdgeGroupService *edgegroup.Service
EdgeJobService *edgejob.Service
EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EndpointGroupService *endpointgroup.Service
EndpointService *endpoint.Service
EndpointRelationService *endpointrelation.Service
@ -107,7 +111,13 @@ func (store *Store) initServices() error {
return err
}
store.EdgeStackService = edgeStackService
endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFunc, edgeStackService.UpdateEdgeStackFuncTx)
endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFuncTx)
edgeStackStatusService, err := edgestackstatus.NewService(store.connection)
if err != nil {
return err
}
store.EdgeStackStatusService = edgeStackStatusService
edgeGroupService, err := edgegroup.NewService(store.connection)
if err != nil {
@ -269,6 +279,10 @@ func (store *Store) EdgeStack() dataservices.EdgeStackService {
return store.EdgeStackService
}
func (store *Store) EdgeStackStatus() dataservices.EdgeStackStatusService {
return store.EdgeStackStatusService
}
// Environment(Endpoint) gives access to the Environment(Endpoint) data management layer
func (store *Store) Endpoint() dataservices.EndpointService {
return store.EndpointService

View File

@ -32,6 +32,10 @@ func (tx *StoreTx) EdgeStack() dataservices.EdgeStackService {
return tx.store.EdgeStackService.Tx(tx.tx)
}
func (tx *StoreTx) EdgeStackStatus() dataservices.EdgeStackStatusService {
return tx.store.EdgeStackStatusService.Tx(tx.tx)
}
func (tx *StoreTx) Endpoint() dataservices.EndpointService {
return tx.store.EndpointService.Tx(tx.tx)
}

View File

@ -8,6 +8,7 @@
}
],
"edge_stack": null,
"edge_stack_status": null,
"edgegroups": null,
"edgejobs": null,
"endpoint_groups": [
@ -82,7 +83,6 @@
"MigrateIngresses": true
},
"PublicURL": "",
"QueryDate": 0,
"SecuritySettings": {
"allowBindMountsForRegularUsers": true,
"allowContainerCapabilitiesForRegularUsers": true,
@ -120,6 +120,10 @@
"Ecr": {
"Region": ""
},
"Github": {
"OrganisationName": "",
"UseOrganisation": false
},
"Gitlab": {
"InstanceURL": "",
"ProjectId": 0,
@ -610,7 +614,7 @@
"RequiredPasswordLength": 12
},
"KubeconfigExpiry": "0",
"KubectlShellImage": "portainer/kubectl-shell:2.30.0",
"KubectlShellImage": "portainer/kubectl-shell:2.33.0-rc1",
"LDAPSettings": {
"AnonymousMode": true,
"AutoCreateUsers": true,
@ -678,14 +682,11 @@
"Images": null,
"Info": {
"Architecture": "",
"BridgeNfIp6tables": false,
"BridgeNfIptables": false,
"CDISpecDirs": null,
"CPUSet": false,
"CPUShares": false,
"CgroupDriver": "",
"ContainerdCommit": {
"Expected": "",
"ID": ""
},
"Containers": 0,
@ -709,7 +710,6 @@
"IndexServerAddress": "",
"InitBinary": "",
"InitCommit": {
"Expected": "",
"ID": ""
},
"Isolation": "",
@ -738,7 +738,6 @@
},
"RegistryConfig": null,
"RuncCommit": {
"Expected": "",
"ID": ""
},
"Runtimes": null,
@ -780,6 +779,7 @@
"ImageCount": 9,
"IsPodman": false,
"NodeCount": 0,
"PerformanceMetrics": null,
"RunningContainerCount": 5,
"ServiceCount": 0,
"StackCount": 2,
@ -943,7 +943,7 @@
}
],
"version": {
"VERSION": "{\"SchemaVersion\":\"2.30.0\",\"MigratorCount\":0,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}"
"VERSION": "{\"SchemaVersion\":\"2.33.0-rc1\",\"MigratorCount\":1,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}"
},
"webhooks": null
}

View File

@ -24,6 +24,8 @@ const (
dockerClientVersion = "1.37"
)
type NodeNamesCtxKey struct{}
// ClientFactory is used to create Docker clients
type ClientFactory struct {
signatureService portainer.DigitalSignatureService
@ -162,7 +164,7 @@ func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error)
return resp, nil
}
nodeNames, ok := req.Context().Value("nodeNames").(map[string]string)
nodeNames, ok := req.Context().Value(NodeNamesCtxKey{}).(map[string]string)
if ok {
for idx, r := range rs {
// as there is no way to differentiate the same image available on multiple nodes by ID alone
@ -181,10 +183,11 @@ func httpClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*http.Cli
}
if endpoint.TLSConfig.TLS {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify)
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsConfig
}
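Replacing the "nodeNames" string key with the empty struct NodeNamesCtxKey{} follows the usual Go guidance for context values: a package-owned key type cannot collide with values set by other packages, and linters commonly flag plain string keys. A minimal sketch of the producer side that would pair with the RoundTrip lookup above, assuming it lives in the same package that declares NodeNamesCtxKey; the function name is illustrative:

package client

import (
	"context"
	"net/http"
)

// withNodeNames attaches an empty node-name map to the request context using
// the typed key, so NodeNameTransport.RoundTrip can later read it back via
// req.Context().Value(NodeNamesCtxKey{}).
func withNodeNames(req *http.Request) (*http.Request, map[string]string) {
	nodeNames := make(map[string]string)
	ctx := context.WithValue(req.Context(), NodeNamesCtxKey{}, nodeNames)
	return req.WithContext(ctx), nodeNames
}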

View File

@ -0,0 +1,30 @@
package client
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
func TestHttpClient(t *testing.T) {
fips.InitFIPS(false)
// Valid TLS configuration
endpoint := &portainer.Endpoint{}
endpoint.TLSConfig = portainer.TLSConfiguration{TLS: true}
cli, err := httpClient(endpoint, nil)
require.NoError(t, err)
require.NotNil(t, cli)
// Invalid TLS configuration
endpoint.TLSConfig.TLSCertPath = "/invalid/path/client.crt"
endpoint.TLSConfig.TLSKeyPath = "/invalid/path/client.key"
cli, err = httpClient(endpoint, nil)
require.Error(t, err)
require.Nil(t, cli)
}

View File

@ -1,37 +0,0 @@
package docker
import "github.com/docker/docker/api/types"
type ContainerStats struct {
Running int `json:"running"`
Stopped int `json:"stopped"`
Healthy int `json:"healthy"`
Unhealthy int `json:"unhealthy"`
Total int `json:"total"`
}
func CalculateContainerStats(containers []types.Container) ContainerStats {
var running, stopped, healthy, unhealthy int
for _, container := range containers {
switch container.State {
case "running":
running++
case "healthy":
running++
healthy++
case "unhealthy":
running++
unhealthy++
case "exited", "stopped":
stopped++
}
}
return ContainerStats{
Running: running,
Stopped: stopped,
Healthy: healthy,
Unhealthy: unhealthy,
Total: len(containers),
}
}

View File

@ -1,27 +0,0 @@
package docker
import (
"testing"
"github.com/docker/docker/api/types"
"github.com/stretchr/testify/assert"
)
func TestCalculateContainerStats(t *testing.T) {
containers := []types.Container{
{State: "running"},
{State: "running"},
{State: "exited"},
{State: "stopped"},
{State: "healthy"},
{State: "unhealthy"},
}
stats := CalculateContainerStats(containers)
assert.Equal(t, 4, stats.Running)
assert.Equal(t, 2, stats.Stopped)
assert.Equal(t, 1, stats.Healthy)
assert.Equal(t, 1, stats.Unhealthy)
assert.Equal(t, 6, stats.Total)
}

View File

@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/docker"
imagetypes "github.com/containers/image/v5/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
@ -85,7 +85,7 @@ func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) {
return rmDigest, nil
}
func ParseLocalImage(inspect types.ImageInspect) (*Image, error) {
func ParseLocalImage(inspect image.InspectResponse) (*Image, error) {
if IsLocalImage(inspect) || IsDanglingImage(inspect) {
return nil, errors.New("the image is not regular")
}

View File

@ -0,0 +1,33 @@
package images
import (
"testing"
"github.com/docker/docker/api/types/image"
"github.com/stretchr/testify/require"
)
func TestParseLocalImage(t *testing.T) {
// Test with a regular image
img, err := ParseLocalImage(image.InspectResponse{
ID: "sha256:9234e8fb04c47cfe0f49931e4ac7eb76fa904e33b7f8576aec0501c085f02516",
RepoTags: []string{"myimage:latest"},
RepoDigests: []string{"myimage@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1"},
})
require.NoError(t, err)
require.NotNil(t, img)
require.Equal(t, "library/myimage", img.Path)
require.Equal(t, "latest", img.Tag)
require.Equal(t, "sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1", img.Digest.String())
// Test with a dangling image
img, err = ParseLocalImage(image.InspectResponse{
ID: "sha256:abcdef1234567890",
RepoTags: []string{"<none>:<none>"},
RepoDigests: []string{"<none>@<none>"},
})
require.Error(t, err)
require.Nil(t, img)
}

View File

@ -7,9 +7,9 @@ import (
"strings"
"text/template"
"github.com/docker/docker/api/types"
"github.com/containers/image/v5/docker/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -179,11 +179,11 @@ func IsLocalImage(image types.ImageInspect) bool {
// IsDanglingImage returns whether the given image is "dangling" which means
// that there are no repository references to the given image and it has no
// child images
func IsDanglingImage(image types.ImageInspect) bool {
func IsDanglingImage(image image.InspectResponse) bool {
return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>"
}
// IsNoTagImage returns whether the given image is damaged and has no tags
func IsNoTagImage(image types.ImageInspect) bool {
func IsNoTagImage(image image.InspectResponse) bool {
return len(image.RepoTags) == 0
}

View File

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestImageParser(t *testing.T) {
@ -14,7 +15,7 @@ func TestImageParser(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "portainer/portainer-ee",
})
is.NoError(err, "")
require.NoError(t, err)
is.Equal("docker.io/portainer/portainer-ee:latest", image.FullName())
is.Equal("portainer/portainer-ee", image.Opts.Name)
is.Equal("latest", image.Tag)
@ -30,10 +31,10 @@ func TestImageParser(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
require.NoError(t, err)
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("", image.Tag)
is.Empty(image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)
is.Equal("https://gcr.io/k8s-minikube/kicbase", image.HubLink)
@ -47,7 +48,7 @@ func TestImageParser(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
require.NoError(t, err)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag)
@ -68,8 +69,9 @@ func TestUpdateParsedImage(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
_ = image.WithTag("v0.0.31")
require.NoError(t, err)
err = image.WithTag("v0.0.31")
require.NoError(t, err)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.31", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.31", image.Tag)
@ -86,8 +88,9 @@ func TestUpdateParsedImage(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
_ = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3")
require.NoError(t, err)
err = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3")
require.NoError(t, err)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag)
@ -104,8 +107,9 @@ func TestUpdateParsedImage(t *testing.T) {
image, err := ParseImage(ParseImageOptions{
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
_ = image.TrimDigest()
require.NoError(t, err)
err = image.TrimDigest()
require.NoError(t, err)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("v0.0.30", image.Tag)

View File

@ -4,7 +4,9 @@ import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFindBestMatchNeedAuthRegistry(t *testing.T) {
@ -15,9 +17,9 @@ func TestFindBestMatchNeedAuthRegistry(t *testing.T) {
registries := []portainer.Registry{createNewRegistry("docker.io", "USERNAME", false),
createNewRegistry("hub-mirror.c.163.com", "", false)}
r, err := findBestMatchRegistry(image, registries)
is.NoError(err, "")
is.NotNil(r, "")
is.False(r.Authentication, "")
require.NoError(t, err)
is.NotNil(r)
is.False(r.Authentication)
is.Equal("docker.io", r.URL)
})
@ -26,9 +28,9 @@ func TestFindBestMatchNeedAuthRegistry(t *testing.T) {
registries := []portainer.Registry{createNewRegistry("docker.io", "", false),
createNewRegistry("hub-mirror.c.163.com", "USERNAME", false)}
r, err := findBestMatchRegistry(image, registries)
is.NoError(err, "")
is.NotNil(r, "")
is.False(r.Authentication, "")
require.NoError(t, err)
is.NotNil(r)
is.False(r.Authentication)
is.Equal("docker.io", r.URL)
})
@ -37,9 +39,9 @@ func TestFindBestMatchNeedAuthRegistry(t *testing.T) {
registries := []portainer.Registry{createNewRegistry("docker.io", "USERNAME", true),
createNewRegistry("hub-mirror.c.163.com", "", false)}
r, err := findBestMatchRegistry(image, registries)
is.NoError(err, "")
is.NotNil(r, "")
is.True(r.Authentication, "")
require.NoError(t, err)
is.NotNil(r)
is.True(r.Authentication)
is.Equal("docker.io", r.URL)
})
@ -47,9 +49,9 @@ func TestFindBestMatchNeedAuthRegistry(t *testing.T) {
image := "portainer/portainer-ee:latest"
registries := []portainer.Registry{createNewRegistry("docker.io", "", true)}
r, err := findBestMatchRegistry(image, registries)
is.NoError(err, "")
is.NotNil(r, "")
is.True(r.Authentication, "")
require.NoError(t, err)
is.NotNil(r)
is.True(r.Authentication)
is.Equal("docker.io", r.URL)
})
}

View File

@ -0,0 +1,92 @@
package stats
import (
"context"
"errors"
"sync"
"github.com/docker/docker/api/types/container"
)
type ContainerStats struct {
Running int `json:"running"`
Stopped int `json:"stopped"`
Healthy int `json:"healthy"`
Unhealthy int `json:"unhealthy"`
Total int `json:"total"`
}
type DockerClient interface {
ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error)
}
func CalculateContainerStats(ctx context.Context, cli DockerClient, containers []container.Summary) (ContainerStats, error) {
var running, stopped, healthy, unhealthy int
var mu sync.Mutex
var wg sync.WaitGroup
semaphore := make(chan struct{}, 5)
var aggErr error
var aggMu sync.Mutex
for i := range containers {
id := containers[i].ID
semaphore <- struct{}{}
wg.Go(func() {
defer func() { <-semaphore }()
containerInspection, err := cli.ContainerInspect(ctx, id)
stat := ContainerStats{}
if err != nil {
aggMu.Lock()
aggErr = errors.Join(aggErr, err)
aggMu.Unlock()
return
}
stat = getContainerStatus(containerInspection.State)
mu.Lock()
running += stat.Running
stopped += stat.Stopped
healthy += stat.Healthy
unhealthy += stat.Unhealthy
mu.Unlock()
})
}
wg.Wait()
return ContainerStats{
Running: running,
Stopped: stopped,
Healthy: healthy,
Unhealthy: unhealthy,
Total: len(containers),
}, aggErr
}
func getContainerStatus(state *container.State) ContainerStats {
stat := ContainerStats{}
if state == nil {
return stat
}
switch state.Status {
case container.StateRunning:
stat.Running++
case container.StateExited, container.StateDead:
stat.Stopped++
}
if state.Health != nil {
switch state.Health.Status {
case container.Healthy:
stat.Healthy++
case container.Unhealthy:
stat.Unhealthy++
}
}
return stat
}
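
A minimal usage sketch of the new stats helper, not part of the change above. It assumes a recent Docker SDK (whose client satisfies the DockerClient interface defined above) and an illustrative import path for this package:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"

	// Illustrative import path for the stats package introduced above.
	"github.com/portainer/portainer/api/docker/stats"
)

func main() {
	ctx := context.Background()

	// The SDK client implements ContainerInspect and therefore DockerClient.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}

	// Inspections run with at most 5 concurrent workers; inspection errors are
	// joined and returned alongside whatever counters could still be computed.
	s, err := stats.CalculateContainerStats(ctx, cli, containers)
	if err != nil {
		log.Printf("some containers could not be inspected: %v", err)
	}

	fmt.Printf("running=%d stopped=%d healthy=%d unhealthy=%d total=%d\n",
		s.Running, s.Stopped, s.Healthy, s.Unhealthy, s.Total)
}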

View File

@ -0,0 +1,234 @@
package stats
import (
"context"
"errors"
"testing"
"time"
"github.com/docker/docker/api/types/container"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// MockDockerClient implements the DockerClient interface for testing
type MockDockerClient struct {
mock.Mock
}
func (m *MockDockerClient) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
args := m.Called(ctx, containerID)
return args.Get(0).(container.InspectResponse), args.Error(1)
}
func TestCalculateContainerStats(t *testing.T) {
mockClient := new(MockDockerClient)
// Test containers - using enough containers to test concurrent processing
containers := []container.Summary{
{ID: "container1"},
{ID: "container2"},
{ID: "container3"},
{ID: "container4"},
{ID: "container5"},
{ID: "container6"},
{ID: "container7"},
{ID: "container8"},
{ID: "container9"},
{ID: "container10"},
}
// Setup mock expectations with different container states to test various scenarios
containerStates := []struct {
id string
status string
health *container.Health
expected ContainerStats
}{
{"container1", container.StateRunning, &container.Health{Status: container.Healthy}, ContainerStats{Running: 1, Stopped: 0, Healthy: 1, Unhealthy: 0}},
{"container2", container.StateRunning, &container.Health{Status: container.Unhealthy}, ContainerStats{Running: 1, Stopped: 0, Healthy: 0, Unhealthy: 1}},
{"container3", container.StateRunning, nil, ContainerStats{Running: 1, Stopped: 0, Healthy: 0, Unhealthy: 0}},
{"container4", container.StateExited, nil, ContainerStats{Running: 0, Stopped: 1, Healthy: 0, Unhealthy: 0}},
{"container5", container.StateDead, nil, ContainerStats{Running: 0, Stopped: 1, Healthy: 0, Unhealthy: 0}},
{"container6", container.StateRunning, &container.Health{Status: container.Healthy}, ContainerStats{Running: 1, Stopped: 0, Healthy: 1, Unhealthy: 0}},
{"container7", container.StateRunning, &container.Health{Status: container.Unhealthy}, ContainerStats{Running: 1, Stopped: 0, Healthy: 0, Unhealthy: 1}},
{"container8", container.StateExited, nil, ContainerStats{Running: 0, Stopped: 1, Healthy: 0, Unhealthy: 0}},
{"container9", container.StateRunning, nil, ContainerStats{Running: 1, Stopped: 0, Healthy: 0, Unhealthy: 0}},
{"container10", container.StateDead, nil, ContainerStats{Running: 0, Stopped: 1, Healthy: 0, Unhealthy: 0}},
}
expected := ContainerStats{}
// Setup mock expectations for all containers with artificial delays to simulate real Docker calls
for _, state := range containerStates {
mockClient.On("ContainerInspect", mock.Anything, state.id).Return(container.InspectResponse{
ContainerJSONBase: &container.ContainerJSONBase{
State: &container.State{
Status: state.status,
Health: state.health,
},
},
}, nil).After(50 * time.Millisecond) // Simulate 50ms Docker API call
expected.Running += state.expected.Running
expected.Stopped += state.expected.Stopped
expected.Healthy += state.expected.Healthy
expected.Unhealthy += state.expected.Unhealthy
expected.Total++
}
// Call the function and measure time
startTime := time.Now()
stats, err := CalculateContainerStats(context.Background(), mockClient, containers)
require.NoError(t, err, "failed to calculate container stats")
duration := time.Since(startTime)
// Assert results
assert.Equal(t, expected, stats)
assert.Equal(t, expected.Running, stats.Running)
assert.Equal(t, expected.Stopped, stats.Stopped)
assert.Equal(t, expected.Healthy, stats.Healthy)
assert.Equal(t, expected.Unhealthy, stats.Unhealthy)
assert.Equal(t, 10, stats.Total)
// Verify concurrent processing by checking that all mock calls were made
mockClient.AssertExpectations(t)
// Test concurrency: With 5 workers and 10 containers taking 50ms each:
// Sequential would take: 10 * 50ms = 500ms
sequentialTime := 10 * 50 * time.Millisecond
// Verify that concurrent processing is actually faster than sequential
// Allow some overhead for goroutine scheduling
assert.Less(t, duration, sequentialTime, "Concurrent processing should be faster than sequential")
// Concurrent should take: ~100-150ms (depending on scheduling)
assert.Less(t, duration, 150*time.Millisecond, "Concurrent processing should be significantly faster")
assert.Greater(t, duration, 100*time.Millisecond, "Concurrent processing should be longer than 100ms")
}
func TestCalculateContainerStatsAllErrors(t *testing.T) {
mockClient := new(MockDockerClient)
// Test containers
containers := []container.Summary{
{ID: "container1"},
{ID: "container2"},
}
// Setup mock expectations with all calls returning errors
mockClient.On("ContainerInspect", mock.Anything, "container1").Return(container.InspectResponse{}, errors.New("network error"))
mockClient.On("ContainerInspect", mock.Anything, "container2").Return(container.InspectResponse{}, errors.New("permission denied"))
// Call the function
stats, err := CalculateContainerStats(context.Background(), mockClient, containers)
// Assert that an error was returned
require.Error(t, err, "should return error when all containers fail to inspect")
assert.Contains(t, err.Error(), "network error", "error should contain one of the original error messages")
assert.Contains(t, err.Error(), "permission denied", "error should contain the other original error message")
// Assert that stats are zero since no containers were successfully processed
expectedStats := ContainerStats{
Running: 0,
Stopped: 0,
Healthy: 0,
Unhealthy: 0,
Total: 2, // total containers processed
}
assert.Equal(t, expectedStats, stats)
// Verify all mock calls were made
mockClient.AssertExpectations(t)
}
func TestGetContainerStatus(t *testing.T) {
testCases := []struct {
name string
state *container.State
expected ContainerStats
}{
{
name: "running healthy container",
state: &container.State{
Status: container.StateRunning,
Health: &container.Health{
Status: container.Healthy,
},
},
expected: ContainerStats{
Running: 1,
Stopped: 0,
Healthy: 1,
Unhealthy: 0,
},
},
{
name: "running unhealthy container",
state: &container.State{
Status: container.StateRunning,
Health: &container.Health{
Status: container.Unhealthy,
},
},
expected: ContainerStats{
Running: 1,
Stopped: 0,
Healthy: 0,
Unhealthy: 1,
},
},
{
name: "running container without health check",
state: &container.State{
Status: container.StateRunning,
},
expected: ContainerStats{
Running: 1,
Stopped: 0,
Healthy: 0,
Unhealthy: 0,
},
},
{
name: "exited container",
state: &container.State{
Status: container.StateExited,
},
expected: ContainerStats{
Running: 0,
Stopped: 1,
Healthy: 0,
Unhealthy: 0,
},
},
{
name: "dead container",
state: &container.State{
Status: container.StateDead,
},
expected: ContainerStats{
Running: 0,
Stopped: 1,
Healthy: 0,
Unhealthy: 0,
},
},
{
name: "nil state",
state: nil,
expected: ContainerStats{
Running: 0,
Stopped: 0,
Healthy: 0,
Unhealthy: 0,
},
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
stat := getContainerStatus(testCase.state)
assert.Equal(t, testCase.expected, stat)
})
}
}

View File

@ -8,7 +8,9 @@ import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_createEnvFile(t *testing.T) {
@ -61,7 +63,7 @@ func Test_createEnvFile(t *testing.T) {
assert.Equal(t, tt.expected, string(content))
} else {
assert.Equal(t, "", result)
assert.Empty(t, result)
}
})
}
@ -79,7 +81,7 @@ func Test_createEnvFile_mergesDefultAndInplaceEnvVars(t *testing.T) {
}
result, err := createEnvFile(stack)
assert.Equal(t, filepath.Join(stack.ProjectPath, "stack.env"), result)
assert.NoError(t, err)
require.NoError(t, err)
assert.FileExists(t, path.Join(dir, "stack.env"))
f, _ := os.Open(path.Join(dir, "stack.env"))
content, _ := io.ReadAll(f)

View File

@ -7,6 +7,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mockKubectlClient struct {
@ -68,7 +69,7 @@ func TestExecuteKubectlOperation_Apply_Success(t *testing.T) {
manifests := []string{"manifest1.yaml", "manifest2.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, called)
}
@ -86,7 +87,7 @@ func TestExecuteKubectlOperation_Apply_Error(t *testing.T) {
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
@ -104,7 +105,7 @@ func TestExecuteKubectlOperation_Delete_Success(t *testing.T) {
manifests := []string{"manifest1.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, called)
}
@ -122,7 +123,7 @@ func TestExecuteKubectlOperation_Delete_Error(t *testing.T) {
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
@ -140,7 +141,7 @@ func TestExecuteKubectlOperation_RolloutRestart_Success(t *testing.T) {
resources := []string{"deployment/nginx"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, called)
}
@ -158,7 +159,7 @@ func TestExecuteKubectlOperation_RolloutRestart_Error(t *testing.T) {
resources := []string{"deployment/error"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
@ -168,6 +169,6 @@ func TestExecuteKubectlOperation_UnsupportedOperation(t *testing.T) {
err := testExecuteKubectlOperation(mockClient, "unsupported", []string{})
assert.Error(t, err)
require.Error(t, err)
assert.Contains(t, err.Error(), "unsupported operation")
}

View File

@ -7,12 +7,13 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_copyFile_returnsError_whenSourceDoesNotExist(t *testing.T) {
tmpdir := t.TempDir()
err := copyFile("does-not-exist", tmpdir)
assert.Error(t, err)
require.Error(t, err)
}
func Test_copyFile_shouldMakeAbackup(t *testing.T) {
@ -21,7 +22,7 @@ func Test_copyFile_shouldMakeAbackup(t *testing.T) {
os.WriteFile(path.Join(tmpdir, "origin"), content, 0600)
err := copyFile(path.Join(tmpdir, "origin"), path.Join(tmpdir, "copy"))
assert.NoError(t, err)
require.NoError(t, err)
copyContent, _ := os.ReadFile(path.Join(tmpdir, "copy"))
assert.Equal(t, content, copyContent)
@ -30,7 +31,7 @@ func Test_copyFile_shouldMakeAbackup(t *testing.T) {
func Test_CopyDir_shouldCopyAllFilesAndDirectories(t *testing.T) {
destination := t.TempDir()
err := CopyDir("./testdata/copy_test", destination, true)
assert.NoError(t, err)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(destination, "copy_test", "outer"))
assert.FileExists(t, filepath.Join(destination, "copy_test", "dir", ".dotfile"))
@ -40,7 +41,7 @@ func Test_CopyDir_shouldCopyAllFilesAndDirectories(t *testing.T) {
func Test_CopyDir_shouldCopyOnlyDirContents(t *testing.T) {
destination := t.TempDir()
err := CopyDir("./testdata/copy_test", destination, false)
assert.NoError(t, err)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(destination, "outer"))
assert.FileExists(t, filepath.Join(destination, "dir", ".dotfile"))
@ -50,7 +51,7 @@ func Test_CopyDir_shouldCopyOnlyDirContents(t *testing.T) {
func Test_CopyPath_shouldSkipWhenNotExist(t *testing.T) {
tmpdir := t.TempDir()
err := CopyPath("does-not-exists", tmpdir)
assert.NoError(t, err)
require.NoError(t, err)
assert.NoFileExists(t, tmpdir)
}
@ -62,17 +63,17 @@ func Test_CopyPath_shouldCopyFile(t *testing.T) {
os.MkdirAll(path.Join(tmpdir, "backup"), 0700)
err := CopyPath(path.Join(tmpdir, "file"), path.Join(tmpdir, "backup"))
assert.NoError(t, err)
require.NoError(t, err)
copyContent, err := os.ReadFile(path.Join(tmpdir, "backup", "file"))
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, content, copyContent)
}
func Test_CopyPath_shouldCopyDir(t *testing.T) {
destination := t.TempDir()
err := CopyPath("./testdata/copy_test", destination)
assert.NoError(t, err)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(destination, "copy_test", "outer"))
assert.FileExists(t, filepath.Join(destination, "copy_test", "dir", ".dotfile"))

View File

@ -848,7 +848,7 @@ func defaultMTLSCertPathUnderFileStore() (string, string, string) {
return caCertPath, certPath, keyPath
}
// GetDefaultChiselPrivateKeyPath returns the chisle private key path
// GetDefaultChiselPrivateKeyPath returns the chisel private key path
func (service *Service) GetDefaultChiselPrivateKeyPath() string {
privateKeyPath := defaultChiselPrivateKeyPathUnderFileStore()
return service.wrapFileStore(privateKeyPath)

View File

@ -8,6 +8,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_fileSystemService_FileExists_whenFileExistsShouldReturnTrue(t *testing.T) {
@ -30,14 +31,14 @@ func Test_FileExists_whenFileNotExistsShouldReturnFalse(t *testing.T) {
func testHelperFileExists_fileExists(t *testing.T, checker func(path string) (bool, error)) {
file, err := os.CreateTemp("", t.Name())
assert.NoError(t, err, "CreateTemp should not fail")
require.NoError(t, err, "CreateTemp should not fail")
t.Cleanup(func() {
os.RemoveAll(file.Name())
})
exists, err := checker(file.Name())
assert.NoError(t, err, "FileExists should not fail")
require.NoError(t, err, "FileExists should not fail")
assert.True(t, exists)
}
@ -46,10 +47,10 @@ func testHelperFileExists_fileNotExists(t *testing.T, checker func(path string)
filePath := path.Join(t.TempDir(), fmt.Sprintf("%s%d", t.Name(), rand.Int()))
err := os.RemoveAll(filePath)
assert.NoError(t, err, "RemoveAll should not fail")
require.NoError(t, err, "RemoveAll should not fail")
exists, err := checker(filePath)
assert.NoError(t, err, "FileExists should not fail")
require.NoError(t, err, "FileExists should not fail")
assert.False(t, exists)
}

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var content = []byte("content")
@ -13,25 +14,25 @@ var content = []byte("content")
func Test_movePath_shouldFailIfSourceDirDoesNotExist(t *testing.T) {
sourceDir := "missing"
destinationDir := t.TempDir()
file1 := addFile(destinationDir, "dir", "file")
file2 := addFile(destinationDir, "file")
file1 := addFile(t, destinationDir, "dir", "file")
file2 := addFile(t, destinationDir, "file")
err := MoveDirectory(sourceDir, destinationDir, false)
assert.Error(t, err, "move directory should fail when source path is missing")
require.Error(t, err, "move directory should fail when source path is missing")
assert.FileExists(t, file1, "destination dir contents should remain")
assert.FileExists(t, file2, "destination dir contents should remain")
}
func Test_movePath_shouldFailIfDestinationDirExists(t *testing.T) {
sourceDir := t.TempDir()
file1 := addFile(sourceDir, "dir", "file")
file2 := addFile(sourceDir, "file")
file1 := addFile(t, sourceDir, "dir", "file")
file2 := addFile(t, sourceDir, "file")
destinationDir := t.TempDir()
file3 := addFile(destinationDir, "dir", "file")
file4 := addFile(destinationDir, "file")
file3 := addFile(t, destinationDir, "dir", "file")
file4 := addFile(t, destinationDir, "file")
err := MoveDirectory(sourceDir, destinationDir, false)
assert.Error(t, err, "move directory should fail when destination directory already exists")
require.Error(t, err, "move directory should fail when destination directory already exists")
assert.FileExists(t, file1, "source dir contents should remain")
assert.FileExists(t, file2, "source dir contents should remain")
assert.FileExists(t, file3, "destination dir contents should remain")
@ -40,14 +41,14 @@ func Test_movePath_shouldFailIfDestinationDirExists(t *testing.T) {
func Test_movePath_succesIfOverwriteSetWhenDestinationDirExists(t *testing.T) {
sourceDir := t.TempDir()
file1 := addFile(sourceDir, "dir", "file")
file2 := addFile(sourceDir, "file")
file1 := addFile(t, sourceDir, "dir", "file")
file2 := addFile(t, sourceDir, "file")
destinationDir := t.TempDir()
file3 := addFile(destinationDir, "dir", "file")
file4 := addFile(destinationDir, "file")
file3 := addFile(t, destinationDir, "dir", "file")
file4 := addFile(t, destinationDir, "file")
err := MoveDirectory(sourceDir, destinationDir, true)
assert.NoError(t, err)
require.NoError(t, err)
assert.NoFileExists(t, file1, "source dir contents should be moved")
assert.NoFileExists(t, file2, "source dir contents should be moved")
assert.FileExists(t, file3, "destination dir contents should remain")
@ -58,32 +59,34 @@ func Test_movePath_successWhenSourceExistsAndDestinationIsMissing(t *testing.T)
tmp := t.TempDir()
sourceDir := path.Join(tmp, "source")
os.Mkdir(sourceDir, 0766)
file1 := addFile(sourceDir, "dir", "file")
file2 := addFile(sourceDir, "file")
file1 := addFile(t, sourceDir, "dir", "file")
file2 := addFile(t, sourceDir, "file")
destinationDir := path.Join(tmp, "destination")
err := MoveDirectory(sourceDir, destinationDir, false)
assert.NoError(t, err)
require.NoError(t, err)
assert.NoFileExists(t, file1, "source dir contents should be moved")
assert.NoFileExists(t, file2, "source dir contents should be moved")
assertFileContent(t, path.Join(destinationDir, "file"))
assertFileContent(t, path.Join(destinationDir, "dir", "file"))
}
func addFile(fileParts ...string) (filepath string) {
func addFile(t *testing.T, fileParts ...string) (filepath string) {
if len(fileParts) > 2 {
dir := path.Join(fileParts[:len(fileParts)-1]...)
os.MkdirAll(dir, 0766)
err := os.MkdirAll(dir, 0766)
require.NoError(t, err)
}
p := path.Join(fileParts...)
os.WriteFile(p, content, 0766)
err := os.WriteFile(p, content, 0766)
require.NoError(t, err)
return p
}
func assertFileContent(t *testing.T, filePath string) {
actualContent, err := os.ReadFile(filePath)
assert.NoErrorf(t, err, "failed to read file %s", filePath)
require.NoError(t, err, "failed to read file %s", filePath)
assert.Equal(t, content, actualContent, "file %s content doesn't match", filePath)
}

View File

@ -5,14 +5,14 @@ import (
"path"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func createService(t *testing.T) *Service {
dataStorePath := path.Join(t.TempDir(), t.Name())
service, err := NewService(dataStorePath, "")
assert.NoError(t, err, "NewService should not fail")
require.NoError(t, err, "NewService should not fail")
t.Cleanup(func() {
os.RemoveAll(dataStorePath)

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_WriteFile_CanStoreContentInANewFile(t *testing.T) {
@ -14,7 +15,7 @@ func Test_WriteFile_CanStoreContentInANewFile(t *testing.T) {
content := []byte("content")
err := WriteToFile(tmpFilePath, content)
assert.NoError(t, err)
require.NoError(t, err)
fileContent, _ := os.ReadFile(tmpFilePath)
assert.Equal(t, content, fileContent)
@ -25,11 +26,11 @@ func Test_WriteFile_CanOverwriteExistingFile(t *testing.T) {
tmpFilePath := path.Join(tmpDir, "dummy")
err := WriteToFile(tmpFilePath, []byte("content"))
assert.NoError(t, err)
require.NoError(t, err)
content := []byte("new content")
err = WriteToFile(tmpFilePath, content)
assert.NoError(t, err)
require.NoError(t, err)
fileContent, _ := os.ReadFile(tmpFilePath)
assert.Equal(t, content, fileContent)
@ -41,7 +42,7 @@ func Test_WriteFile_CanWriteANestedPath(t *testing.T) {
content := []byte("content")
err := WriteToFile(tmpFilePath, content)
assert.NoError(t, err)
require.NoError(t, err)
fileContent, _ := os.ReadFile(tmpFilePath)
assert.Equal(t, content, fileContent)

View File

@ -60,15 +60,9 @@ func NewAzureClient() *azureClient {
}
func newHttpClientForAzure(insecureSkipVerify bool) *http.Client {
tlsConfig := crypto.CreateTLSConfiguration()
if insecureSkipVerify {
tlsConfig.InsecureSkipVerify = true
}
httpsCli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
Proxy: http.ProxyFromEnvironment,
},
Timeout: 300 * time.Second,
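
For context, this refactor (and the matching one in the OpenAMT service further down) moves the skip-verify decision into the TLS helper instead of mutating the config afterwards. A minimal sketch of what such a helper could look like — an assumption only, since crypto.CreateTLSConfiguration itself is not part of this diff and presumably also applies the project's FIPS-aware defaults:

package crypto

import "crypto/tls"

// Sketch only: fold the caller's skip-verify choice into the returned config
// so call sites no longer flip InsecureSkipVerify after construction.
func CreateTLSConfiguration(insecureSkipVerify bool) *tls.Config {
	return &tls.Config{
		InsecureSkipVerify: insecureSkipVerify, // callers opt in explicitly
	}
}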

View File

@ -12,6 +12,7 @@ import (
_ "github.com/joho/godotenv/autoload"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const privateAzureRepoURL = "https://portainer.visualstudio.com/gitops-test/_git/gitops-test"
@ -58,8 +59,16 @@ func TestService_ClonePublicRepository_Azure(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
dst := t.TempDir()
repositoryUrl := fmt.Sprintf(tt.args.repositoryURLFormat, tt.args.password)
err := service.CloneRepository(dst, repositoryUrl, tt.args.referenceName, "", "", false)
assert.NoError(t, err)
err := service.CloneRepository(
dst,
repositoryUrl,
tt.args.referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
})
}
@ -73,8 +82,16 @@ func TestService_ClonePrivateRepository_Azure(t *testing.T) {
dst := t.TempDir()
err := service.CloneRepository(dst, privateAzureRepoURL, "refs/heads/main", "", pat, false)
assert.NoError(t, err)
err := service.CloneRepository(
dst,
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
}
@ -84,8 +101,15 @@ func TestService_LatestCommitID_Azure(t *testing.T) {
pat := getRequiredValue(t, "AZURE_DEVOPS_PAT")
service := NewService(context.TODO())
id, err := service.LatestCommitID(privateAzureRepoURL, "refs/heads/main", "", pat, false)
assert.NoError(t, err)
id, err := service.LatestCommitID(
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
require.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
}
@ -96,8 +120,15 @@ func TestService_ListRefs_Azure(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := NewService(context.TODO())
refs, err := service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
assert.NoError(t, err)
refs, err := service.ListRefs(
privateAzureRepoURL,
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
}
@ -108,8 +139,8 @@ func TestService_ListRefs_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
go service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
time.Sleep(2 * time.Second)
}
@ -247,16 +278,26 @@ func TestService_ListFiles_Azure(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false)
paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
if tt.expect.shouldFail {
assert.Error(t, err)
require.Error(t, err)
if tt.expect.err != nil {
assert.Equal(t, tt.expect.err, err)
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.matchedCount > 0 {
assert.Greater(t, len(paths), 0)
assert.NotEmpty(t, paths)
}
}
})
@ -270,8 +311,28 @@ func TestService_ListFiles_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false)
go service.ListFiles(
privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
time.Sleep(2 * time.Second)
}

View File

@ -8,7 +8,10 @@ import (
"testing"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_buildDownloadUrl(t *testing.T) {
@ -18,15 +21,18 @@ func Test_buildDownloadUrl(t *testing.T) {
project: "project",
repository: "repository",
}, "refs/heads/main")
require.NoError(t, err)
expectedUrl, _ := url.Parse("https://dev.azure.com/organisation/project/_apis/git/repositories/repository/items?scopePath=/&download=true&versionDescriptor.version=main&$format=zip&recursionLevel=full&api-version=6.0&versionDescriptor.versionType=branch")
actualUrl, _ := url.Parse(u)
if assert.NoError(t, err) {
assert.Equal(t, expectedUrl.Host, actualUrl.Host)
assert.Equal(t, expectedUrl.Scheme, actualUrl.Scheme)
assert.Equal(t, expectedUrl.Path, actualUrl.Path)
assert.Equal(t, expectedUrl.Query(), actualUrl.Query())
}
expectedUrl, err := url.Parse("https://dev.azure.com/organisation/project/_apis/git/repositories/repository/items?scopePath=/&download=true&versionDescriptor.version=main&$format=zip&recursionLevel=full&api-version=6.0&versionDescriptor.versionType=branch")
require.NoError(t, err)
actualUrl, err := url.Parse(u)
require.NoError(t, err)
assert.Equal(t, expectedUrl.Host, actualUrl.Host)
assert.Equal(t, expectedUrl.Scheme, actualUrl.Scheme)
assert.Equal(t, expectedUrl.Path, actualUrl.Path)
assert.Equal(t, expectedUrl.Query(), actualUrl.Query())
}
func Test_buildRootItemUrl(t *testing.T) {
@ -39,7 +45,7 @@ func Test_buildRootItemUrl(t *testing.T) {
expectedUrl, _ := url.Parse("https://dev.azure.com/organisation/project/_apis/git/repositories/repository/items?scopePath=/&api-version=6.0&versionDescriptor.version=main&versionDescriptor.versionType=branch")
actualUrl, _ := url.Parse(u)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, expectedUrl.Host, actualUrl.Host)
assert.Equal(t, expectedUrl.Scheme, actualUrl.Scheme)
assert.Equal(t, expectedUrl.Path, actualUrl.Path)
@ -56,7 +62,7 @@ func Test_buildRefsUrl(t *testing.T) {
expectedUrl, _ := url.Parse("https://dev.azure.com/organisation/project/_apis/git/repositories/repository/refs?api-version=6.0")
actualUrl, _ := url.Parse(u)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, expectedUrl.Host, actualUrl.Host)
assert.Equal(t, expectedUrl.Scheme, actualUrl.Scheme)
assert.Equal(t, expectedUrl.Path, actualUrl.Path)
@ -73,7 +79,7 @@ func Test_buildTreeUrl(t *testing.T) {
expectedUrl, _ := url.Parse("https://dev.azure.com/organisation/project/_apis/git/repositories/repository/trees/sha1?api-version=6.0&recursive=true")
actualUrl, _ := url.Parse(u)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, expectedUrl.Host, actualUrl.Host)
assert.Equal(t, expectedUrl.Scheme, actualUrl.Scheme)
assert.Equal(t, expectedUrl.Path, actualUrl.Path)
@ -234,6 +240,8 @@ func Test_isAzureUrl(t *testing.T) {
}
func Test_azureDownloader_downloadZipFromAzureDevOps(t *testing.T) {
fips.InitFIPS(false)
type args struct {
options baseOption
}
@ -301,13 +309,15 @@ func Test_azureDownloader_downloadZipFromAzureDevOps(t *testing.T) {
},
}
_, err := a.downloadZipFromAzureDevOps(context.Background(), option)
assert.Error(t, err)
require.Error(t, err)
assert.Equal(t, tt.want, zipRequestAuth)
})
}
}
func Test_azureDownloader_latestCommitID(t *testing.T) {
fips.InitFIPS(false)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
response := `{
"count": 1,
@ -497,12 +507,12 @@ func Test_listRefs_azure(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
refs, err := client.listRefs(context.TODO(), tt.args)
if tt.expect.err == nil {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.refsCount > 0 {
assert.Greater(t, len(refs), 0)
assert.NotEmpty(t, refs)
}
} else {
assert.Error(t, err)
require.Error(t, err)
assert.Equal(t, tt.expect.err, err)
}
})
@ -608,14 +618,14 @@ func Test_listFiles_azure(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
paths, err := client.listFiles(context.TODO(), tt.args)
if tt.expect.shouldFail {
assert.Error(t, err)
require.Error(t, err)
if tt.expect.err != nil {
assert.Equal(t, tt.expect.err, err)
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.matchedCount > 0 {
assert.Greater(t, len(paths), 0)
assert.NotEmpty(t, paths)
}
}
})

View File

@ -19,6 +19,7 @@ type CloneOptions struct {
ReferenceName string
Username string
Password string
AuthType gittypes.GitCredentialAuthType
// TLSSkipVerify skips SSL verification when cloning the Git repository
TLSSkipVerify bool `example:"false"`
}
@ -42,7 +43,15 @@ func CloneWithBackup(gitService portainer.GitService, fileService portainer.File
cleanUp = true
if err := gitService.CloneRepository(options.ProjectPath, options.URL, options.ReferenceName, options.Username, options.Password, options.TLSSkipVerify); err != nil {
if err := gitService.CloneRepository(
options.ProjectPath,
options.URL,
options.ReferenceName,
options.Username,
options.Password,
options.AuthType,
options.TLSSkipVerify,
); err != nil {
cleanUp = false
if err := filesystem.MoveDirectory(backupProjectPath, options.ProjectPath, false); err != nil {
log.Warn().Err(err).Msg("failed restoring backup folder")

View File

@ -7,12 +7,14 @@ import (
"strings"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/rs/zerolog/log"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport"
githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/pkg/errors"
@ -33,7 +35,7 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
URL: opt.repositoryUrl,
Depth: opt.depth,
InsecureSkipTLS: opt.tlsSkipVerify,
Auth: getAuth(opt.username, opt.password),
Auth: getAuth(opt.authType, opt.username, opt.password),
Tags: git.NoTags,
}
@ -51,7 +53,10 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
}
if !c.preserveGitDirectory {
os.RemoveAll(filepath.Join(dst, ".git"))
err := os.RemoveAll(filepath.Join(dst, ".git"))
if err != nil {
log.Error().Err(err).Msg("failed to remove .git directory")
}
}
return nil
@ -64,7 +69,7 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
})
listOptions := &git.ListOptions{
Auth: getAuth(opt.username, opt.password),
Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@ -94,7 +99,23 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
return "", errors.Errorf("could not find ref %q in the repository", opt.referenceName)
}
func getAuth(username, password string) *githttp.BasicAuth {
func getAuth(authType gittypes.GitCredentialAuthType, username, password string) transport.AuthMethod {
if password == "" {
return nil
}
switch authType {
case gittypes.GitCredentialAuthType_Basic:
return getBasicAuth(username, password)
case gittypes.GitCredentialAuthType_Token:
return getTokenAuth(password)
default:
log.Warn().Msg("unknown git credentials authorization type, defaulting to None")
return nil
}
}
func getBasicAuth(username, password string) *githttp.BasicAuth {
if password != "" {
if username == "" {
username = "token"
@ -108,6 +129,15 @@ func getAuth(username, password string) *githttp.BasicAuth {
return nil
}
func getTokenAuth(token string) *githttp.TokenAuth {
if token != "" {
return &githttp.TokenAuth{
Token: token,
}
}
return nil
}
func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, error) {
rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: "origin",
@ -115,7 +145,7 @@ func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, err
})
listOptions := &git.ListOptions{
Auth: getAuth(opt.username, opt.password),
Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@ -143,7 +173,7 @@ func (c *gitClient) listFiles(ctx context.Context, opt fetchOption) ([]string, e
Depth: 1,
SingleBranch: true,
ReferenceName: plumbing.ReferenceName(opt.referenceName),
Auth: getAuth(opt.username, opt.password),
Auth: getAuth(opt.authType, opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
Tags: git.NoTags,
}
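
A small sketch (a hypothetical test, assumed to live in the same package as getAuth) of what each credential type resolves to: basic credentials are passed through as username/password, while token credentials are sent as an Authorization: Bearer header, as the token-auth test further down verifies.

package git

import (
	"testing"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
	gittypes "github.com/portainer/portainer/api/git/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_getAuth_sketch(t *testing.T) {
	// Basic credentials are passed through unchanged.
	basic := getAuth(gittypes.GitCredentialAuthType_Basic, "jane", "s3cret")
	require.IsType(t, &githttp.BasicAuth{}, basic)
	assert.Equal(t, "jane", basic.(*githttp.BasicAuth).Username)

	// Token credentials only carry the token; go-git sends it as "Bearer <token>".
	token := getAuth(gittypes.GitCredentialAuthType_Token, "", "ghp_example")
	require.IsType(t, &githttp.TokenAuth{}, token)
	assert.Equal(t, "ghp_example", token.(*githttp.TokenAuth).Token)

	// An empty password always disables authentication, regardless of the type.
	assert.Nil(t, getAuth(gittypes.GitCredentialAuthType_Basic, "jane", ""))
}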

View File

@ -2,12 +2,16 @@ package git
import (
"context"
"net/http"
"net/http/httptest"
"path/filepath"
"testing"
"time"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -24,8 +28,16 @@ func TestService_ClonePrivateRepository_GitHub(t *testing.T) {
dst := t.TempDir()
repositoryUrl := privateGitRepoURL
err := service.CloneRepository(dst, repositoryUrl, "refs/heads/main", username, accessToken, false)
assert.NoError(t, err)
err := service.CloneRepository(
dst,
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
}
@ -37,8 +49,15 @@ func TestService_LatestCommitID_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL
id, err := service.LatestCommitID(repositoryUrl, "refs/heads/main", username, accessToken, false)
assert.NoError(t, err)
id, err := service.LatestCommitID(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
require.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
}
@ -50,8 +69,8 @@ func TestService_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
}
@ -63,8 +82,8 @@ func TestService_ListRefs_Github_Concurrently(t *testing.T) {
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
repositoryUrl := privateGitRepoURL
go service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListRefs(repositoryUrl, username, accessToken, false, false)
go service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
time.Sleep(2 * time.Second)
}
@ -202,16 +221,26 @@ func TestService_ListFiles_GitHub(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false)
paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
if tt.expect.shouldFail {
assert.Error(t, err)
require.Error(t, err)
if tt.expect.err != nil {
assert.Equal(t, tt.expect.err, err)
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.matchedCount > 0 {
assert.Greater(t, len(paths), 0)
assert.NotEmpty(t, paths)
}
}
})
@ -226,8 +255,28 @@ func TestService_ListFiles_Github_Concurrently(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
go service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
time.Sleep(2 * time.Second)
}
@ -240,8 +289,18 @@ func TestService_purgeCache_Github(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME")
service := NewService(context.TODO())
service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len())
@ -261,8 +320,18 @@ func TestService_purgeCacheByTTL_Github(t *testing.T) {
// 40*timeout is designed for giving enough time for ListRefs and ListFiles to cache the result
service := newService(context.TODO(), 2, 40*timeout)
service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len())
@ -293,13 +362,13 @@ func TestService_HardRefresh_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false)
assert.Error(t, err)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
require.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
}
@ -311,27 +380,47 @@ func TestService_HardRefresh_ListRefs_And_RemoveAllCaches_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len())
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len())
files, err = service.ListFiles(repositoryUrl, "refs/heads/test", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
files, err = service.ListFiles(
repositoryUrl,
"refs/heads/test",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 2, service.repoFileCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false)
assert.Error(t, err)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
require.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", true, false)
assert.Error(t, err)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, true, false)
require.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
// The relevant file caches should be removed too
assert.Equal(t, 0, service.repoFileCache.Len())
@ -344,12 +433,72 @@ func TestService_HardRefresh_ListFiles_GitHub(t *testing.T) {
accessToken := getRequiredValue(t, "GITHUB_PAT")
username := getRequiredValue(t, "GITHUB_USERNAME")
repositoryUrl := privateGitRepoURL
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len())
_, err = service.ListFiles(repositoryUrl, "refs/heads/main", username, "fake-token", false, true, []string{}, false)
assert.Error(t, err)
_, err = service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
"fake-token",
gittypes.GitCredentialAuthType_Basic,
false,
true,
[]string{},
false,
)
require.Error(t, err)
assert.Equal(t, 0, service.repoFileCache.Len())
}
func TestService_CloneRepository_TokenAuth(t *testing.T) {
ensureIntegrationTest(t)
service := newService(context.TODO(), 2, 0)
var requests []*http.Request
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requests = append(requests, r)
}))
accessToken := "test_access_token"
username := "test_username"
repositoryUrl := testServer.URL
// Since we aren't hitting a real git server we ignore the error
_ = service.CloneRepository(
"test_dir",
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Token,
false,
)
testServer.Close()
if len(requests) != 1 {
t.Fatalf("expected 1 request sent but got %d", len(requests))
}
gotAuthHeader := requests[0].Header.Get("Authorization")
if gotAuthHeader == "" {
t.Fatal("no Authorization header in git request")
}
expectedAuthHeader := "Bearer test_access_token"
if gotAuthHeader != expectedAuthHeader {
t.Fatalf("expected Authorization header %q but got %q", expectedAuthHeader, gotAuthHeader)
}
}

View File

@ -13,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func setup(t *testing.T) string {
@ -38,9 +39,9 @@ func Test_ClonePublicRepository_Shallow(t *testing.T) {
dir := t.TempDir()
t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false)
assert.NoError(t, err)
assert.Equal(t, 1, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth")
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
require.NoError(t, err)
assert.Equal(t, 1, getCommitHistoryLength(t, dir), "cloned repo has incorrect depth")
}
func Test_ClonePublicRepository_NoGitDirectory(t *testing.T) {
@ -50,8 +51,8 @@ func Test_ClonePublicRepository_NoGitDirectory(t *testing.T) {
dir := t.TempDir()
t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false)
assert.NoError(t, err)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
require.NoError(t, err)
assert.NoDirExists(t, filepath.Join(dir, ".git"))
}
@ -74,8 +75,8 @@ func Test_cloneRepository(t *testing.T) {
depth: 10,
})
assert.NoError(t, err)
assert.Equal(t, 4, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth")
require.NoError(t, err)
assert.Equal(t, 4, getCommitHistoryLength(t, dir), "cloned repo has incorrect depth")
}
func Test_latestCommitID(t *testing.T) {
@ -84,9 +85,9 @@ func Test_latestCommitID(t *testing.T) {
repositoryURL := setup(t)
referenceName := "refs/heads/main"
id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", false)
id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, "68dcaa7bd452494043c64252ab90db0f98ecf8d2", id)
}
@ -95,9 +96,9 @@ func Test_ListRefs(t *testing.T) {
repositoryURL := setup(t)
fs, err := service.ListRefs(repositoryURL, "", "", false, false)
fs, err := service.ListRefs(repositoryURL, "", "", gittypes.GitCredentialAuthType_Basic, false, false)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []string{"refs/heads/main"}, fs)
}
@ -107,13 +108,23 @@ func Test_ListFiles(t *testing.T) {
repositoryURL := setup(t)
referenceName := "refs/heads/main"
fs, err := service.ListFiles(repositoryURL, referenceName, "", "", false, false, []string{".yml"}, false)
fs, err := service.ListFiles(
repositoryURL,
referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{".yml"},
false,
)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []string{"docker-compose.yml"}, fs)
}
func getCommitHistoryLength(t *testing.T, err error, dir string) int {
func getCommitHistoryLength(t *testing.T, dir string) int {
repo, err := git.PlainOpen(dir)
if err != nil {
t.Fatalf("can't open a git repo at %s with error %v", dir, err)
@ -125,11 +136,10 @@ func getCommitHistoryLength(t *testing.T, err error, dir string) int {
}
count := 0
err = iter.ForEach(func(_ *object.Commit) error {
if err := iter.ForEach(func(_ *object.Commit) error {
count++
return nil
})
if err != nil {
}); err != nil {
t.Fatalf("can't iterate over the commit history with error %v", err)
}
@ -205,12 +215,12 @@ func Test_listRefsPrivateRepository(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
refs, err := client.listRefs(context.TODO(), tt.args)
if tt.expect.err == nil {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.refsCount > 0 {
assert.Greater(t, len(refs), 0)
assert.NotEmpty(t, refs)
}
} else {
assert.Error(t, err)
require.Error(t, err)
assert.Equal(t, tt.expect.err, err)
}
})
@ -255,7 +265,7 @@ func Test_listFilesPrivateRepository(t *testing.T) {
name: "list tree with real repository and head ref but no credential",
args: fetchOption{
baseOption: baseOption{
repositoryUrl: privateGitRepoURL + "fake",
repositoryUrl: privateGitRepoURL,
username: "",
password: "",
},
@ -316,14 +326,14 @@ func Test_listFilesPrivateRepository(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
paths, err := client.listFiles(context.TODO(), tt.args)
if tt.expect.shouldFail {
assert.Error(t, err)
require.Error(t, err)
if tt.expect.err != nil {
assert.Equal(t, tt.expect.err, err)
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
if tt.expect.matchedCount > 0 {
assert.Greater(t, len(paths), 0)
assert.NotEmpty(t, paths)
}
}
})

View File

@ -8,6 +8,7 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/rs/zerolog/log"
"golang.org/x/sync/singleflight"
)
@ -22,6 +23,7 @@ type baseOption struct {
repositoryUrl string
username string
password string
authType gittypes.GitCredentialAuthType
tlsSkipVerify bool
}
@ -123,13 +125,22 @@ func (service *Service) timerHasStopped() bool {
// CloneRepository clones a git repository using the specified URL in the specified
// destination folder.
func (service *Service) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error {
func (service *Service) CloneRepository(
destination,
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) error {
options := cloneOption{
fetchOption: fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@ -155,12 +166,20 @@ func (service *Service) cloneRepository(destination string, options cloneOption)
}
// LatestCommitID returns SHA1 of the latest commit of the specified reference
func (service *Service) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
func (service *Service) LatestCommitID(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) (string, error) {
options := fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@ -170,7 +189,14 @@ func (service *Service) LatestCommitID(repositoryURL, referenceName, username, p
}
// ListRefs will list target repository's references without cloning the repository
func (service *Service) ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
func (service *Service) ListRefs(
repositoryURL,
username,
password string,
authType gittypes.GitCredentialAuthType,
hardRefresh bool,
tlsSkipVerify bool,
) ([]string, error) {
refCacheKey := generateCacheKey(repositoryURL, username, password, strconv.FormatBool(tlsSkipVerify))
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@ -196,6 +222,7 @@ func (service *Service) ListRefs(repositoryURL, username, password string, hardR
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
}
@ -215,18 +242,62 @@ var singleflightGroup = &singleflight.Group{}
// ListFiles will list all the files of the target repository with specific extensions.
// If extension is not provided, it will list all the files under the target repository
func (service *Service) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) {
repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
func (service *Service) ListFiles(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
dirOnly,
hardRefresh bool,
includedExts []string,
tlsSkipVerify bool,
) ([]string, error) {
repoKey := generateCacheKey(
repositoryURL,
referenceName,
username,
password,
strconv.FormatBool(tlsSkipVerify),
strconv.Itoa(int(authType)),
strconv.FormatBool(dirOnly),
)
fs, err, _ := singleflightGroup.Do(repoKey, func() (any, error) {
return service.listFiles(repositoryURL, referenceName, username, password, dirOnly, hardRefresh, tlsSkipVerify)
return service.listFiles(
repositoryURL,
referenceName,
username,
password,
authType,
dirOnly,
hardRefresh,
tlsSkipVerify,
)
})
return filterFiles(fs.([]string), includedExts), err
}
func (service *Service) listFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
func (service *Service) listFiles(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
dirOnly,
hardRefresh bool,
tlsSkipVerify bool,
) ([]string, error) {
repoKey := generateCacheKey(
repositoryURL,
referenceName,
username,
password,
strconv.FormatBool(tlsSkipVerify),
strconv.Itoa(int(authType)),
strconv.FormatBool(dirOnly),
)
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@ -247,6 +318,7 @@ func (service *Service) listFiles(repositoryURL, referenceName, username, passwo
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,

View File

@ -1,12 +1,21 @@
package gittypes
import "errors"
import (
"errors"
)
var (
ErrIncorrectRepositoryURL = errors.New("git repository could not be found, please ensure that the URL is correct")
ErrAuthenticationFailure = errors.New("authentication failed, please ensure that the git credentials are correct")
)
type GitCredentialAuthType int
const (
GitCredentialAuthType_Basic GitCredentialAuthType = iota
GitCredentialAuthType_Token
)
// RepoConfig represents a configuration for a repo
type RepoConfig struct {
// The repo url
@ -24,10 +33,11 @@ type RepoConfig struct {
}
type GitAuthentication struct {
Username string
Password string
Username string
Password string
AuthorizationType GitCredentialAuthType
// Git credentials identifier when the value is not 0
// When the value is 0, Username and Password are set without using saved credential
// When the value is 0, Username, Password, and Authtype are set without using saved credential
// This is introduced since 2.15.0
GitCredentialID int `example:"0"`
}
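
An illustrative construction of the extended struct (all values made up): a personal access token supplied inline, so GitCredentialID stays 0 and the new AuthorizationType selects token auth.

package main

import (
	"fmt"

	gittypes "github.com/portainer/portainer/api/git/types"
)

func main() {
	auth := gittypes.GitAuthentication{
		Username:          "", // not needed for token auth
		Password:          "glpat-example-token",
		AuthorizationType: gittypes.GitCredentialAuthType_Token,
		GitCredentialID:   0, // 0 = use the inline credentials above, not a saved one
	}
	fmt.Printf("auth type: %d, uses saved credential: %v\n", auth.AuthorizationType, auth.GitCredentialID != 0)
}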

View File

@ -29,7 +29,14 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
return false, "", errors.WithMessagef(err, "failed to get credentials for %v", objId)
}
newHash, err := gitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, username, password, gitConfig.TLSSkipVerify)
newHash, err := gitService.LatestCommitID(
gitConfig.URL,
gitConfig.ReferenceName,
username,
password,
gittypes.GitCredentialAuthType_Basic,
gitConfig.TLSSkipVerify,
)
if err != nil {
return false, "", errors.WithMessagef(err, "failed to fetch latest commit id of %v", objId)
}
@ -62,6 +69,7 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
cloneParams.auth = &gitAuth{
username: username,
password: password,
authType: gitConfig.Authentication.AuthorizationType,
}
}
@ -89,14 +97,31 @@ type cloneRepositoryParameters struct {
}
type gitAuth struct {
authType gittypes.GitCredentialAuthType
username string
password string
}
func cloneGitRepository(gitService portainer.GitService, cloneParams *cloneRepositoryParameters) error {
if cloneParams.auth != nil {
return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, cloneParams.auth.username, cloneParams.auth.password, cloneParams.tlsSkipVerify)
return gitService.CloneRepository(
cloneParams.toDir,
cloneParams.url,
cloneParams.ref,
cloneParams.auth.username,
cloneParams.auth.password,
cloneParams.auth.authType,
cloneParams.tlsSkipVerify,
)
}
return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, "", "", cloneParams.tlsSkipVerify)
return gitService.CloneRepository(
cloneParams.toDir,
cloneParams.url,
cloneParams.ref,
"",
"",
gittypes.GitCredentialAuthType_Basic,
cloneParams.tlsSkipVerify,
)
}

View File

@ -21,10 +21,14 @@ func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error
return httperrors.NewInvalidPayloadError("invalid Webhook format")
}
if autoUpdate.Interval != "" {
if _, err := time.ParseDuration(autoUpdate.Interval); err != nil {
return httperrors.NewInvalidPayloadError("invalid Interval format")
}
if autoUpdate.Interval == "" {
return nil
}
if d, err := time.ParseDuration(autoUpdate.Interval); err != nil {
return httperrors.NewInvalidPayloadError("invalid Interval format")
} else if d < time.Minute {
return httperrors.NewInvalidPayloadError("interval must be at least 1 minute")
}
return nil
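
A standalone sketch of the rule added above, using only the standard library, so the accepted and rejected interval values are easy to see: an empty interval is allowed, anything else must parse as a Go duration and be at least one minute.

package main

import (
	"errors"
	"fmt"
	"time"
)

// validateInterval mirrors the check above: empty means no recurring update,
// otherwise the value must parse and be at least one minute.
func validateInterval(interval string) error {
	if interval == "" {
		return nil
	}

	d, err := time.ParseDuration(interval)
	if err != nil {
		return errors.New("invalid Interval format")
	}
	if d < time.Minute {
		return errors.New("interval must be at least 1 minute")
	}

	return nil
}

func main() {
	fmt.Println(validateInterval(""))    // <nil>
	fmt.Println(validateInterval("5m"))  // <nil>
	fmt.Println(validateInterval("30s")) // interval must be at least 1 minute
	fmt.Println(validateInterval("1dd")) // invalid Interval format
}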

View File

@ -23,6 +23,16 @@ func Test_ValidateAutoUpdate(t *testing.T) {
value: &portainer.AutoUpdateSettings{Interval: "1dd2hh3mm"},
wantErr: true,
},
{
name: "short interval value",
value: &portainer.AutoUpdateSettings{Interval: "1s"},
wantErr: true,
},
{
name: "valid webhook without interval",
value: &portainer.AutoUpdateSettings{Webhook: "8dce8c2f-9ca1-482b-ad20-271e86536ada"},
wantErr: false,
},
{
name: "valid auto update",
value: &portainer.AutoUpdateSettings{

View File

@ -32,15 +32,12 @@ type Service struct {
}
// NewService initializes a new service.
func NewService() *Service {
tlsConfig := crypto.CreateTLSConfiguration()
tlsConfig.InsecureSkipVerify = true
func NewService(insecureSkipVerify bool) *Service {
return &Service{
httpsClient: &http.Client{
Timeout: httpClientTimeout,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
},
},
}
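
The same shape in plain standard-library terms, as a sketch of the design change: whether certificate verification is skipped becomes a constructor argument instead of being hard-coded inside the service. The helper name and the TLS minimum version are assumptions, not taken from this changeset.

package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

// newHTTPSClient (illustrative) builds a client whose verification behaviour
// is decided by the caller rather than forced to insecure in the constructor.
func newHTTPSClient(insecureSkipVerify bool, timeout time.Duration) *http.Client {
	return &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				MinVersion:         tls.VersionTLS12,
				InsecureSkipVerify: insecureSkipVerify, // only true for explicitly trusted, self-signed setups
			},
		},
	}
}

func main() {
	_ = newHTTPSClient(false, 30*time.Second) // verify certificates by default
}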

View File

@ -0,0 +1,18 @@
package openamt
import (
"net/http"
"testing"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
func TestNewService(t *testing.T) {
fips.InitFIPS(false)
service := NewService(true)
require.NotNil(t, service)
require.True(t, service.httpsClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) //nolint:forbidigo
}

View File

@ -1,7 +1,6 @@
package client
import (
"crypto/tls"
"errors"
"fmt"
"io"
@ -11,6 +10,7 @@ import (
"time"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/crypto"
"github.com/rs/zerolog/log"
"github.com/segmentio/encoding/json"
@ -105,21 +105,28 @@ func Get(url string, timeout int) ([]byte, error) {
// ExecutePingOperation will send a SystemPing operation HTTP request to a Docker environment(endpoint)
// using the specified host and optional TLS configuration.
// It uses a new http.Client for each operation.
func ExecutePingOperation(host string, tlsConfig *tls.Config) (bool, error) {
func ExecutePingOperation(host string, tlsConfiguration portainer.TLSConfiguration) (bool, error) {
transport := &http.Transport{}
scheme := "http"
if tlsConfig != nil {
if tlsConfiguration.TLS {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration)
if err != nil {
return false, err
}
transport.TLSClientConfig = tlsConfig
scheme = "https"
}
client := &http.Client{
Timeout: time.Second * 3,
Timeout: 3 * time.Second,
Transport: transport,
}
target := strings.Replace(host, "tcp://", scheme+"://", 1)
return pingOperation(client, target)
}
@ -134,10 +141,7 @@ func pingOperation(client *http.Client, target string) (bool, error) {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
agentOnDockerEnvironment := false
if resp.Header.Get(portainer.PortainerAgentHeader) != "" {
agentOnDockerEnvironment = true
}
agentOnDockerEnvironment := resp.Header.Get(portainer.PortainerAgentHeader) != ""
return agentOnDockerEnvironment, nil
}

View File

@ -0,0 +1,47 @@
package client
import (
"net/http"
"net/http/httptest"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
func TestExecutePingOperationFailure(t *testing.T) {
fips.InitFIPS(false)
host := "http://localhost:1"
config := portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
}
// Invalid host
ok, err := ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
// Invalid TLS configuration
config.TLSCertPath = "/invalid/path/to/cert"
config.TLSKeyPath = "/invalid/path/to/key"
ok, err = ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
}
func TestPingOperation(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add(portainer.PortainerAgentHeader, "1")
}))
defer srv.Close()
agentOnDockerEnv, err := pingOperation(http.DefaultClient, srv.URL)
require.NoError(t, err)
require.True(t, agentOnDockerEnv)
}

View File

@ -2,6 +2,7 @@ package csrf
import (
"crypto/rand"
"errors"
"fmt"
"net/http"
"os"
@ -9,7 +10,8 @@ import (
"github.com/portainer/portainer/api/http/security"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
gorillacsrf "github.com/gorilla/csrf"
gcsrf "github.com/gorilla/csrf"
"github.com/rs/zerolog/log"
"github.com/urfave/negroni"
)
@ -19,7 +21,7 @@ func SkipCSRFToken(w http.ResponseWriter) {
w.Header().Set(csrfSkipHeader, "1")
}
func WithProtect(handler http.Handler) (http.Handler, error) {
func WithProtect(handler http.Handler, trustedOrigins []string) (http.Handler, error) {
// IsDockerDesktopExtension is used to check if we should skip csrf checks in the request bouncer (ShouldSkipCSRFCheck)
// DOCKER_EXTENSION is set to '1' in build/docker-extension/docker-compose.yml
isDockerDesktopExtension := false
@ -34,10 +36,12 @@ func WithProtect(handler http.Handler) (http.Handler, error) {
return nil, fmt.Errorf("failed to generate CSRF token: %w", err)
}
handler = gorillacsrf.Protect(
handler = gcsrf.Protect(
token,
gorillacsrf.Path("/"),
gorillacsrf.Secure(false),
gcsrf.Path("/"),
gcsrf.Secure(false),
gcsrf.TrustedOrigins(trustedOrigins),
gcsrf.ErrorHandler(withErrorHandler(trustedOrigins)),
)(handler)
return withSkipCSRF(handler, isDockerDesktopExtension), nil
@ -55,7 +59,7 @@ func withSendCSRFToken(handler http.Handler) http.Handler {
}
if statusCode := sw.Status(); statusCode >= 200 && statusCode < 300 {
sw.Header().Set("X-CSRF-Token", gorillacsrf.Token(r))
sw.Header().Set("X-CSRF-Token", gcsrf.Token(r))
}
})
@ -73,9 +77,33 @@ func withSkipCSRF(handler http.Handler, isDockerDesktopExtension bool) http.Hand
}
if skip {
r = gorillacsrf.UnsafeSkipCheck(r)
r = gcsrf.UnsafeSkipCheck(r)
}
handler.ServeHTTP(w, r)
})
}
func withErrorHandler(trustedOrigins []string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
err := gcsrf.FailureReason(r)
if errors.Is(err, gcsrf.ErrBadOrigin) || errors.Is(err, gcsrf.ErrBadReferer) || errors.Is(err, gcsrf.ErrNoReferer) {
log.Error().Err(err).
Str("request_url", r.URL.String()).
Str("host", r.Host).
Str("x_forwarded_proto", r.Header.Get("X-Forwarded-Proto")).
Str("forwarded", r.Header.Get("Forwarded")).
Str("origin", r.Header.Get("Origin")).
Str("referer", r.Header.Get("Referer")).
Strs("trusted_origins", trustedOrigins).
Msg("Failed to validate Origin or Referer")
}
http.Error(
w,
http.StatusText(http.StatusForbidden)+" - "+err.Error(),
http.StatusForbidden,
)
})
}
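
A minimal sketch of how a caller could wire up the new trustedOrigins parameter, assuming the csrf package lives under api/http/csrf as the package name here suggests; the origin value is a placeholder. With gorilla/csrf, trusted origins are host names (optionally with a port), not full URLs.

package main

import (
	"net/http"

	"github.com/portainer/portainer/api/http/csrf"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Hosts allowed to differ from the Host header, e.g. a reverse proxy
	// serving Portainer from another domain (placeholder value).
	trustedOrigins := []string{"portainer.example.com"}

	protected, err := csrf.WithProtect(mux, trustedOrigins)
	if err != nil {
		panic(err)
	}

	http.ListenAndServe(":9000", protected)
}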

View File

@ -2,6 +2,7 @@ package auth
import (
"net/http"
"strconv"
"strings"
portainer "github.com/portainer/portainer/api"
@ -82,6 +83,11 @@ func (handler *Handler) authenticate(rw http.ResponseWriter, r *http.Request) *h
}
}
// Clear any existing user caches
if user != nil {
handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(user.ID)))
}
if user != nil && isUserInitialAdmin(user) || settings.AuthenticationMethod == portainer.AuthenticationInternal {
return handler.authenticateInternal(rw, user, payload.Password)
}

View File

@ -8,6 +8,7 @@ import (
"github.com/portainer/portainer/api/http/proxy"
"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/http/security"
"github.com/portainer/portainer/api/kubernetes/cli"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/gorilla/mux"
@ -23,16 +24,18 @@ type Handler struct {
OAuthService portainer.OAuthService
ProxyManager *proxy.Manager
KubernetesTokenCacheManager *kubernetes.TokenCacheManager
KubernetesClientFactory *cli.ClientFactory
passwordStrengthChecker security.PasswordStrengthChecker
bouncer security.BouncerService
}
// NewHandler creates a handler to manage authentication operations.
func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker) *Handler {
func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker, kubernetesClientFactory *cli.ClientFactory) *Handler {
h := &Handler{
Router: mux.NewRouter(),
passwordStrengthChecker: passwordStrengthChecker,
bouncer: bouncer,
KubernetesClientFactory: kubernetesClientFactory,
}
h.Handle("/auth/oauth/validate",

View File

@ -2,6 +2,7 @@ package auth
import (
"net/http"
"strconv"
"github.com/portainer/portainer/api/http/security"
"github.com/portainer/portainer/api/logoutcontext"
@ -23,12 +24,12 @@ func (handler *Handler) logout(w http.ResponseWriter, r *http.Request) *httperro
if tokenData != nil {
handler.KubernetesTokenCacheManager.RemoveUserFromCache(tokenData.ID)
handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(tokenData.ID)))
logoutcontext.Cancel(tokenData.Token)
handler.bouncer.RevokeJWT(tokenData.Token)
}
security.RemoveAuthCookie(w)
handler.bouncer.RevokeJWT(tokenData.Token)
return response.Empty(w)
}

View File

@ -0,0 +1,55 @@
package auth
import (
"net/http"
"net/http/httptest"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/http/security"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/api/kubernetes/cli"
"github.com/stretchr/testify/require"
)
type mockBouncer struct {
security.BouncerService
}
func NewMockBouncer() *mockBouncer {
return &mockBouncer{BouncerService: testhelpers.NewTestRequestBouncer()}
}
func (*mockBouncer) CookieAuthLookup(r *http.Request) (*portainer.TokenData, error) {
return &portainer.TokenData{
ID: 1,
Username: "testuser",
Token: "valid-token",
}, nil
}
func TestLogout(t *testing.T) {
h := NewHandler(NewMockBouncer(), nil, nil, nil)
h.KubernetesTokenCacheManager = kubernetes.NewTokenCacheManager()
k, err := cli.NewClientFactory(nil, nil, nil, "", "", "")
require.NoError(t, err)
h.KubernetesClientFactory = k
rr := httptest.NewRecorder()
req := httptest.NewRequest("POST", "/auth/logout", nil)
h.ServeHTTP(rr, req)
require.Equal(t, http.StatusNoContent, rr.Code)
}
func TestLogoutNoPanic(t *testing.T) {
h := NewHandler(testhelpers.NewTestRequestBouncer(), nil, nil, nil)
rr := httptest.NewRecorder()
req := httptest.NewRequest("POST", "/auth/logout", nil)
h.ServeHTTP(rr, req)
require.Equal(t, http.StatusNoContent, rr.Code)
}

View File

@ -18,10 +18,15 @@ import (
"github.com/portainer/portainer/api/crypto"
"github.com/portainer/portainer/api/http/offlinegate"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert"
)
func init() {
fips.InitFIPS(false)
}
func listFiles(dir string) []string {
items := make([]string, 0)
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {

Some files were not shown because too many files have changed in this diff.