Compare commits
658 Commits
v1.14.0-rc...main
*(Commit list omitted: 658 commits were shown by SHA only; the Author and Date columns were empty in this capture.)*
@@ -13,10 +13,10 @@ reviewers:
- reasonerjt
- ywk253100
- blackpiglet
- qiuming-best
- shubham-pampattiwar
- Lyndon-Li
- anshulahuja98
- kaovilai

tech-writer:
- sseago
@@ -9,5 +9,5 @@ Fixes #(issue)
# Please indicate you've done the following:

- [ ] [Accepted the DCO](https://velero.io/docs/v1.5/code-standards/#dco-sign-off). Commits without the DCO will delay acceptance.
- [ ] [Created a changelog file](https://velero.io/docs/v1.5/code-standards/#adding-a-changelog) or added `/kind changelog-not-required` as a comment on this pull request.
- [ ] [Created a changelog file (`make new-changelog`)](https://velero.io/docs/main/code-standards/#adding-a-changelog) or comment `/kind changelog-not-required` on this PR.
- [ ] Updated the corresponding documentation in `site/content/docs/main`.
@@ -1,93 +0,0 @@
name: "Verify Velero CRDs across k8s versions"
on:
pull_request:
# Do not run when the change only includes these directories.
paths-ignore:
- "site/**"
- "design/**"

jobs:
# Build the Velero CLI once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build-cli:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cache
uses: actions/cache@v4
env:
cache-name: cache-velero-cli
with:
path: ./_output/bin/linux/amd64/velero
# The cache key is a combination of the current PR number and a SHA256 hash of the Velero binary
key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
# This key controls the prefixes that we'll look at in the cache to restore from
restore-keys: |
velero-${{ github.event.pull_request.number }}-

- name: Fetch cached go modules
uses: actions/cache@v4
if: steps.cache.outputs.cache-hit != 'true'
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Check out the code
uses: actions/checkout@v4
if: steps.cache.outputs.cache-hit != 'true'

# If no binaries were built for this PR, build it now.
- name: Build Velero CLI
if: steps.cache.outputs.cache-hit != 'true'
run: |
make local

# Check the common CLI against all Kubernetes versions
crd-check:
needs: build-cli
runs-on: ubuntu-latest
strategy:
matrix:
# Latest k8s versions. There's no series-based tag, nor is there a latest tag.
k8s:
- 1.23.17
- 1.24.17
- 1.25.16
- 1.26.13
- 1.27.10
- 1.28.6
- 1.29.1
# All steps run in parallel unless otherwise specified.
# See https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#creating-dependent-jobs
steps:
- name: Fetch built CLI
id: cache
uses: actions/cache@v4
env:
cache-name: cache-velero-cli
with:
path: ./_output/bin/linux/amd64/velero
# The cache key is a combination of the current PR number and a SHA256 hash of the Velero binary
key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
# This key controls the prefixes that we'll look at in the cache to restore from
restore-keys: |
velero-${{ github.event.pull_request.number }}-
- uses: engineerd/setup-kind@v0.5.0
with:
version: "v0.21.0"
image: "kindest/node:v${{ matrix.k8s }}"
- name: Install CRDs
run: |
kubectl cluster-info
kubectl get pods -n kube-system
kubectl version
echo "current-context:" $(kubectl config current-context)
echo "environment-kubeconfig:" ${KUBECONFIG}
./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
@@ -6,16 +6,18 @@ on:
paths-ignore:
- "site/**"
- "design/**"
- "**/*.md"
jobs:
# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build:
runs-on: ubuntu-latest
steps:
- name: Check out the code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
id: go
go-version-file: 'go.mod'
# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cli-cache
@@ -31,17 +33,6 @@ jobs:
path: ./velero.tar
# The cache key is a combination of the current PR number and the commit SHA
key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
- name: Fetch cached go modules
uses: actions/cache@v4
if: steps.cli-cache.outputs.cache-hit != 'true'
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Check out the code
uses: actions/checkout@v4
if: steps.cli-cache.outputs.cache-hit != 'true' || steps.image-cache.outputs.cache-hit != 'true'
# If no binaries were built for this PR, build it now.
- name: Build Velero CLI
if: steps.cli-cache.outputs.cache-hit != 'true'
@@ -51,47 +42,57 @@ jobs:
- name: Build Velero Image
if: steps.image-cache.outputs.cache-hit != 'true'
run: |
IMAGE=velero VERSION=pr-test make container
docker save velero:pr-test -o ./velero.tar
IMAGE=velero VERSION=pr-test BUILD_OUTPUT_TYPE=docker make container
docker save velero:pr-test-linux-amd64 -o ./velero.tar
# Create json of k8s versions to test
# from guide: https://stackoverflow.com/a/65094398/4590470
setup-test-matrix:
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ github.token }}
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: Set k8s versions
id: set-matrix
# everything excluding older tags. The limit needs to be high enough to cover all latest versions
# and test labels
# grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" filters for v1.25 to v9.99
# and removes older patches of the same minor version
# awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}'
run: |
echo "matrix={\
\"k8s\":$(wget -q -O - "https://hub.docker.com/v2/namespaces/kindest/repositories/node/tags?page_size=50" | grep -o '"name": *"[^"]*' | grep -o '[^"]*$' | grep -v -E "alpha|beta" | grep -E "v[1-9]\.(2[5-9]|[3-9][0-9])" | awk -F. '{if(!a[$1"."$2]++)print $1"."$2"."$NF}' | sort -r | sed s/v//g | jq -R -c -s 'split("\n")[:-1]'),\
\"labels\":[\
\"Basic && (ClusterResource || NodePort || StorageClass)\", \
\"ResourceFiltering && !Restic\", \
\"ResourceModifier || (Backups && BackupsSync) || PrivilegesMgmt || OrderedResources\", \
\"(NamespaceMapping && Single && Restic) || (NamespaceMapping && Multiple && Restic)\"\
]}" >> $GITHUB_OUTPUT

# Run E2E test against all Kubernetes versions on kind
run-e2e-test:
needs: build
needs:
- build
- setup-test-matrix
runs-on: ubuntu-latest
strategy:
matrix:
k8s:
- 1.23.17
- 1.24.17
- 1.25.16
- 1.26.13
- 1.27.10
- 1.28.6
- 1.29.1
focus:
# tests to focus on, use `|` to concatenate multiple regexes to run on the same job
# ordered according to e2e_suite_test.go order
- Basic\]\[ClusterResource
- ResourceFiltering
- ResourceModifier|Backups|PrivilegesMgmt\]\[SSR
- Schedule\]\[OrderedResources
- NamespaceMapping\]\[Single\]\[Restic|NamespaceMapping\]\[Multiple\]\[Restic
- Basic\]\[Nodeport
- Basic\]\[StorageClass
matrix: ${{fromJson(needs.setup-test-matrix.outputs.matrix)}}
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
id: go
- name: Check out the code
uses: actions/checkout@v4
go-version-file: 'go.mod'
- name: Install MinIO
run:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
- uses: engineerd/setup-kind@v0.5.0
- uses: engineerd/setup-kind@v0.6.2
with:
version: "v0.21.0"
skipClusterLogsExport: true
version: "v0.27.0"
image: "kindest/node:v${{ matrix.k8s }}"
- name: Fetch built CLI
id: cli-cache
@@ -108,14 +109,6 @@ jobs:
- name: Load Velero Image
run:
kind load image-archive velero.tar
# always try to fetch the cached go modules as the e2e test needs them either way
- name: Fetch cached go modules
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Run E2E test
run: |
cat << EOF > /tmp/credential
@@ -128,13 +121,18 @@ jobs:
curl -LO https://dl.k8s.io/release/v${{ matrix.k8s }}/bin/linux/amd64/kubectl
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

GOPATH=~/go CLOUD_PROVIDER=kind \
OBJECT_STORE_PROVIDER=aws BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
CREDS_FILE=/tmp/credential BSL_BUCKET=bucket \
ADDITIONAL_OBJECT_STORE_PROVIDER=aws ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
ADDITIONAL_CREDS_FILE=/tmp/credential ADDITIONAL_BSL_BUCKET=additional-bucket \
GINKGO_FOCUS='${{ matrix.focus }}' VELERO_IMAGE=velero:pr-test \
GINKGO_SKIP='SKIP_KIND|pv-backup|Restic|Snapshot|LongTime' \
GOPATH=~/go \
CLOUD_PROVIDER=kind \
OBJECT_STORE_PROVIDER=aws \
BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
CREDS_FILE=/tmp/credential \
BSL_BUCKET=bucket \
ADDITIONAL_OBJECT_STORE_PROVIDER=aws \
ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
ADDITIONAL_CREDS_FILE=/tmp/credential \
ADDITIONAL_BSL_BUCKET=additional-bucket \
VELERO_IMAGE=velero:pr-test-linux-amd64 \
GINKGO_LABELS="${{ matrix.labels }}" \
make -C test/ run-e2e
timeout-minutes: 30
- name: Upload debug bundle
@@ -142,4 +140,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: DebugBundle
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
path: /home/runner/work/velero/velero/test/e2e/debug-bundle*
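The `setup-test-matrix` job above derives the Kubernetes version matrix from the `kindest/node` tags with a single wget/grep/awk/jq pipeline, as its comments describe. As a reading aid only, here is a hypothetical Go sketch of the same selection logic — fetch the tag page, drop alpha/beta builds and anything older than v1.25, and keep the newest patch release seen for each minor version. The Docker Hub endpoint and the version cut-off are taken from the workflow; the names and error handling are illustrative, not repository code.

```go
// Hypothetical sketch of the tag selection done by the setup-test-matrix step.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
	"strings"
)

// tagsPage mirrors only the fields we need from Docker Hub's /v2 tags endpoint.
type tagsPage struct {
	Results []struct {
		Name string `json:"name"`
	} `json:"results"`
}

func main() {
	resp, err := http.Get("https://hub.docker.com/v2/namespaces/kindest/repositories/node/tags?page_size=50")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var page tagsPage
	if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
		panic(err)
	}

	// Same cut-off as the workflow's grep: v1.25 through v9.99.
	supported := regexp.MustCompile(`^v[1-9]\.(2[5-9]|[3-9][0-9])\.`)
	seenMinor := map[string]bool{}
	var versions []string

	for _, r := range page.Results {
		name := r.Name
		if strings.Contains(name, "alpha") || strings.Contains(name, "beta") {
			continue // pre-release node images are skipped
		}
		if !supported.MatchString(name) {
			continue // older than v1.25
		}
		// Tags arrive newest first, so the first patch seen per minor is the latest,
		// matching the awk de-duplication in the workflow.
		minor := strings.Join(strings.SplitN(name, ".", 3)[:2], ".")
		if seenMinor[minor] {
			continue
		}
		seenMinor[minor] = true
		versions = append(versions, strings.TrimPrefix(name, "v"))
	}

	out, _ := json.Marshal(versions)
	fmt.Println(string(out)) // e.g. ["1.29.1","1.28.6",...]
}
```

The `labels` half of the same matrix feeds `GINKGO_LABELS` in the run step; those strings are Ginkgo-style label-filter expressions, so a spec declared with `Label("Basic", "NodePort")` would be selected by the first entry.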
@@ -13,7 +13,7 @@ jobs:
# maintain the versions of Velero that need a security scan
versions: [main]
# list of images that need scanning
images: [velero, velero-restore-helper]
images: [velero, velero-plugin-for-aws, velero-plugin-for-gcp, velero-plugin-for-microsoft-azure]
permissions:
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
@@ -7,24 +7,16 @@ jobs:
strategy:
fail-fast: false
steps:
- name: Check out the code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
id: go
- name: Check out the code
uses: actions/checkout@v4
- name: Fetch cached go modules
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
go-version-file: 'go.mod'
- name: Make ci
run: make ci
- name: Upload test coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.out
@@ -13,9 +13,9 @@ jobs:
- name: Codespell
uses: codespell-project/actions-codespell@master
with:
# ignore the config/.../crd.go file as it's generated binary data that is edited elswhere.
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme,optin
check_filenames: true
check_hidden: true
@@ -1,7 +1,12 @@
name: Pull Request Linter Check
on: [pull_request]
on:
pull_request:
# Do not run when the change only includes these directories.
paths-ignore:
- "site/**"
- "design/**"
- "**/*.md"
jobs:

build:
name: Run Linter Check
runs-on: ubuntu-latest

@@ -12,9 +17,8 @@ jobs:
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Linter check
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v7
with:
version: v1.57.2
version: v2.1.1
args: --verbose
@ -14,95 +14,58 @@ jobs:
|
|||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.22'
|
||||
id: go
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
# Fix issue of setup-gcloud
|
||||
- run: |
|
||||
sudo apt-get install python2.7
|
||||
export CLOUDSDK_PYTHON="/usr/bin/python2"
|
||||
|
||||
- id: 'auth'
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
credentials_json: '${{ secrets.GCS_SA_KEY }}'
|
||||
|
||||
- name: 'set up GCloud SDK'
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
|
||||
- name: 'use gcloud CLI'
|
||||
run: |
|
||||
gcloud info
|
||||
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make local
|
||||
# Clean go cache to ease the build environment storage pressure.
|
||||
go clean -modcache -cache
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
verbose: true
|
||||
|
||||
# Use the JSON key in secret to login gcr.io
|
||||
- uses: 'docker/login-action@v3'
|
||||
with:
|
||||
registry: 'gcr.io' # or REGION.docker.pkg.dev
|
||||
username: '_json_key'
|
||||
password: '${{ secrets.GCR_SA_KEY }}'
|
||||
|
||||
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
|
||||
- name: Publish container image
|
||||
if: github.repository == 'vmware-tanzu/velero'
|
||||
run: |
|
||||
sudo swapoff -a
|
||||
sudo rm -f /mnt/swapfile
|
||||
docker system prune -a --force
|
||||
- name: Check out the code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- id: 'auth'
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
credentials_json: '${{ secrets.GCS_SA_KEY }}'
|
||||
- name: 'set up GCloud SDK'
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
- name: 'use gcloud CLI'
|
||||
run: |
|
||||
gcloud info
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: all
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: latest
|
||||
- name: Build
|
||||
run: |
|
||||
make local
|
||||
# Clean go cache to ease the build environment storage pressure.
|
||||
go clean -modcache -cache
|
||||
- name: Test
|
||||
run: make test
|
||||
- name: Upload test coverage
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
verbose: true
|
||||
# Use the JSON key in secret to login gcr.io
|
||||
- uses: 'docker/login-action@v3'
|
||||
with:
|
||||
registry: 'gcr.io' # or REGION.docker.pkg.dev
|
||||
username: '_json_key'
|
||||
password: '${{ secrets.GCR_SA_KEY }}'
|
||||
# Only try to publish the container image from the root repo; forks don't have permission to do so and will always get failures.
|
||||
- name: Publish container image
|
||||
if: github.repository == 'vmware-tanzu/velero'
|
||||
run: |
|
||||
sudo swapoff -a
|
||||
sudo rm -f /mnt/swapfile
|
||||
docker system prune -a --force
|
||||
|
||||
# Build and push Velero image to docker registry
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
VERSION=$(./hack/docker-push.sh | grep 'VERSION:' | awk -F: '{print $2}' | xargs)
|
||||
|
||||
# Upload Velero image package to GCS
|
||||
source hack/ci/build_util.sh
|
||||
BIN=velero
|
||||
RESTORE_HELPER_BIN=velero-restore-helper
|
||||
GCS_BUCKET=velero-builds
|
||||
VELERO_IMAGE=${BIN}-${VERSION}
|
||||
VELERO_RESTORE_HELPER_IMAGE=${RESTORE_HELPER_BIN}-${VERSION}
|
||||
VELERO_IMAGE_FILE=${VELERO_IMAGE}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_FILE=${VELERO_RESTORE_HELPER_IMAGE}.tar.gz
|
||||
VELERO_IMAGE_BACKUP_FILE=${VELERO_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE=${VELERO_RESTORE_HELPER_IMAGE}-'build.'${GITHUB_RUN_NUMBER}.tar.gz
|
||||
|
||||
cp ${VELERO_IMAGE_FILE} ${VELERO_IMAGE_BACKUP_FILE}
|
||||
cp ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE}
|
||||
|
||||
uploader ${VELERO_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
uploader ${VELERO_RESTORE_HELPER_IMAGE_BACKUP_FILE} ${GCS_BUCKET}
|
||||
# Build and push Velero image to docker registry
|
||||
docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
./hack/docker-push.sh
|
||||
|
|
|
@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9.0.0
- uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 14 days. If a Velero team member has requested log or more information, please provide the output of the shared commands."

@@ -20,4 +20,4 @@ jobs:
days-before-pr-close: -1
# Only issues made after Feb 09 2021.
start-date: "2021-09-02T00:00:00"
exempt-issue-labels: "Epic,Area/CLI,Area/Cloud/AWS,Area/Cloud/Azure,Area/Cloud/GCP,Area/Cloud/vSphere,Area/CSI,Area/Design,Area/Documentation,Area/Plugins,Bug,Enhancement/User,kind/requirement,kind/refactor,kind/tech-debt,limitation,Needs investigation,Needs triage,Needs Product,P0 - Hair on fire,P1 - Important,P2 - Long-term important,P3 - Wouldn't it be nice if...,Product Requirements,Restic - GA,Restic,release-blocker,Security"
exempt-issue-labels: "Epic,Area/CLI,Area/Cloud/AWS,Area/Cloud/Azure,Area/Cloud/GCP,Area/Cloud/vSphere,Area/CSI,Area/Design,Area/Documentation,Area/Plugins,Bug,Enhancement/User,kind/requirement,kind/refactor,kind/tech-debt,limitation,Needs investigation,Needs triage,Needs Product,P0 - Hair on fire,P1 - Important,P2 - Long-term important,P3 - Wouldn't it be nice if...,Product Requirements,Restic - GA,Restic,release-blocker,Security,backlog"
@@ -53,4 +53,8 @@ tilt-resources/cloud
# test generated files
test/e2e/report.xml
coverage.out
__debug_bin*
__debug_bin*
debug.test*

# make lint cache
.cache/
.golangci.yaml (660)
|
@ -6,17 +6,12 @@ run:
|
|||
# default concurrency is the available CPU number
|
||||
concurrency: 4
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 0
|
||||
timeout: 20m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
issues-exit-code: 1
|
||||
|
||||
|
||||
# default is true. Enables skipping of directories:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs-use-default: true
|
||||
|
||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
||||
|
@ -32,378 +27,383 @@ run:
|
|||
# If false (default) - golangci-lint acquires file lock on start.
|
||||
allow-parallel-runners: false
|
||||
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
formats:
|
||||
- format: colored-line-number
|
||||
text:
|
||||
path: stdout
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
dogsled:
|
||||
# checks assignments with too many blank identifiers; default is 2
|
||||
max-blank-identifiers: 2
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
errcheck:
|
||||
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
# [deprecated] comma-separated list of pairs of the form pkg:regex
|
||||
# the regex is used to ignore names within pkg. (default "fmt:.*").
|
||||
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
|
||||
# ignore: fmt:.*,io/ioutil:^Read.*
|
||||
|
||||
# path to a file containing a list of functions to exclude from checking
|
||||
# see https://github.com/kisielk/errcheck#excluding-functions for details
|
||||
# exclude: /path/to/file.txt
|
||||
exhaustive:
|
||||
# indicates that switch statements are to be considered exhaustive if a
|
||||
# 'default' case is present, even if all enum members aren't listed in the
|
||||
# switch
|
||||
default-signifies-exhaustive: false
|
||||
funlen:
|
||||
lines: 60
|
||||
statements: 40
|
||||
gocognit:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
nestif:
|
||||
# minimal complexity of if statements to report, 5 by default
|
||||
min-complexity: 4
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 5
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
# To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
|
||||
# By default list of stable checks is used.
|
||||
# enabled-checks:
|
||||
# - rangeValCopy
|
||||
|
||||
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
|
||||
# disabled-checks:
|
||||
# - regexpMust
|
||||
|
||||
# Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
|
||||
# Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
|
||||
# enabled-tags:
|
||||
# - performance
|
||||
# disabled-tags:
|
||||
# - experimental
|
||||
|
||||
settings: # settings passed to gocritic
|
||||
captLocal: # must be valid enabled check name
|
||||
paramsOnly: true
|
||||
# rangeValCopy:
|
||||
# sizeThreshold: 32
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
godot:
|
||||
# check all top-level comments, not only declarations
|
||||
check-all: false
|
||||
godox:
|
||||
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
|
||||
# might be left in the code accidentally and should be resolved before merging
|
||||
keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting
|
||||
- NOTE
|
||||
- OPTIMIZE # marks code that should be optimized before merging
|
||||
- HACK # marks hack-arounds that should be removed before merging
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
local-prefixes: github.com/org/project
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
gomnd:
|
||||
# the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description.
|
||||
checks: argument,case,condition,operation,return,assign
|
||||
gomodguard:
|
||||
allowed:
|
||||
modules: # List of allowed modules
|
||||
# - gopkg.in/yaml.v2
|
||||
domains: # List of allowed module domains
|
||||
# - golang.org
|
||||
blocked:
|
||||
modules: # List of blocked modules
|
||||
# - github.com/uudashr/go-module: # Blocked module
|
||||
# recommendations: # Recommended modules that should be used instead (Optional)
|
||||
# - golang.org/x/mod
|
||||
# reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional)
|
||||
versions: # List of blocked module version constraints
|
||||
# - github.com/mitchellh/go-homedir: # Blocked module with version constraint
|
||||
# version: "< 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons
|
||||
# reason: "testing if blocked version constraint works." # Reason why the version constraint exists. (Optional)
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
# check-shadowing: true
|
||||
|
||||
# settings per analyzer
|
||||
settings:
|
||||
printf: # analyzer name, run `go tool vet help` to see all analyzers
|
||||
funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
|
||||
|
||||
# enable or disable analyzers by name
|
||||
enable:
|
||||
- atomicalign
|
||||
enable-all: false
|
||||
disable:
|
||||
- shadow
|
||||
disable-all: false
|
||||
depguard:
|
||||
list-type: blacklist # Velero.io word list : ignore
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/sirupsen/logrus
|
||||
packages-with-error-message:
|
||||
# specify an error message to output when a denylisted package is used
|
||||
- github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
ignore-words:
|
||||
- someword
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
|
||||
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
|
||||
# True by default.
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
nolintlint:
|
||||
# Enable to ensure that nolint directives are all used. Default is true.
|
||||
allow-unused: false
|
||||
# Disable to ensure that nolint directives don't have a leading space. Default is true.
|
||||
allow-leading-space: true
|
||||
# Exclude following linters from requiring an explanation. Default is [].
|
||||
allow-no-explanation: []
|
||||
# Enable to require an explanation of nonzero length after each nolint directive. Default is false.
|
||||
require-explanation: true
|
||||
# Enable to require nolint directives to mention the specific linter being suppressed. Default is false.
|
||||
require-specific: true
|
||||
revive:
|
||||
rules:
|
||||
- name: unexported-return
|
||||
disabled: true
|
||||
|
||||
rowserrcheck:
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
testifylint:
|
||||
# TODO: enable them all
|
||||
disable:
|
||||
- bool-compare
|
||||
- compares
|
||||
- error-is-as
|
||||
- error-nil
|
||||
- expected-actual
|
||||
- go-require
|
||||
- float-compare
|
||||
- require-error
|
||||
- suite-dont-use-pkg
|
||||
- suite-extra-assert-call
|
||||
- suite-thelper
|
||||
enable:
|
||||
- empty
|
||||
- len
|
||||
- nil-compare
|
||||
testpackage:
|
||||
# regexp pattern to skip files
|
||||
skip-regexp: (export|internal)_test\.go
|
||||
unparam:
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
whitespace:
|
||||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement
|
||||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature
|
||||
wsl:
|
||||
# If true append is only allowed to be cuddled if appending value is
|
||||
# matching variables, fields or types on line above. Default is true.
|
||||
strict-append: true
|
||||
# Allow calls and assignments to be cuddled as long as the lines have any
|
||||
# matching variables, fields or types. Default is true.
|
||||
allow-assign-and-call: true
|
||||
# Allow multiline assignments to be cuddled. Default is true.
|
||||
allow-multiline-assign: true
|
||||
# Allow declarations (var) to be cuddled.
|
||||
allow-cuddle-declarations: false
|
||||
# Allow trailing comments in ending of blocks
|
||||
allow-trailing-comment: false
|
||||
# Force newlines in end of case at this limit (0 = never).
|
||||
force-case-trailing-whitespace: 0
|
||||
# Force cuddling of err checks with err var assignment
|
||||
force-err-cuddling: false
|
||||
# Allow leading comments to be separated with empty liens
|
||||
allow-separated-leading-comment: false
|
||||
# Show statistics per linter.
|
||||
show-stats: false
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
# all available settings of specific linters
|
||||
settings:
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
# specify an error message to output when a denylisted package is used
|
||||
- pkg: github.com/sirupsen/logrus
|
||||
desc: "logging is allowed only by logutils.Log"
|
||||
|
||||
dogsled:
|
||||
# checks assignments with too many blank identifiers; default is 2
|
||||
max-blank-identifiers: 2
|
||||
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
|
||||
errcheck:
|
||||
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
|
||||
exhaustive:
|
||||
# indicates that switch statements are to be considered exhaustive if a
|
||||
# 'default' case is present, even if all enum members aren't listed in the
|
||||
# switch
|
||||
default-signifies-exhaustive: false
|
||||
|
||||
funlen:
|
||||
lines: 60
|
||||
statements: 40
|
||||
|
||||
gocognit:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
nestif:
|
||||
# minimal complexity of if statements to report, 5 by default
|
||||
min-complexity: 4
|
||||
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 5
|
||||
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
# To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
|
||||
# By default list of stable checks is used.
|
||||
settings: # settings passed to gocritic
|
||||
captLocal: # must be valid enabled check name
|
||||
paramsOnly: true
|
||||
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
|
||||
godot:
|
||||
# check all top-level comments, not only declarations
|
||||
check-all: false
|
||||
|
||||
godox:
|
||||
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
|
||||
# might be left in the code accidentally and should be resolved before merging
|
||||
keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting
|
||||
- NOTE
|
||||
- OPTIMIZE # marks code that should be optimized before merging
|
||||
- HACK # marks hack-arounds that should be removed before merging
|
||||
|
||||
gosec:
|
||||
excludes:
|
||||
- G115
|
||||
|
||||
govet:
|
||||
# enable or disable analyzers by name
|
||||
enable:
|
||||
- atomicalign
|
||||
enable-all: false
|
||||
disable:
|
||||
- shadow
|
||||
disable-all: false
|
||||
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
ignore-rules:
|
||||
- someword
|
||||
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
|
||||
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
|
||||
# True by default.
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
|
||||
nolintlint:
|
||||
# Enable to ensure that nolint directives are all used. Default is true.
|
||||
allow-unused: false
|
||||
# Exclude following linters from requiring an explanation. Default is [].
|
||||
allow-no-explanation: []
|
||||
# Enable to require an explanation of nonzero length after each nolint directive. Default is false.
|
||||
require-explanation: true
|
||||
# Enable to require nolint directives to mention the specific linter being suppressed. Default is false.
|
||||
require-specific: true
|
||||
|
||||
perfsprint:
|
||||
strconcat: false
|
||||
sprintf1: false
|
||||
errorf: false
|
||||
int-conversion: true
|
||||
|
||||
revive:
|
||||
rules:
|
||||
- name: blank-imports
|
||||
disabled: true
|
||||
- name: context-as-argument
|
||||
disabled: true
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
disabled: true
|
||||
- name: early-return
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: empty-block
|
||||
disabled: true
|
||||
- name: error-naming
|
||||
disabled: true
|
||||
- name: error-return
|
||||
disabled: true
|
||||
- name: error-strings
|
||||
disabled: true
|
||||
- name: errorf
|
||||
disabled: true
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
disabled: true
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
disabled: true
|
||||
- name: redefines-builtin-id
|
||||
disabled: true
|
||||
- name: superfluous-else
|
||||
disabled: true
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
disabled: true
|
||||
- name: unnecessary-stmt
|
||||
- name: unreachable-code
|
||||
- name: unused-parameter
|
||||
disabled: true
|
||||
- name: use-any
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
disabled: true
|
||||
|
||||
rowserrcheck:
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
|
||||
staticcheck:
|
||||
checks:
|
||||
- all
|
||||
- -QF1001 # FIXME
|
||||
- -QF1003 # FIXME
|
||||
- -QF1004 # FIXME
|
||||
- -QF1007 # FIXME
|
||||
- -QF1008 # FIXME
|
||||
- -QF1009 # FIXME
|
||||
- -QF1012 # FIXME
|
||||
|
||||
testifylint:
|
||||
# TODO: enable them all
|
||||
disable:
|
||||
- empty # FIXME
|
||||
- equal-values # FIXME
|
||||
- float-compare
|
||||
- go-require
|
||||
- len # FIXME
|
||||
- require-error
|
||||
enable-all: true
|
||||
|
||||
testpackage:
|
||||
# regexp pattern to skip files
|
||||
skip-regexp: (export|internal)_test\.go
|
||||
unparam:
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
|
||||
whitespace:
|
||||
multi-if: false # Enforces newlines (or comments) after every multi-line if statement
|
||||
multi-func: false # Enforces newlines (or comments) after every multi-line function signature
|
||||
|
||||
wsl:
|
||||
# If true append is only allowed to be cuddled if appending value is
|
||||
# matching variables, fields or types on line above. Default is true.
|
||||
strict-append: true
|
||||
# Allow calls and assignments to be cuddled as long as the lines have any
|
||||
# matching variables, fields or types. Default is true.
|
||||
allow-assign-and-call: true
|
||||
# Allow multiline assignments to be cuddled. Default is true.
|
||||
allow-multiline-assign: true
|
||||
# Allow declarations (var) to be cuddled.
|
||||
allow-cuddle-declarations: false
|
||||
# Allow trailing comments in ending of blocks
|
||||
allow-trailing-comment: false
|
||||
# Force newlines in end of case at this limit (0 = never).
|
||||
force-case-trailing-whitespace: 0
|
||||
# Force cuddling of err checks with err var assignment
|
||||
force-err-cuddling: false
|
||||
# Allow leading comments to be separated with empty lines
|
||||
allow-separated-leading-comment: false
|
||||
|
||||
default: none
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- copyloopvar
|
||||
- dogsled
|
||||
- durationcheck
|
||||
- dupword
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- exportloopref
|
||||
- errchkjson
|
||||
- ginkgolinter
|
||||
- goconst
|
||||
- gofmt
|
||||
- goheader
|
||||
- goimports
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ginkgolinter
|
||||
- importas
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nakedret
|
||||
- nosprintfhostport
|
||||
- nilerr
|
||||
- noctx
|
||||
- nolintlint
|
||||
- nosprintfhostport
|
||||
- perfsprint
|
||||
- revive
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- testifylint
|
||||
- typecheck
|
||||
- thelper
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- usestdlibvars
|
||||
- whitespace
|
||||
fast: false
|
||||
|
||||
exclusions:
|
||||
# which dirs to skip: issues from them won't be reported;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but default dirs are skipped independently
|
||||
# from this option's value (see skip-dirs-use-default).
|
||||
# "/" will be replaced by current OS file path separator to properly work
|
||||
# on Windows.
|
||||
paths:
|
||||
- pkg/plugin/generated/*
|
||||
- third_party
|
||||
|
||||
rules:
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "DefaultVolumesToRestic" # No need to report deprecate for DefaultVolumesToRestic.
|
||||
- path: ".*_test.go$"
|
||||
linters:
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- govet
|
||||
- staticcheck
|
||||
- unparam
|
||||
- unused
|
||||
- path: test/
|
||||
linters:
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- nilerr
|
||||
- staticcheck
|
||||
- unparam
|
||||
- unused
|
||||
- path: ".*data_upload_controller_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "type"
|
||||
- path: ".*config_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
text: "bucket"
|
||||
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "github.com/golang/protobuf/proto" # grpc-go still uses github.com/golang/protobuf/proto.
|
||||
- linters:
|
||||
- staticcheck
|
||||
text: "DefaultVolumesToRestic" # No need to report deprecate for DefaultVolumesToRestic.
|
||||
- path: ".*_test.go$"
|
||||
linters:
|
||||
- dupword
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- govet
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- path: test/
|
||||
linters:
|
||||
- dupword
|
||||
- errcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- gosimple
|
||||
- nilerr
|
||||
- staticcheck
|
||||
- stylecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
|
||||
# The list of ids of default excludes to include or disable. By default it's empty.
|
||||
include:
|
||||
- EXC0002 # disable excluding of issues about comments from golint
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-issues-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
||||
# Show only new issues created after git revision `REV`
|
||||
# new-from-rev: origin/main
|
||||
# make issues output unique by line, default is true
|
||||
uniq-by-line: true
|
||||
|
||||
# which dirs to skip: issues from them won't be reported;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but default dirs are skipped independently
|
||||
# from this option's value (see skip-dirs-use-default).
|
||||
# "/" will be replaced by current OS file path separator to properly work
|
||||
# on Windows.
|
||||
exclude-dirs:
|
||||
- pkg/plugin/generated/*
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- pkg/plugin/generated/*
|
||||
- third_party
|
||||
|
||||
settings:
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- github.com/vmware-tanzu/velero
|
||||
|
||||
severity:
|
||||
# Default value is empty string.
|
||||
# Set the default severity for issues. If severity rules are defined and the issues
|
||||
# do not match or no severity is provided to the rule this will be the default
|
||||
# severity applied. Severities should match the supported severity names of the
|
||||
# selected out format.
|
||||
# - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity
|
||||
# - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity
|
||||
# - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
|
||||
default-severity: error
|
||||
|
||||
# The default value is false.
|
||||
# If set to true severity-rules regular expressions become case sensitive.
|
||||
case-sensitive: false
|
||||
default: error
|
||||
|
||||
# Default value is empty list.
|
||||
# When a list of severity rules are provided, severity information will be added to lint
|
||||
|
@ -412,5 +412,7 @@ severity:
|
|||
# Only affects out formats that support setting severity information.
|
||||
rules:
|
||||
- linters:
|
||||
- dupl
|
||||
severity: info
|
||||
|
||||
version: "2"
|
||||
|
|
|
@ -26,18 +26,23 @@ builds:
|
|||
- arm
|
||||
- arm64
|
||||
- ppc64le
|
||||
- s390x
|
||||
ignore:
|
||||
# don't build arm for darwin and arm/arm64 for windows
|
||||
- goos: darwin
|
||||
goarch: arm
|
||||
- goos: darwin
|
||||
goarch: ppc64le
|
||||
- goos: darwin
|
||||
goarch: s390x
|
||||
- goos: windows
|
||||
goarch: arm
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
- goos: windows
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: s390x
|
||||
ldflags:
|
||||
- -X "github.com/vmware-tanzu/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}" -X "github.com/vmware-tanzu/velero/pkg/buildinfo.ImageRegistry={{ .Env.REGISTRY }}"
|
||||
archives:
|
||||
|
@ -46,9 +51,6 @@ archives:
|
|||
files:
|
||||
- LICENSE
|
||||
- examples/**/*
|
||||
# Add the setting to resolve the DEPRECATED warning. Actually, Velero's case is not affected by the rlcp behavior change.
|
||||
# https://github.com/orgs/goreleaser/discussions/3659#discussioncomment-4587257
|
||||
rlcp: true
|
||||
checksum:
|
||||
name_template: 'CHECKSUM'
|
||||
release:
|
||||
|
@ -63,4 +65,4 @@ git:
|
|||
# tags if there are more than one tag in the same commit.
|
||||
#
|
||||
# Default: `-version:refname`
|
||||
tag_sort: -version:creatordate
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
## Current release:
|
||||
* [CHANGELOG-1.14.md][24]
|
||||
* [CHANGELOG-1.15.md][25]
|
||||
|
||||
## Older releases:
|
||||
* [CHANGELOG-1.14.md][24]
|
||||
* [CHANGELOG-1.13.md][23]
|
||||
* [CHANGELOG-1.12.md][22]
|
||||
* [CHANGELOG-1.11.md][21]
|
||||
|
@ -27,6 +28,7 @@
|
|||
* [CHANGELOG-0.3.md][1]
|
||||
|
||||
|
||||
[25]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.15.md
|
||||
[24]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.14.md
|
||||
[23]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.13.md
|
||||
[22]: https://github.com/vmware-tanzu/velero/blob/main/changelogs/CHANGELOG-1.12.md
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
We as members, contributors, and leaders pledge to make participation in the Velero project and our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
identity and expression, level of experience, education, socioeconomic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as velero-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
|
@ -42,13 +42,16 @@ RUN mkdir -p /output/usr/bin && \
|
|||
export GOARM=$( echo "${GOARM}" | cut -c2-) && \
|
||||
go build -o /output/${BIN} \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
|
||||
go build -o /output/velero-restore-helper \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-restore-helper && \
|
||||
go build -o /output/velero-helper \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Restic binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as restic-builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS restic-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
|
|
@ -0,0 +1,55 @@
|
|||
# Copyright the Velero contributors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ARG OS_VERSION=1809
|
||||
|
||||
# Velero binary build section
|
||||
FROM --platform=$BUILDPLATFORM golang:1.23-bookworm AS velero-builder
|
||||
|
||||
ARG GOPROXY
|
||||
ARG BIN
|
||||
ARG PKG
|
||||
ARG VERSION
|
||||
ARG REGISTRY
|
||||
ARG GIT_SHA
|
||||
ARG GIT_TREE_STATE
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
|
||||
ENV CGO_ENABLED=0 \
|
||||
GO111MODULE=on \
|
||||
GOPROXY=${GOPROXY} \
|
||||
GOOS=${TARGETOS} \
|
||||
GOARCH=${TARGETARCH} \
|
||||
GOARM=${TARGETVARIANT} \
|
||||
LDFLAGS="-X ${PKG}/pkg/buildinfo.Version=${VERSION} -X ${PKG}/pkg/buildinfo.GitSHA=${GIT_SHA} -X ${PKG}/pkg/buildinfo.GitTreeState=${GIT_TREE_STATE} -X ${PKG}/pkg/buildinfo.ImageRegistry=${REGISTRY}"
|
||||
|
||||
WORKDIR /go/src/github.com/vmware-tanzu/velero
|
||||
|
||||
COPY . /go/src/github.com/vmware-tanzu/velero
|
||||
|
||||
RUN mkdir -p /output/usr/bin && \
|
||||
export GOARM=$( echo "${GOARM}" | cut -c2-) && \
|
||||
go build -o /output/${BIN}.exe \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN} && \
|
||||
go build -o /output/velero-helper.exe \
|
||||
-ldflags "${LDFLAGS}" ${PKG}/cmd/velero-helper && \
|
||||
go clean -modcache -cache
|
||||
|
||||
# Velero image packing section
|
||||
FROM mcr.microsoft.com/windows/nanoserver:${OS_VERSION}
|
||||
COPY --from=velero-builder /output /
|
||||
|
||||
USER ContainerUser
|
|
@ -107,6 +107,29 @@ Lazy consensus does _not_ apply to the process of:
|
|||
|
||||
* Removal of maintainers from Velero
|
||||
|
||||
## Deprecation Policy
|
||||
|
||||
### Deprecation Process
|
||||
|
||||
Any contributor may introduce a request to deprecate a feature or an option of a feature by opening a feature request issue in the vmware-tanzu/velero GitHub project. The issue should describe why the feature is no longer needed or has become detrimental to Velero, as well as whether and how it has been superseded. The submitter should give as much detail as possible.
|
||||
|
||||
Once the issue is filed, a one-month discussion period begins. Discussions take place within the issue itself as well as in the community meetings. The person who opens the issue, or a maintainer, should add the date and time marking the end of the discussion period in a comment on the issue as soon as possible after it is opened. A decision on the issue needs to be made within this one-month period.
|
||||
|
||||
The feature will be deprecated by a supermajority vote of 50% plus one of the project maintainers at the time of vote tallying, which takes place 72 hours after the community meeting that ends the comment period. (Maintainers are permitted to vote in advance of the deadline, but should hold their votes as close to it as possible so that all discussion can be heard.) Votes will be tallied in comments on the issue.
|
||||
|
||||
Non-maintainers may add non-binding votes in comments to the issue as well; these are opinions to be taken into consideration by maintainers, but they do not count as votes.
|
||||
|
||||
If the vote passes, the deprecation window takes effect in the subsequent release, and the removal follows the schedule.
|
||||
|
||||
### Schedule
|
||||
If the deprecation proposal passes by supermajority vote, the feature is deprecated in the next minor release and can be removed completely two minor versions later (or the equivalent if the major version number changes); e.g., if a feature is deprecated in the Nth minor version, it can be removed after the N+2 minor version or its equivalent.
|
||||
|
||||
### Deprecation Window
|
||||
|
||||
The deprecation window is the period from the release in which the deprecation takes effect through the release in which the feature is removed. During this period, only critical security vulnerabilities and catastrophic bugs should be fixed.
|
||||
|
||||
**Note:** If a backup relies on a deprecated feature, backups made with the last Velero release before that feature is removed must still be restorable in version `n+2`. For restic support, for instance, this might mean that restic is removed from the list of supported uploader types in version `n`, but the underlying implementation required to restore from a restic backup won't be removed until release `n+2`.
|
||||
|
||||
## Updating Governance
|
||||
|
||||
All substantive changes in Governance require a supermajority agreement by all maintainers.
|
||||
|
|
|
@ -10,10 +10,10 @@
|
|||
| Daniel Jiang | [reasonerjt](https://github.com/reasonerjt) | [VMware](https://www.github.com/vmware/) |
|
||||
| Wenkai Yin | [ywk253100](https://github.com/ywk253100) | [VMware](https://www.github.com/vmware/) |
|
||||
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
|
||||
| Ming Qiu | [qiuming-best](https://github.com/qiuming-best) | [VMware](https://www.github.com/vmware/) |
|
||||
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift) |
|
||||
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) |
|
||||
| Anshul Ahuja | [anshulahuja98](https://github.com/anshulahuja98) | [Microsoft Azure](https://www.github.com/azure/) |
|
||||
| Tiger Kaovilai | [kaovilai](https://github.com/kaovilai) | [OpenShift](https://github.com/openshift) |
|
||||
|
||||
## Emeritus Maintainers
|
||||
* Adnan Abdulhussein ([prydonius](https://github.com/prydonius))
|
||||
|
@ -26,7 +26,8 @@
|
|||
* Bridget McErlean ([zubron](https://github.com/zubron))
|
||||
* JenTing Hsiao ([jenting](https://github.com/jenting))
|
||||
* Dave Smith-Uchida ([dsu-igeek](https://github.com/dsu-igeek))
|
||||
|
||||
* Ming Qiu ([qiuming-best](https://github.com/qiuming-best))
|
||||
|
||||
## Velero Contributors & Stakeholders
|
||||
|
||||
| Feature Area | Lead |
|
||||
|
|
162
Makefile
|
@ -22,6 +22,18 @@ PKG := github.com/vmware-tanzu/velero
|
|||
|
||||
# Where to push the docker image.
|
||||
REGISTRY ?= velero
|
||||
# In order to push images to an insecure registry, follow the two steps:
|
||||
# 1. Set "INSECURE_REGISTRY=true"
|
||||
# 2. Provide your own buildx builder instance by setting "BUILDX_INSTANCE=your-own-builder-instance"
|
||||
# The builder can be created with the following command:
|
||||
# cat << EOF > buildkitd.toml
|
||||
# [registry."insecure-registry-ip:port"]
|
||||
# http = true
|
||||
# insecure = true
|
||||
# EOF
|
||||
# docker buildx create --name=velero-builder --driver=docker-container --bootstrap --use --config ./buildkitd.toml
|
||||
# Refer to https://github.com/docker/buildx/issues/1370#issuecomment-1288516840 for more details
|
||||
INSECURE_REGISTRY ?= false
|
||||
GCR_REGISTRY ?= gcr.io/velero-gcp
|
||||
|
||||
# Image name
|
||||
|
@ -31,6 +43,7 @@ GCR_IMAGE ?= $(GCR_REGISTRY)/$(BIN)
|
|||
# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
|
||||
# that pull base images from different registries.
|
||||
VELERO_DOCKERFILE ?= Dockerfile
|
||||
VELERO_DOCKERFILE_WINDOWS ?= Dockerfile-Windows
|
||||
BUILDER_IMAGE_DOCKERFILE ?= hack/build-image/Dockerfile
|
||||
|
||||
# Calculate the realpath of the build-image Dockerfile as we `cd` into the hack/build
|
||||
|
@ -74,17 +87,17 @@ else
|
|||
GCR_IMAGE_TAGS ?= $(GCR_IMAGE):$(VERSION)
|
||||
endif
|
||||
|
||||
# check buildx is enabled
|
||||
# check buildx is enabled only if docker is in path
|
||||
# macOS/Windows docker cli without Docker Desktop license: https://github.com/abiosoft/colima
|
||||
# To add buildx to docker cli: https://github.com/abiosoft/colima/discussions/273#discussioncomment-2684502
|
||||
ifeq ($(shell docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
|
||||
ifeq ($(shell which docker 2>/dev/null 1>&2 && docker buildx inspect 2>/dev/null | awk '/Status/ { print $$2 }'), running)
|
||||
BUILDX_ENABLED ?= true
|
||||
# if emulated docker cli from podman, assume enabled
|
||||
# emulated docker cli from podman: https://podman-desktop.io/docs/migrating-from-docker/emulating-docker-cli-with-podman
|
||||
# podman known issues:
|
||||
# - on remote podman, such as on macOS,
|
||||
# --output issue: https://github.com/containers/podman/issues/15922
|
||||
else ifeq ($(shell cat $(shell which docker) | grep -c "exec podman"), 1)
|
||||
else ifeq ($(shell which docker 2>/dev/null 1>&2 && cat $(shell which docker) | grep -c "exec podman"), 1)
|
||||
BUILDX_ENABLED ?= true
|
||||
else
|
||||
BUILDX_ENABLED ?= false
|
||||
|
@ -94,13 +107,36 @@ define BUILDX_ERROR
|
|||
buildx not enabled, refusing to run this recipe
|
||||
see: https://velero.io/docs/main/build-from-source/#making-images-and-updating-velero for more info
|
||||
endef
|
||||
|
||||
# comma cannot be escaped and can only be used in Make function arguments by putting it into a variable
|
||||
comma=,
|
||||
# The version of restic binary to be downloaded
|
||||
RESTIC_VERSION ?= 0.15.0
|
||||
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le
|
||||
BUILDX_PLATFORMS ?= $(subst -,/,$(ARCH))
|
||||
BUILDX_OUTPUT_TYPE ?= docker
|
||||
CLI_PLATFORMS ?= linux-amd64 linux-arm linux-arm64 darwin-amd64 darwin-arm64 windows-amd64 linux-ppc64le linux-s390x
|
||||
BUILD_OUTPUT_TYPE ?= docker
|
||||
BUILD_OS ?= linux
|
||||
BUILD_ARCH ?= amd64
|
||||
BUILD_TAG_GCR ?= false
|
||||
BUILD_WINDOWS_VERSION ?= ltsc2022
|
||||
|
||||
ifeq ($(BUILD_OUTPUT_TYPE), docker)
|
||||
ALL_OS = linux
|
||||
ALL_ARCH.linux = $(word 2, $(subst -, ,$(shell go env GOOS)-$(shell go env GOARCH)))
|
||||
else
|
||||
ALL_OS = $(subst $(comma), ,$(BUILD_OS))
|
||||
ALL_ARCH.linux = $(subst $(comma), ,$(BUILD_ARCH))
|
||||
endif
|
||||
|
||||
ALL_ARCH.windows = $(if $(filter windows,$(ALL_OS)),amd64,)
|
||||
ALL_OSVERSIONS.windows = $(if $(filter windows,$(ALL_OS)),$(BUILD_WINDOWS_VERSION),)
|
||||
ALL_OS_ARCH.linux = $(foreach os, $(filter linux,$(ALL_OS)), $(foreach arch, ${ALL_ARCH.linux}, ${os}-$(arch)))
|
||||
ALL_OS_ARCH.windows = $(foreach os, $(filter windows,$(ALL_OS)), $(foreach arch, $(ALL_ARCH.windows), $(foreach osversion, ${ALL_OSVERSIONS.windows}, ${os}-${osversion}-${arch})))
|
||||
ALL_OS_ARCH = $(ALL_OS_ARCH.linux)$(ALL_OS_ARCH.windows)
|
||||
|
||||
ALL_IMAGE_TAGS = $(IMAGE_TAGS)
|
||||
ifeq ($(BUILD_TAG_GCR), true)
|
||||
ALL_IMAGE_TAGS += $(GCR_IMAGE_TAGS)
|
||||
endif
|
||||
|
||||
# set git sha and tree state
|
||||
GIT_SHA = $(shell git rev-parse HEAD)
|
||||
|
@ -124,17 +160,14 @@ GOBIN=$$(pwd)/.go/bin
|
|||
# If you want to build all containers, see the 'all-containers' rule.
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@$(MAKE) build BIN=velero-restore-helper
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@$(MAKE) --no-print-directory ARCH=$* build BIN=velero-restore-helper
|
||||
|
||||
all-build: $(addprefix build-, $(CLI_PLATFORMS))
|
||||
|
||||
all-containers:
|
||||
@$(MAKE) --no-print-directory container
|
||||
@$(MAKE) --no-print-directory container BIN=velero-restore-helper
|
||||
|
||||
local: build-dirs
|
||||
# Add DEBUG=1 to enable debug locally
|
||||
|
@ -196,11 +229,38 @@ container:
|
|||
ifneq ($(BUILDX_ENABLED), true)
|
||||
$(error $(BUILDX_ERROR))
|
||||
endif
|
||||
|
||||
ifeq ($(BUILDX_INSTANCE),)
|
||||
@echo creating a buildx instance
|
||||
-docker buildx rm velero-builder || true
|
||||
@docker buildx create --use --name=velero-builder
|
||||
else
|
||||
@echo using a specified buildx instance $(BUILDX_INSTANCE)
|
||||
@docker buildx use $(BUILDX_INSTANCE)
|
||||
endif
|
||||
|
||||
@mkdir -p _output
|
||||
|
||||
@for osarch in $(ALL_OS_ARCH); do \
|
||||
$(MAKE) container-$${osarch}; \
|
||||
done
|
||||
|
||||
ifeq ($(BUILD_OUTPUT_TYPE), registry)
|
||||
@for tag in $(ALL_IMAGE_TAGS); do \
|
||||
IMAGE_TAG=$${tag} $(MAKE) push-manifest; \
|
||||
done
|
||||
endif
|
||||
|
||||
container-linux-%:
|
||||
@BUILDX_ARCH=$* $(MAKE) container-linux
|
||||
|
||||
container-linux:
|
||||
@echo "building container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"
|
||||
|
||||
@docker buildx build --pull \
|
||||
--output=type=$(BUILDX_OUTPUT_TYPE) \
|
||||
--platform $(BUILDX_PLATFORMS) \
|
||||
$(addprefix -t , $(IMAGE_TAGS)) \
|
||||
$(addprefix -t , $(GCR_IMAGE_TAGS)) \
|
||||
--output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-linux-$(BUILDX_ARCH).tar,)" \
|
||||
--platform="linux/$(BUILDX_ARCH)" \
|
||||
$(addprefix -t , $(addsuffix "-linux-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
|
||||
--build-arg=GOPROXY=$(GOPROXY) \
|
||||
--build-arg=PKG=$(PKG) \
|
||||
--build-arg=BIN=$(BIN) \
|
||||
|
@ -209,14 +269,54 @@ endif
|
|||
--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
|
||||
--build-arg=REGISTRY=$(REGISTRY) \
|
||||
--build-arg=RESTIC_VERSION=$(RESTIC_VERSION) \
|
||||
--provenance=false \
|
||||
--sbom=false \
|
||||
-f $(VELERO_DOCKERFILE) .
|
||||
@echo "container: $(IMAGE):$(VERSION)"
|
||||
ifeq ($(BUILDX_OUTPUT_TYPE)_$(REGISTRY), registry_velero)
|
||||
docker pull $(IMAGE):$(VERSION)
|
||||
rm -f $(BIN)-$(VERSION).tar
|
||||
docker save $(IMAGE):$(VERSION) -o $(BIN)-$(VERSION).tar
|
||||
gzip -f $(BIN)-$(VERSION).tar
|
||||
endif
|
||||
|
||||
@echo "built container: $(IMAGE):$(VERSION)-linux-$(BUILDX_ARCH)"
|
||||
|
||||
container-windows-%:
|
||||
@BUILDX_OSVERSION=$(firstword $(subst -, ,$*)) BUILDX_ARCH=$(lastword $(subst -, ,$*)) $(MAKE) container-windows
|
||||
|
||||
container-windows:
|
||||
@echo "building container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"
|
||||
|
||||
@docker buildx build --pull \
|
||||
--output="type=$(BUILD_OUTPUT_TYPE)$(if $(findstring tar, $(BUILD_OUTPUT_TYPE)),$(comma)dest=_output/$(BIN)-$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH).tar,)" \
|
||||
--platform="windows/$(BUILDX_ARCH)" \
|
||||
$(addprefix -t , $(addsuffix "-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)",$(ALL_IMAGE_TAGS))) \
|
||||
--build-arg=GOPROXY=$(GOPROXY) \
|
||||
--build-arg=PKG=$(PKG) \
|
||||
--build-arg=BIN=$(BIN) \
|
||||
--build-arg=VERSION=$(VERSION) \
|
||||
--build-arg=OS_VERSION=$(BUILDX_OSVERSION) \
|
||||
--build-arg=GIT_SHA=$(GIT_SHA) \
|
||||
--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
|
||||
--build-arg=REGISTRY=$(REGISTRY) \
|
||||
--provenance=false \
|
||||
--sbom=false \
|
||||
-f $(VELERO_DOCKERFILE_WINDOWS) .
|
||||
|
||||
@echo "built container: $(IMAGE):$(VERSION)-windows-$(BUILDX_OSVERSION)-$(BUILDX_ARCH)"
|
||||
|
||||
push-manifest:
|
||||
@echo "building manifest: $(IMAGE_TAG) for $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})"
|
||||
@docker manifest create --amend --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG) $(foreach osarch, $(ALL_OS_ARCH), $(IMAGE_TAG)-${osarch})
|
||||
|
||||
@set -x; \
|
||||
for arch in $(ALL_ARCH.windows); do \
|
||||
for osversion in $(ALL_OSVERSIONS.windows); do \
|
||||
BASEIMAGE=mcr.microsoft.com/windows/nanoserver:$${osversion}; \
|
||||
full_version=`docker manifest inspect --insecure=$(INSECURE_REGISTRY) $${BASEIMAGE} | jq -r '.manifests[0].platform["os.version"]'`; \
|
||||
docker manifest annotate --os windows --arch $${arch} --os-version $${full_version} $(IMAGE_TAG) $(IMAGE_TAG)-windows-$${osversion}-$${arch}; \
|
||||
done; \
|
||||
done
|
||||
|
||||
@echo "pushing manifest $(IMAGE_TAG)"
|
||||
@docker manifest push --purge --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)
|
||||
|
||||
@echo "pushed manifest $(IMAGE_TAG):"
|
||||
@docker manifest inspect --insecure=$(INSECURE_REGISTRY) $(IMAGE_TAG)
|
||||
|
||||
SKIP_TESTS ?=
|
||||
test: build-dirs
|
||||
|
@ -377,4 +477,22 @@ test-perf: local
|
|||
$(MAKE) -e VERSION=$(VERSION) -C test/ run-perf
|
||||
|
||||
go-generate:
|
||||
go generate ./pkg/...
|
||||
|
||||
# requires an authenticated gh cli
|
||||
# gh: https://cli.github.com/
|
||||
# First create a PR
|
||||
# gh pr create --title 'Title name' --body 'PR body'
|
||||
# by default uses PR title as changelog body but can be overwritten like so
|
||||
# make new-changelog CHANGELOG_BODY="Changes you have made"
|
||||
new-changelog: GH_LOGIN ?= $(shell gh pr view --json author --jq .author.login 2> /dev/null)
|
||||
new-changelog: GH_PR_NUMBER ?= $(shell gh pr view --json number --jq .number 2> /dev/null)
|
||||
new-changelog: CHANGELOG_BODY ?= '$(shell gh pr view --json title --jq .title)'
|
||||
new-changelog:
|
||||
@if [ "$(GH_LOGIN)" = "" ]; then \
|
||||
echo "branch does not have PR or cli not logged in, try 'gh auth login' or 'gh pr create'"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@mkdir -p ./changelogs/unreleased/ && \
|
||||
echo $(CHANGELOG_BODY) > ./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN) && \
|
||||
echo \"$(CHANGELOG_BODY)\" added to "./changelogs/unreleased/$(GH_PR_NUMBER)-$(GH_LOGIN)"
|
|
@ -42,11 +42,12 @@ The following is a list of the supported Kubernetes versions for each Velero ver
|
|||
|
||||
| Velero version | Expected Kubernetes version compatibility | Tested on Kubernetes version |
|
||||
|----------------|-------------------------------------------|-------------------------------------|
|
||||
| 1.16 | 1.18-latest | 1.31.4, 1.32.3, and 1.33.0 |
|
||||
| 1.15 | 1.18-latest | 1.28.8, 1.29.8, 1.30.4 and 1.31.1 |
|
||||
| 1.14 | 1.18-latest | 1.27.9, 1.28.9, and 1.29.4 |
|
||||
| 1.13 | 1.18-latest | 1.26.5, 1.27.3, 1.27.8, and 1.28.3 |
|
||||
| 1.12 | 1.18-latest | 1.25.7, 1.26.5, 1.26.7, and 1.27.3 |
|
||||
| 1.11 | 1.18-latest | 1.23.10, 1.24.9, 1.25.5, and 1.26.1 |
|
||||
| 1.10 | 1.18-latest | 1.22.5, 1.23.8, 1.24.6 and 1.25.1 |
|
||||
|
||||
Velero supports IPv4, IPv6, and dual stack environments. Support for this was tested against Velero v1.8.
|
||||
|
||||
|
|
2
Tiltfile
|
@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
|
|||
|
||||
tilt_helper_dockerfile_header = """
|
||||
# Tilt image
|
||||
FROM golang:1.22 as tilt-helper
|
||||
FROM golang:1.23 as tilt-helper
|
||||
|
||||
# Support live reloading with Tilt
|
||||
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
|
||||
|
|
|
@ -14,7 +14,7 @@ https://velero.io/docs/v1.14/upgrade-to-1.14/
|
|||
|
||||
### Highlights
|
||||
|
||||
#### The maintenance work for kopia backup repositories is run in jobs
|
||||
#### The maintenance work for kopia/restic backup repositories is run in jobs
|
||||
Since Velero started using kopia as the approach for filesystem-level backup/restore, we've noticed that when Velero connects to the kopia backup repositories and performs maintenance, it sometimes consumes excessive memory, which can cause the Velero pod to get OOM killed. To mitigate this issue, the maintenance work is moved out of the Velero pod into a separate Kubernetes job, and the user is able to specify the resource requests in "velero install".
|
||||
#### Volume Policies are extended to support more actions to handle volumes
|
||||
In an earlier release, a flexible volume policy was introduced to skip certain volumes from a backup. In v1.14 we've enhanced this policy to let the user set how the volumes should be backed up. The user can set "fs-backup" or "snapshot" as the value of "action" in the policy, and Velero will back up the volumes accordingly. This enhancement allows the user to achieve fine-grained control like "opt-in/out" without having to update the target workload. For more details please refer to https://velero.io/docs/v1.14/resource-filtering/#supported-volumepolicy-actions
|
||||
|
@ -38,6 +38,7 @@ Besides the service principal with secret(password)-based authentication, Velero
|
|||
* The CSI plugin has been merged into the Velero repo in the v1.14 release. It is installed by default as an internal plugin, and should not be installed via the "--plugins" parameter of the "velero install" command.
|
||||
* The default resource requests and limits for the node agent are removed in v1.14, so that the node agent pods have the QoS class of "BestEffort". For more details please refer to #7391
|
||||
* There's a change in namespace filtering behavior during backup: in v1.14, when the includedNamespaces/excludedNamespaces fields are not set and the labelSelector/OrLabelSelectors are set in the backup spec, the backup will only include the namespaces which contain resources that match the label selectors, while in previous releases all namespaces would be included in the backup with such settings. For more details refer to #7105
|
||||
* Patching the PV in the "Finalizing" state may cause the restore to be in "PartiallyFailed" state when the PV is blocked in "Pending" state, while in the previous release the restore may end up being in "Complete" state. For more details refer to #7866
|
||||
|
||||
### All Changes
|
||||
* Fix backup log to show error string, not index (#7805, @piny940)
|
||||
|
|
|
@ -0,0 +1,145 @@
|
|||
## v1.15
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.15.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.15.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.15/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.15/upgrade-to-1.15/
|
||||
|
||||
### Highlights
|
||||
#### Data mover micro service
|
||||
Data transfer activities for CSI Snapshot Data Movement are moved from node-agent pods to dedicated backupPods or restorePods. This brings many benefits, such as:
|
||||
- This avoids accessing volume data through the host path; host path access is privileged and may involve security escalations, which concern users.
|
||||
- This enables users to control resource (i.e., CPU, memory) allocations in a granular manner, e.g., per backup/restore of a volume.
|
||||
- This enhances resilience: a crash of one data movement activity won't affect others.
|
||||
- This prevents unnecessary full backups caused by host path changes after workload pods restart.
|
||||
- For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/vgdp-micro-service/vgdp-micro-service.md.
|
||||
|
||||
#### Item Block concepts and ItemBlockAction (IBA) plugin
|
||||
Item Block concepts are introduced for resource backups to enable multi-threaded backups. Specifically, correlated resources are grouped into the same item block, and item blocks can be processed concurrently in multiple threads.
|
||||
The ItemBlockAction plugin is introduced to help Velero categorize resources into item blocks. At present, Velero provides built-in IBAs for pods and PVCs, and also supports custom IBAs for any resource.
|
||||
In v1.15, Velero doesn't yet process item blocks in multiple threads, though the item block concepts and IBA plugins are fully supported. Multi-threaded support will be delivered in future releases.
|
||||
For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/backup-performance-improvements.md.
|
||||
|
||||
#### Node selection for repository maintenance job
|
||||
Repository maintenance is a resource-consuming task. Velero now allows you to configure which nodes run repository maintenance jobs, so that you can run them on idle nodes or keep them away from nodes hosting critical workloads.
|
||||
To support the configuration, a new repository maintenance configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/repository-maintenance/.
|
||||
|
||||
#### Backup PVC read-only configuration
|
||||
In 1.15, Velero allows you to configure the data mover backupPods to mount the backupPVCs read-only. This can significantly accelerate the data mover expose process for some storages (e.g., Ceph).
|
||||
To support the configuration, a new backup PVC configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/data-movement-backup-pvc-configuration/.
|
||||
|
||||
#### Backup PVC storage class configuration
|
||||
In 1.15, Velero allows you to configure the storage class used by the data mover backupPods. This way, provisioning of backupPVCs doesn't need to adhere to the same pattern as workload PVCs; e.g., a backupPVC only needs one replica, whereas a workload PVC may have multiple replicas.
|
||||
To support the configuration, the same backup PVC configuration configMap is used.
|
||||
For more information, check the document https://velero.io/docs/v1.15/data-movement-backup-pvc-configuration/.
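Below is a hedged sketch of what such a configuration could look like. The idea (a per-storage-class backupPVC override with a dedicated storage class and a read-only mount) comes from the notes above, but the configMap name and the key names are illustrative assumptions; consult the linked document for the exact schema.

```bash
# Illustrative sketch only: for workload volumes of class "ceph-rbd", provision the
# backupPVC from a leaner class and mount it read-only. The configMap name and the
# JSON key names are assumptions; see the linked documentation for the real schema.
cat > node-agent-config.json <<'EOF'
{
  "backupPVC": {
    "ceph-rbd": {
      "storageClass": "ceph-rbd-single-replica",
      "readOnly": true
    }
  }
}
EOF
kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```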
|
||||
|
||||
#### Backup repository data cache configuration
|
||||
The backup repository may need to cache data on the client side during various repository operations, i.e., read, write, maintenance, etc. The cache consumes the root file system space of the pod where the repository access happens.
|
||||
In 1.15, Velero allows you to configure the total size of the cache per repository. This way, if the pod has limited space in its root file system, it won't be evicted for running out of ephemeral storage.
|
||||
To support the configuration, a new backup repository configuration configMap is introduced.
|
||||
For more information, check the document https://velero.io/docs/v1.15/backup-repository-configuration/.
|
||||
|
||||
#### Performance improvements
|
||||
In 1.15, several performance-related fixes and enhancements are included, which bring significant performance improvements in specific scenarios:
|
||||
- A memory leak of the Velero server after plugin calls has been fixed, see issue https://github.com/vmware-tanzu/velero/issues/7925
|
||||
- The `client-burst/client-qps` parameters are automatically inherited by plugins, so the same Velero server parameters can accelerate plugin executions when a large number of API server calls happen, see issue https://github.com/vmware-tanzu/velero/issues/7806
|
||||
- Maintenance of a Kopia repository used to take huge amounts of memory in scenarios where a huge number of files had been backed up; Velero 1.15 includes the Kopia upstream enhancement that fixes the problem, see issue https://github.com/vmware-tanzu/velero/issues/7510
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: v1.22.8
|
||||
kopia: v0.17.0
|
||||
|
||||
### Limitations/Known issues
|
||||
#### Read-only backup PVC may not work on SELinux environments
|
||||
Due to an upstream Kubernetes issue, if a volume is mounted read-only in SELinux environments, the read privilege is not granted to any user, and as a result the data mover backup will fail. On the other hand, the backupPVC must be mounted read-only in order to accelerate the data mover expose process.
|
||||
Therefore, a user option is added to the same backup PVC configuration configMap; once the option is enabled, the backupPod container will run as a super-privileged container and disable SELinux access control. If you have concerns about this super-privileged container, or you have configured [pod security admissions](https://kubernetes.io/docs/concepts/security/pod-security-admission/) that don't allow super-privileged containers, you will not be able to use this read-only backupPVC feature and will lose the benefit of accelerating the data mover expose process.
|
||||
|
||||
### Breaking changes
|
||||
#### Deprecation of Restic
|
||||
The Restic path for fs-backup enters the deprecation process starting from 1.15. According to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/v1.15/GOVERNANCE.md#deprecation-policy), in 1.15 backup/restore with fs-backup is still created and still succeeds when the Restic path is used, but you will see warnings in the scenarios below:
|
||||
- When `--uploader-type=restic` is used in Velero installation
|
||||
- When Restic path is used to create backup/restore of fs-backup
|
||||
|
||||
#### node-agent configuration name is configurable
|
||||
Previously, a fixed name was used to look up the node-agent configuration configMap. In 1.15, Velero allows you to customize the name of this configMap; the name must then be passed via the node-agent server parameter `node-agent-configmap`, as sketched below.
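A minimal sketch of wiring this up, assuming a default installation where the node-agent runs as the `node-agent` daemonset in the `velero` namespace and the server container is the first container in the pod spec:

```bash
# Illustrative sketch only: point the node-agent server at a custom configMap name.
# The daemonset name, namespace, and container index are assumptions about a default install.
kubectl -n velero patch daemonset node-agent --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--node-agent-configmap=my-node-agent-config"}]'
```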
|
||||
|
||||
#### Repository maintenance job configurations in Velero server parameter are moved to repository maintenance job configuration configMap
|
||||
In 1.15, the Velero server parameters below for repository maintenance jobs are moved to the repository maintenance job configuration configMap. For backward-compatibility reasons the same Velero server parameters are preserved as is, but the configMap is recommended, and the values in the configMap take precedence if they exist in both places:
|
||||
```
|
||||
--keep-latest-maintenance-jobs
|
||||
--maintenance-job-cpu-request
|
||||
--maintenance-job-mem-request
|
||||
--maintenance-job-cpu-limit
|
||||
--maintenance-job-mem-limit
|
||||
```
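A hedged sketch of the configMap-based alternative follows; the configMap name and the JSON key names below are illustrative assumptions rather than the documented schema, so check the repository-maintenance documentation before relying on them.

```bash
# Illustrative sketch only: supply maintenance job settings via a configMap instead of
# the server flags listed above. Key names here are assumptions, not the documented schema.
cat > repo-maintenance-job-config.json <<'EOF'
{
  "global": {
    "keepLatestMaintenanceJobs": 3,
    "podResources": {
      "cpuRequest": "100m",
      "memoryRequest": "128Mi",
      "cpuLimit": "500m",
      "memoryLimit": "512Mi"
    }
  }
}
EOF
kubectl -n velero create configmap repo-maintenance-job-config \
  --from-file=repo-maintenance-job-config.json
```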
|
||||
|
||||
#### Changing PVC selected-node feature is deprecated
|
||||
In 1.15, the [Changing PVC selected-node feature](https://velero.io/docs/v1.15/restore-reference/#changing-pvc-selected-node) enters the deprecation process and will be removed in a future release according to the [Velero deprecation policy](https://github.com/vmware-tanzu/velero/blob/v1.15/GOVERNANCE.md#deprecation-policy). Usage of this feature for any purpose is not recommended.
|
||||
|
||||
### All Changes
|
||||
* add no-relabeling option to backupPVC configmap (#8288, @sseago)
|
||||
* only set spec.volumes readonly if PVC is readonly for datamover (#8284, @sseago)
|
||||
* Add labels to maintenance job pods (#8256, @shubham-pampattiwar)
|
||||
* Add the Carvel package related resources to the restore priority list (#8228, @ywk253100)
|
||||
* Reduces indirect imports for plugin/framework importers (#8208, @kaovilai)
|
||||
* Add controller name to periodical_enqueue_source. The logger parameter now includes an additional field with the value of reflect.TypeOf(objList).String() and another field with the value of controllerName. (#8198, @kaovilai)
|
||||
* Update Openshift SCC docs link (#8170, @shubham-pampattiwar)
|
||||
* Partially fix issue #8138, add doc for node-agent memory preserve (#8167, @Lyndon-Li)
|
||||
* Pass Velero server command args to the plugins (#8166, @ywk253100)
|
||||
* Fix issue #8155, Merge Kopia upstream commits for critical issue fixes and performance improvements (#8158, @Lyndon-Li)
|
||||
* Implement the Repo maintenance Job configuration. (#8145, @blackpiglet)
|
||||
* Add document for data mover micro service (#8144, @Lyndon-Li)
|
||||
* Fix issue #8134, allow to config resource request/limit for data mover micro service pods (#8143, @Lyndon-Li)
|
||||
* Apply backupPVCConfig to backupPod volume spec (#8141, @shubham-pampattiwar)
|
||||
* Add resource modifier for velero restore describe CLI (#8139, @blackpiglet)
|
||||
* Fix issue #7620, add doc for backup repo config (#8131, @Lyndon-Li)
|
||||
* Modify E2E and perf test report generated directory (#8129, @blackpiglet)
|
||||
* Add docs for backup pvc config support (#8119, @shubham-pampattiwar)
|
||||
* Delete generated k8s client and informer. (#8114, @blackpiglet)
|
||||
* Add support for backup PVC configuration (#8109, @shubham-pampattiwar)
|
||||
* ItemBlock model and phase 1 (single-thread) workflow changes (#8102, @sseago)
|
||||
* Fix issue #8032, make node-agent configMap name configurable (#8097, @Lyndon-Li)
|
||||
* Fix issue #8072, add the warning messages for restic deprecation (#8096, @Lyndon-Li)
|
||||
* Fix issue #7620, add backup repository configuration implementation and support cacheLimit configuration for Kopia repo (#8093, @Lyndon-Li)
|
||||
* Patch dbr's status when error happens (#8086, @reasonerjt)
|
||||
* According to design #7576, after node-agent restarts, if a DU/DD is in InProgress status, re-capture the data mover ms pod and continue the execution (#8085, @Lyndon-Li)
|
||||
* Updates to IBM COS documentation to match current version (#8082, @gjanders)
|
||||
* Data mover micro service DUCR/DDCR controller refactor according to design #7576 (#8074, @Lyndon-Li)
|
||||
* add retries with timeout to existing patch calls that moves a backup/restore from InProgress/Finalizing to a final status phase. (#8068, @kaovilai)
|
||||
* Data mover micro service restore according to design #7576 (#8061, @Lyndon-Li)
|
||||
* Internal ItemBlockAction plugins (#8054, @sseago)
|
||||
* Data mover micro service backup according to design #7576 (#8046, @Lyndon-Li)
|
||||
* Avoid wrapping failed PVB status with empty message. (#8028, @mrnold)
|
||||
* Created new ItemBlockAction (IBA) plugin type (#8026, @sseago)
|
||||
* Make PVPatchMaximumDuration timeout configurable (#8021, @shubham-pampattiwar)
|
||||
* Reuse existing plugin manager for get/put volume info (#8012, @sseago)
|
||||
* Data mover ms watcher according to design #7576 (#7999, @Lyndon-Li)
|
||||
* New data path for data mover ms according to design #7576 (#7988, @Lyndon-Li)
|
||||
* For issue #7700 and #7747, add the design for backup PVC configurations (#7982, @Lyndon-Li)
|
||||
* Only get VolumeSnapshotClass when DataUpload exists. (#7974, @blackpiglet)
|
||||
* Fix issue #7972, sync the backupPVC deletion in expose clean up (#7973, @Lyndon-Li)
|
||||
* Expose the VolumeHelper to third-party plugins. (#7969, @blackpiglet)
|
||||
* Check whether the volume's source is PVC before fetching its PV. (#7967, @blackpiglet)
|
||||
* Check whether the namespaces specified in namespace filter exist. (#7965, @blackpiglet)
|
||||
* Add design for backup repository configurations for issue #7620, #7301 (#7963, @Lyndon-Li)
|
||||
* New data path for data mover ms according to design #7576 (#7955, @Lyndon-Li)
|
||||
* Skip PV patch step in Restore workflow for WaitForFirstConsumer VolumeBindingMode Pending state PVCs (#7953, @shubham-pampattiwar)
|
||||
* Fix issue #7904, add the deprecation and limitation clarification for change PVC selected-node feature (#7948, @Lyndon-Li)
|
||||
* Expose the VolumeHelper to third-party plugins. (#7944, @blackpiglet)
|
||||
* Don't consider unschedulable pods unrecoverable (#7899, @sseago)
|
||||
* Upgrade to robfig/cron/v3 to support time zone specification. (#7793, @kaovilai)
|
||||
* Add the result in the backup's VolumeInfo. (#7775, @blackpiglet)
|
||||
* Migrate from github.com/golang/protobuf to google.golang.org/protobuf (#7593, @mmorel-35)
|
||||
* Add the design for data mover micro service (#7576, @Lyndon-Li)
|
||||
* Descriptive restore error when restoring into a terminating namespace. (#7424, @kaovilai)
|
||||
* Ignore missing path error in conditional match (#7410, @seanblong)
|
||||
* Propose a deprecation process for velero (#5532, @shubham-pampattiwar)
|
|
@ -0,0 +1,156 @@
|
|||
## v1.16
|
||||
|
||||
### Download
|
||||
https://github.com/vmware-tanzu/velero/releases/tag/v1.16.0
|
||||
|
||||
### Container Image
|
||||
`velero/velero:v1.16.0`
|
||||
|
||||
### Documentation
|
||||
https://velero.io/docs/v1.16/
|
||||
|
||||
### Upgrading
|
||||
https://velero.io/docs/v1.16/upgrade-to-1.16/
|
||||
|
||||
### Highlights
|
||||
#### Windows cluster support
|
||||
In v1.16, Velero supports running in Windows clusters and backing up/restoring Windows workloads, either stateful or stateless:
|
||||
* Hybrid build and all-in-one image: the build process is enhanced to build an all-in-one image for hybrid CPU architecture and hybrid platform. For more information, check the design https://github.com/vmware-tanzu/velero/blob/main/design/multiple-arch-build-with-windows.md
|
||||
* Deployment in Windows clusters: Velero node-agent, data mover pods, and maintenance jobs now support running on both Linux and Windows nodes
|
||||
* Data mover backup/restore of Windows workloads: Velero's built-in data mover supports Windows workloads throughout the full cycle, i.e., discovery, backup, restore, pre/post hooks, etc. It automatically identifies Windows workloads and schedules data mover pods to the right group of nodes
|
||||
|
||||
Check the epic issue https://github.com/vmware-tanzu/velero/issues/8289 for more information.
|
||||
|
||||
#### Parallel Item Block backup
|
||||
v1.16 now supports backing up item blocks in parallel. Specifically, during backup, correlated resources are grouped into item blocks and the Velero backup engine creates a thread pool to back up the item blocks in parallel. This significantly improves backup throughput, especially when there is a large number of resources.
|
||||
Pre/post hooks also belong to item blocks, so they will also run in parallel along with the item blocks.
|
||||
Users are allowed to configure the parallelism through the `--item-block-worker-count` Velero server parameter. If not configured, the default parallelism is 1.
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8334.
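As a minimal illustration (assuming the default `velero` namespace and that the server container is the first container in the deployment), the worker count could be raised by appending the flag to the server arguments:

```bash
# Illustrative sketch only: run 4 item-block workers in parallel.
# Namespace and container index are assumptions about a default installation.
kubectl -n velero patch deployment velero --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--item-block-worker-count=4"}]'
```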
|
||||
|
||||
#### Data mover restore enhancement in scalability
|
||||
In previous releases, for each volume in WaitForFirstConsumer mode, data mover restore was only allowed to happen on the node to which the volume is attached. This severely degrades the parallelism and the balance of node resource (CPU, memory, network bandwidth) consumption for data mover restores (https://github.com/vmware-tanzu/velero/issues/8044).
|
||||
|
||||
In v1.16, users can configure data mover restores to run and spread evenly across all nodes in the cluster. The configuration is done through a new flag, `ignoreDelayBinding`, in the node-agent configuration (https://github.com/vmware-tanzu/velero/issues/8242).
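A minimal sketch of such a node-agent configuration; only the `ignoreDelayBinding` flag name comes from the notes above, while the configMap name and the JSON nesting are assumptions to be verified against the data-movement documentation.

```bash
# Illustrative sketch only: let data mover restores ignore the delay-binding constraint.
# The configMap name and JSON nesting are assumptions; ignoreDelayBinding is the flag
# named in the release notes.
cat > node-agent-config.json <<'EOF'
{
  "restorePVC": {
    "ignoreDelayBinding": true
  }
}
EOF
kubectl -n velero create configmap node-agent-config --from-file=node-agent-config.json
```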
|
||||
|
||||
#### Data mover enhancements in observability
|
||||
In 1.16, some observability enhancements are added:
|
||||
* Output various statuses of intermediate objects for failures of data mover backup/restore (https://github.com/vmware-tanzu/velero/issues/8267)
|
||||
* Output the errors when Velero fails to delete intermediate objects during clean up (https://github.com/vmware-tanzu/velero/issues/8125)
|
||||
|
||||
The outputs are in the same node-agent log and enabled automatically.
|
||||
|
||||
#### CSI snapshot backup/restore enhancement in usability
|
||||
In previous releases, an unnecessary VolumeSnapshotContent object was retained for each backup and synced to other clusters sharing the same backup storage location. During restore, the retained VolumeSnapshotContent was also restored unnecessarily.
|
||||
|
||||
In 1.16, the retained VolumeSnapshotContent is removed from the backup, so no unnecessary CSI objects are synced or restored.
|
||||
|
||||
For more information, check issue https://github.com/vmware-tanzu/velero/issues/8725.
|
||||
|
||||
#### Backup Repository Maintenance enhancement in resiliency and observability
|
||||
In v1.16, several backup repository maintenance enhancements are added to improve observability and resiliency:
|
||||
* A new backup repository maintenance history section, called `RecentMaintenance`, is added to the BackupRepository CR. Specifically, for each BackupRepository, the recent maintenance runs are recorded, including start/completion time, completion status, and error message. (https://github.com/vmware-tanzu/velero/issues/7810)
|
||||
* Running maintenance jobs are now recaptured after Velero server restarts. (https://github.com/vmware-tanzu/velero/issues/7753)
|
||||
* The maintenance job will not be launched for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8238)
|
||||
* The backup repository will not try to initialize a new repository for readOnly BackupStorageLocation. (https://github.com/vmware-tanzu/velero/issues/8091)
|
||||
* Users are now allowed to configure the interval of effective maintenance as `normalGC`, `fastGC`, or `eagerGC`, through the `fullMaintenanceInterval` parameter in the backupRepository configuration; see the sketch below. (https://github.com/vmware-tanzu/velero/issues/8364)
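A minimal sketch of such a backup repository configuration; the `fullMaintenanceInterval` parameter and its values come from the note above, while the configMap name and JSON layout are assumptions to be checked against the backup-repository-configuration documentation.

```bash
# Illustrative sketch only: ask for more frequent full maintenance of the kopia repository.
# Only fullMaintenanceInterval and its values (normalGC, fastGC, eagerGC) come from the
# release notes; the configMap name and key layout are assumptions.
cat > backup-repository-config.json <<'EOF'
{
  "kopia": {
    "fullMaintenanceInterval": "fastGC"
  }
}
EOF
kubectl -n velero create configmap backup-repository-config \
  --from-file=backup-repository-config.json
```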
|
||||
|
||||
#### Volume Policy enhancement of filtering volumes by PVC labels
|
||||
In v1.16, Volume Policy is extended to support filtering volumes by PVC labels. (https://github.com/vmware-tanzu/velero/issues/8256).
|
||||
|
||||
#### Resource Status restore per object
|
||||
In v1.16, users are allowed to define per object whether to restore its resource status, through the annotation `velero.io/restore-status` set on the object (https://github.com/vmware-tanzu/velero/issues/8204).
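For example, opting a single object into status restore might look like this; the annotation key comes from the note above, while the `true` value format and the target resource are assumptions to be checked against the v1.16 restore documentation.

```bash
# Illustrative sketch only: ask Velero to restore the status of this one object.
# The annotation key is from the release notes; the "true" value format is an assumption.
kubectl -n demo annotate deployment my-app velero.io/restore-status=true
```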
|
||||
|
||||
#### Velero Restore Helper binary is merged into Velero image
|
||||
In v1.16, the Velero binaries, i.e., velero, velero-helper, and velero-restore-helper, are all included in the single Velero image. (https://github.com/vmware-tanzu/velero/issues/8484).
|
||||
|
||||
### Runtime and dependencies
|
||||
Golang runtime: 1.23.7
|
||||
kopia: 0.19.0
|
||||
|
||||
### Limitations/Known issues
|
||||
#### Limitations of Windows support
|
||||
* fs-backup is not supported for Windows workloads, so fs-backup runs only on Linux nodes for Linux workloads
|
||||
* Backup/restore of NTFS extended attributes/advanced features are not supported, i.e., Security Descriptors, System/Hidden/ReadOnly attributes, Creation Time, NTFS Streams, etc.
|
||||
|
||||
### All Changes
|
||||
* Add third party annotation support for maintenance job, so that the declared third party annotations could be added to the maintenance job pods (#8812, @Lyndon-Li)
|
||||
* Fix issue #8803, use deterministic name to create backupRepository (#8808, @Lyndon-Li)
|
||||
* Refactor restoreItem and related functions to differentiate the backup resource name and the restore target resource name. (#8797, @blackpiglet)
|
||||
* ensure that PV is removed before VS is deleted (#8777, @ix-rzi)
|
||||
* host_pods should not be mandatory to node-agent (#8774, @mpryc)
|
||||
* Log doesn't show pv name, but displays %!s(MISSING) instead (#8771, @hu-keyu)
|
||||
* Fix issue #8754, add third party annotation support for data mover (#8770, @Lyndon-Li)
|
||||
* Add docs for volume policy with labels as a criteria (#8759, @shubham-pampattiwar)
|
||||
* Move pvc annotation removal from CSI RIA to regular PVC RIA (#8755, @sseago)
|
||||
* Add doc for maintenance history (#8747, @Lyndon-Li)
|
||||
* Fix issue #8733, add doc for restorePVC (#8737, @Lyndon-Li)
|
||||
* Fix issue #8426, add doc for Windows support (#8736, @Lyndon-Li)
|
||||
* Fix issue #8475, refactor build-from-source doc for hybrid image build (#8729, @Lyndon-Li)
|
||||
* Return directly if no pod volume backups are tracked (#8728, @ywk253100)
|
||||
* Fix issue #8706, for immediate volumes, there is no selected-node annotation on PVC, so deduce the attached node from VolumeAttachment CRs (#8715, @Lyndon-Li)
|
||||
* Add labels as a criteria for volume policy (#8713, @shubham-pampattiwar)
|
||||
* Copy SecurityContext from Containers[0] if present for PVR (#8712, @sseago)
|
||||
* Support pushing images to an insecure registry (#8703, @ywk253100)
|
||||
* Modify golangci configuration to make it work. (#8695, @blackpiglet)
|
||||
* Run backup post hooks inside ItemBlock synchronously (#8694, @ywk253100)
|
||||
* Add docs for object level status restore (#8693, @shubham-pampattiwar)
|
||||
* Clean artifacts generated during CSI B/R. (#8684, @blackpiglet)
|
||||
* Don't run maintenance on the ReadOnly BackupRepositories. (#8681, @blackpiglet)
|
||||
* Fix #8657: WaitGroup panic issue (#8679, @ywk253100)
|
||||
* Fixes issue #8214, validate `--from-schedule` flag in create backup command to prevent empty or whitespace-only values. (#8665, @aj-2000)
|
||||
* Implement parallel ItemBlock processing via backup_controller goroutines (#8659, @sseago)
|
||||
* Clean up leaked CSI snapshot for incomplete backup (#8637, @raesonerjt)
|
||||
* Handle update conflict when restoring the status (#8630, @ywk253100)
|
||||
* Fix issue #8419, support repo maintenance job to run on Windows nodes (#8626, @Lyndon-Li)
|
||||
* Always create DataUpload configmap in restore namespace (#8621, @sseago)
|
||||
* Fix issue #8091, avoid to create new repo when BSL is readonly (#8615, @Lyndon-Li)
|
||||
* Fix issue #8242, distribute dd evenly across nodes (#8611, @Lyndon-Li)
|
||||
* Fix issue #8497, update du/dd progress on completion (#8608, @Lyndon-Li)
|
||||
* Fix issue #8418, add Windows toleration to data mover pods (#8606, @Lyndon-Li)
|
||||
* Check the PVB status via podvolume Backupper rather than calling API server to avoid API server issue (#8603, @ywk253100)
|
||||
* Fix issue #8067, add tmp folder (/tmp for linux, C:\Windows\Temp for Windows) as an alternative of udmrepo's config file location (#8602, @Lyndon-Li)
|
||||
* Data mover restore for Windows (#8594, @Lyndon-Li)
|
||||
* Skip patching the PV in finalization for failed operation (#8591, @reasonerjt)
|
||||
* Fix issue #8579, set event burst to block event broadcaster from filtering events (#8590, @Lyndon-Li)
|
||||
* Configurable Kopia Maintenance Interval. backup-repository-configmap adds an option for configurable`fullMaintenanceInterval` where fastGC (12 hours), and eagerGC (6 hours) allowing for faster removal of deleted velero backups from kopia repo. (#8581, @kaovilai)
|
||||
* Fix issue #7753, recall repo maintenance history on Velero server restart (#8580, @Lyndon-Li)
|
||||
* Clear validation errors when schedule is valid (#8575, @ywk253100)
|
||||
* Merge restore helper image into Velero server image (#8574, @ywk253100)
|
||||
* Don't include excluded items in ItemBlocks (#8572, @sseago)
|
||||
* fs uploader and block uploader support Windows nodes (#8569, @Lyndon-Li)
|
||||
* Fix issue #8418, support data mover backup for Windows nodes (#8555, @Lyndon-Li)
|
||||
* Fix issue #8044, allow users to ignore delay binding the restorePVC of data mover when it is in WaitForFirstConsumer mode (#8550, @Lyndon-Li)
|
||||
* Fix issue #8539, validate uploader types when o.CRDsOnly is set to false only since CRD installation doesn't rely on uploader types (#8538, @Lyndon-Li)
|
||||
* Fix issue #7810, add maintenance history for backupRepository CRs (#8532, @Lyndon-Li)
|
||||
* Make fs-backup work on linux nodes with the new Velero deployment and disable fs-backup if the source/target pod is running in non-linux node (#8424) (#8518, @Lyndon-Li)
|
||||
* Fix issue: backup schedule pause/unpause doesn't work (#8512, @ywk253100)
|
||||
* Fix backup post hook issue #8159 (caused by #7571): always execute backup post hooks after PVBs are handled (#8509, @ywk253100)
|
||||
* Fix issue #8267, enhance the error message when expose fails (#8508, @Lyndon-Li)
|
||||
* Fix issue #8416, #8417, deploy Velero server and node-agent in linux/Windows hybrid env (#8504, @Lyndon-Li)
|
||||
* Design to add label selector as a criteria for volume policy (#8503, @shubham-pampattiwar)
|
||||
* Related to issue #8485, move the acceptedByNode and acceptedTimestamp to Status of DU/DD CRD (#8498, @Lyndon-Li)
|
||||
* Add SecurityContext to restore-helper (#8491, @reasonerjt)
|
||||
* Fix issue #8433, add third party labels to data mover pods when the same labels exist in node-agent pods (#8487, @Lyndon-Li)
|
||||
* Fix issue #8485, add an accepted time so as to count the prepare timeout (#8486, @Lyndon-Li)
|
||||
* Fix issue #8125, log diagnostic info for data mover exposers when expose timeout (#8482, @Lyndon-Li)
|
||||
* Fix issue #8415, implement multi-arch build and Windows build (#8476, @Lyndon-Li)
|
||||
* Pin kopia to 0.18.2 (#8472, @Lyndon-Li)
|
||||
* Add nil check for updating DataUpload VolumeInfo in finalizing phase (#8471, @blackpiglet)
|
||||
* Allowing Object-Level Resource Status Restore (#8464, @shubham-pampattiwar)
|
||||
* For issue #8429. Add the design for multi-arch build and windows build (#8459, @Lyndon-Li)
|
||||
* Upgrade go.mod k8s.io/ go.mod to v0.31.3 and implemented proper logger configuration for both client-go and controller-runtime libraries. This change ensures that logging format and level settings are properly applied throughout the codebase. The update improves logging consistency and control across the Velero system. (#8450, @kaovilai)
|
||||
* Add Design for Allowing Object-Level Resource Status Restore (#8403, @shubham-pampattiwar)
|
||||
* Fix issue #8391, check ErrCancelled from suffix of data mover pod's termination message (#8396, @Lyndon-Li)
|
||||
* Fix issue #8394, don't call closeDataPath in VGDP callbacks, otherwise, the VGDP cleanup will hang (#8395, @Lyndon-Li)
|
||||
* Adding support in velero Resource Policies for filtering PVs based on additional VolumeAttributes properties under CSI PVs (#8383, @mayankagg9722)
|
||||
* Add --item-block-worker-count flag to velero install and server (#8380, @sseago)
|
||||
* Make BackedUpItems thread safe (#8366, @sseago)
|
||||
* Include --annotations flag in backup and restore create commands (#8354, @alromeros)
|
||||
* Use aggregated discovery API to discovery API groups and resources (#8353, @ywk253100)
|
||||
* Copy "envFrom" from Velero server when creating maintenance jobs (#8343, @evhan)
|
||||
* Set hinting region to use for GetBucketRegion() in pkg/repository/config/aws.go (#8297, @kaovilai)
|
||||
* Bump up version of client-go and controller-runtime (#8275, @ywk253100)
|
||||
* fix(pkg/repository/maintenance): don't panic when there's no container statuses (#8271, @mcluseau)
|
||||
* Add Backup warning for inclusion of NS managed by ArgoCD (#8257, @shubham-pampattiwar)
|
||||
* Added tracking for deleted namespace status check in restore flow. (#8233, @sangitaray2021)
|
|
@ -0,0 +1 @@
|
|||
This PR aims to add s390x support to Velero binary.
|
|
@ -0,0 +1 @@
|
|||
Inherit k8s default volumeSnapshotClass.
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: backuprepositories.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -54,6 +54,13 @@ spec:
|
|||
description: MaintenanceFrequency is how often maintenance should
|
||||
be run.
|
||||
type: string
|
||||
repositoryConfig:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: RepositoryConfig is for repository-specific configuration
|
||||
fields.
|
||||
nullable: true
|
||||
type: object
|
||||
repositoryType:
|
||||
description: RepositoryType indicates the type of the backend repository
|
||||
enum:
|
||||
|
@ -81,8 +88,8 @@ spec:
|
|||
description: BackupRepositoryStatus is the current status of a BackupRepository.
|
||||
properties:
|
||||
lastMaintenanceTime:
|
||||
description: LastMaintenanceTime is the last time maintenance was
|
||||
run.
|
||||
description: LastMaintenanceTime is the last time repo maintenance
|
||||
succeeded.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
|
@ -97,6 +104,33 @@ spec:
|
|||
- Ready
|
||||
- NotReady
|
||||
type: string
|
||||
recentMaintenance:
|
||||
description: RecentMaintenance is status of the recent repo maintenance.
|
||||
items:
|
||||
properties:
|
||||
completeTimestamp:
|
||||
description: CompleteTimestamp is the completion time of the
|
||||
repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
message:
|
||||
description: Message is a message about the current status of
|
||||
the repo maintenance.
|
||||
type: string
|
||||
result:
|
||||
description: Result is the result of the repo maintenance.
|
||||
enum:
|
||||
- Succeeded
|
||||
- Failed
|
||||
type: string
|
||||
startTimestamp:
|
||||
description: StartTimestamp is the start time of the repo maintenance.
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: backups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -63,7 +63,6 @@ spec:
|
|||
DefaultVolumesToRestic specifies whether restic should be used to take a
|
||||
backup of all pod volumes by default.
|
||||
|
||||
|
||||
Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead.
|
||||
nullable: true
|
||||
type: boolean
|
||||
|
@ -176,11 +175,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -364,11 +365,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -425,11 +428,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: backupstoragelocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -86,10 +86,13 @@ spec:
|
|||
valid secret key.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must be defined
|
||||
|
@ -141,7 +144,6 @@ spec:
|
|||
description: |-
|
||||
AccessMode is an unused field.
|
||||
|
||||
|
||||
Deprecated: there is now an AccessMode field on the Spec and this field
|
||||
will be removed entirely as of v2.0.
|
||||
enum:
|
||||
|
@ -153,7 +155,6 @@ spec:
|
|||
LastSyncedRevision is the value of the `metadata/revision` file in the backup
|
||||
storage location the last time the BSL's contents were synced into the cluster.
|
||||
|
||||
|
||||
Deprecated: this field is no longer updated or used for detecting changes to
|
||||
the location's contents and will be removed entirely in v2.0.
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: deletebackuprequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: downloadrequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: podvolumebackups.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -96,7 +96,6 @@ spec:
|
|||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: podvolumerestores.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -93,7 +93,6 @@ spec:
|
|||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: restores.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -138,11 +138,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -291,11 +293,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -354,11 +358,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: schedules.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -102,7 +102,6 @@ spec:
|
|||
DefaultVolumesToRestic specifies whether restic should be used to take a
|
||||
backup of all pod volumes by default.
|
||||
|
||||
|
||||
Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead.
|
||||
nullable: true
|
||||
type: boolean
|
||||
|
@ -215,11 +214,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -405,11 +406,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
@ -466,11 +469,13 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: serverstatusrequests.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: volumesnapshotlocations.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -57,10 +57,13 @@ spec:
|
|||
valid secret key.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must be defined
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: datadownloads.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -92,6 +92,13 @@ spec:
|
|||
DataMover specifies the data mover to be used by the backup.
|
||||
If DataMover is "" or "velero", the built-in data mover will be used.
|
||||
type: string
|
||||
nodeOS:
|
||||
description: NodeOS is OS of the node where the DataDownload is processed.
|
||||
enum:
|
||||
- auto
|
||||
- linux
|
||||
- windows
|
||||
type: string
|
||||
operationTimeout:
|
||||
description: |-
|
||||
OperationTimeout specifies the time used to wait internal operations,
|
||||
|
@ -136,6 +143,16 @@ spec:
|
|||
status:
|
||||
description: DataDownloadStatus is the current status of a DataDownload.
|
||||
properties:
|
||||
acceptedByNode:
|
||||
description: Node is name of the node where the DataUpload is prepared.
|
||||
type: string
|
||||
acceptedTimestamp:
|
||||
description: |-
|
||||
AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
The server's time is used for AcceptedTimestamp
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
completionTimestamp:
|
||||
description: |-
|
||||
CompletionTimestamp records the time a restore was completed.
|
||||
|
|
|
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
name: datauploads.velero.io
|
||||
spec:
|
||||
group: velero.io
|
||||
|
@ -143,6 +143,17 @@ spec:
|
|||
status:
|
||||
description: DataUploadStatus is the current status of a DataUpload.
|
||||
properties:
|
||||
acceptedByNode:
|
||||
description: AcceptedByNode is name of the node where the DataUpload
|
||||
is prepared.
|
||||
type: string
|
||||
acceptedTimestamp:
|
||||
description: |-
|
||||
AcceptedTimestamp records the time the DataUpload is to be prepared.
|
||||
The server's time is used for AcceptedTimestamp
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
completionTimestamp:
|
||||
description: |-
|
||||
CompletionTimestamp records the time a backup was completed.
|
||||
|
@ -165,6 +176,13 @@ spec:
|
|||
node:
|
||||
description: Node is name of the node where the DataUpload is processed.
|
||||
type: string
|
||||
nodeOS:
|
||||
description: NodeOS is OS of the node where the DataUpload is processed.
|
||||
enum:
|
||||
- auto
|
||||
- linux
|
||||
- windows
|
||||
type: string
|
||||
path:
|
||||
description: Path is the full path of the snapshot volume being backed
|
||||
up.
|
||||
|
|
|
@ -8,17 +8,7 @@ rules:
|
|||
- ""
|
||||
resources:
|
||||
- persistentvolumerclaims
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
|
@ -26,6 +16,18 @@ rules:
|
|||
- velero.io
|
||||
resources:
|
||||
- backuprepositories
|
||||
- backups
|
||||
- backupstoragelocations
|
||||
- datadownloads
|
||||
- datauploads
|
||||
- deletebackuprequests
|
||||
- downloadrequests
|
||||
- podvolumebackups
|
||||
- podvolumerestores
|
||||
- restores
|
||||
- schedules
|
||||
- serverstatusrequests
|
||||
- volumesnapshotlocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
|
@ -38,239 +40,18 @@ rules:
|
|||
- velero.io
|
||||
resources:
|
||||
- backuprepositories/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backups
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backups/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backupstoragelocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- backupstoragelocations/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datadownloads
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datadownloads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datauploads
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- datauploads/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- deletebackuprequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- deletebackuprequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- downloadrequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- downloadrequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumebackups
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumebackups/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumerestores
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- podvolumerestores/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- restores
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- restores/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- schedules
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- schedules/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- serverstatusrequests
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- serverstatusrequests/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- velero.io
|
||||
resources:
|
||||
- volumesnapshotlocations
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
|
|
|
@ -76,7 +76,7 @@ volumePolicies:
|
|||
- Update VolumePolicy action type validation to account for `fs-backup` and `snapshot` as valid VolumePolicy actions.
|
||||
- Modifications needed for `fs-backup` action:
|
||||
- Now based on the specification of volume policy on backup request we will decide whether to go via legacy pod annotations approach or the newer volume policy based fs-backup action approach.
|
||||
- If there is a presence of volume policy(fs-backup/snapshot) on the backup request that matches as an action for a volume we use the newer volume policy approach to get the list of the volumes for `fs-backup` action
|
||||
- If there is a presence of volume policy(fs-backup/snapshot) on the backup request that matches as an action for a volume we use the newer volume policy approach to get the list of the volumes for `fs-backup` action
|
||||
- Else continue with the annotation based legacy approach workflow.
|
||||
|
||||
- Modifications needed for `snapshot` action:
|
||||
|
@ -276,7 +276,7 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
|
|||
|
||||
if !boolptr.IsSetToFalse(v.snapshotVolumes) {
|
||||
// If the backup.Spec.SnapshotVolumes is not set, or set to true, then should take the snapshot.
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false")
|
||||
v.logger.Infof("performing snapshot action for pv %s as the snapshotVolumes is not set to false", pv.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,370 @@
|
|||
# Velero Backup Performance Improvements and VolumeGroupSnapshot Enablement
|
||||
|
||||
There are two different goals here, linked by a single primary missing feature in the Velero backup workflow.
|
||||
The first goal is to enhance backup performance by allowing the primary backup controller to run in multiple threads, enabling Velero to back up multiple items at the same time for a given backup.
|
||||
The second goal is to enable Velero to eventually support VolumeGroupSnapshots.
|
||||
For both of these goals, Velero needs a way to determine which items should be backed up together.
|
||||
|
||||
This design proposal will include two development phases:
|
||||
- Phase 1 will refactor the backup workflow to identify blocks of related items that should be backed up together, and then coordinate backup hooks among items in the block.
|
||||
- Phase 2 will add multiple worker threads for backing up item blocks: instead of backing up each block as it is identified, the Velero backup workflow will add the block to a channel, and one of the workers will pick it up.
|
||||
- Actual support for VolumeGroupSnapshots is out-of-scope here and will be handled in a future design proposal, but the item block refactor introduced in Phase 1 is a primary building block for this future proposal.
|
||||
|
||||
## Background
|
||||
Currently, during backup processing, the main Velero backup controller runs in a single thread, completely finishing the primary backup processing for one resource before moving on to the next one.
|
||||
We can improve the overall backup performance by backing up multiple items for a backup at the same time, but before we can do this we must first identify resources that need to be backed up together.
|
||||
Generally speaking, resources that need to be backed up together are resources with interdependencies -- pods with their PVCs, PVCs with their PVs, groups of pods that form a single application, CRs, pods, and other resources that belong to the same operator, etc.
|
||||
As part of this initial refactoring, once these "Item Blocks" are identified, an additional change will be to move pod hook processing up to the ItemBlock level.
|
||||
If there are multiple pods in the ItemBlock, pre-hooks for all pods will be run before backing up the items, followed by post-hooks for all pods.
|
||||
This change to hook processing is another prerequisite for future VolumeGroupSnapshot support, since supporting this will require backing up the pods and volumes together for any volumes which belong to the same group.
|
||||
Once we are backing up items by block, the next step will be to create multiple worker threads to process and back up ItemBlocks, so that we can back up multiple ItemBlocks at the same time.
|
||||
|
||||
In looking at the different kinds of large backups that Velero must deal with, two obvious scenarios come to mind:
|
||||
1. Backups with a relatively small number of large volumes
|
||||
2. Backups with a large number of relatively small volumes.
|
||||
|
||||
In case 1, the majority of the time spent on the backup is in the asynchronous phases -- CSI snapshot creation actions after the snapshot handle exists, and DataUpload processing. In that case, parallel item processing will likely have a minimal impact on overall backup completion time.
|
||||
|
||||
In case 2, the majority of the time spent on the backup will likely be during the synchronous actions. For CSI snapshot creation in particular, waiting for the VolumeSnapshotContent's snapshot handle to exist will consume a significant amount of time when there are thousands of volumes. This is the sort of use case which will benefit the most from parallel item processing.
|
||||
|
||||
## Goals
|
||||
- Identify groups of related items to back up together (ItemBlocks).
|
||||
- Manage backup hooks at the ItemBlock level rather than per-item.
|
||||
- Using worker threads, back up ItemBlocks at the same time.
|
||||
|
||||
## Non Goals
|
||||
- Support VolumeGroupSnapshots: this is a future feature, although certain prerequisites for this enhancement are included in this proposal.
|
||||
- Process multiple backups in parallel: this is a future feature, although certain prerequisites for this enhancement are included in this proposal.
|
||||
- Refactoring plugin infrastructure to avoid RPC calls for internal plugins.
|
||||
- Restore performance improvements: this is potentially a future feature
|
||||
|
||||
## High-Level Design
|
||||
|
||||
### ItemBlock concept
|
||||
|
||||
The updated design is based on a new struct/type called `ItemBlock`.
|
||||
Essentially, an `ItemBlock` is a group of items that must be backed up together in order to guarantee backup integrity.
|
||||
When we eventually split item backup across multiple worker threads, `ItemBlocks` will be kept together as the basic unit of backup.
|
||||
To facilitate this, a new plugin type, `ItemBlockAction`, will allow relationships between items to be identified by Velero -- any resources that must be backed up with other resources will need IBA plugins defined for them.
|
||||
Examples of `ItemBlocks` include:
|
||||
1. A pod, its mounted PVCs, and the bound PVs for those PVCs.
|
||||
2. A VolumeGroup (related PVCs and PVs) along with any pods mounting these volumes.
|
||||
3. For a ReadWriteMany PVC, the PVC, its bound PV, and all pods mounting this PVC.
|
||||
|
||||
### Phase 1: ItemBlock processing
|
||||
- A new plugin type, `ItemBlockAction`, will be created
|
||||
- `ItemBlockAction` will contain the API method `GetRelatedItems`, which will be needed for determining which items to group together into `ItemBlocks`.
|
||||
- When processing the list of items returned from the item collector, instead of simply calling `BackupItem` on each in turn, we will use the `GetRelatedItems` API call to determine other items to include with the current item in an ItemBlock. Repeat recursively on each item returned.
|
||||
- Don't include an item in more than one ItemBlock -- if the next item from the item collector is already in a block, skip it.
|
||||
- Once ItemBlock is determined, call new func `BackupItemBlock` instead of `BackupItem`.
|
||||
- New func `BackupItemBlock` will call pre hooks for any pods in the block, then back up the items in the block (`BackupItem` will no longer run hooks directly), then call post hooks for any pods in the block.
|
||||
- The finalize phase will not be affected by the ItemBlock design, since this is just updating resources after async operations are completed on the items and there is no need to run these updates in parallel.
|
||||
|
||||
### Phase 2: Process ItemBlocks for a single backup in multiple threads
|
||||
- Concurrent `BackupItemBlock` operations will be executed by worker threads invoked by the backup controller, which will communicate with the backup controller operation via a shared channel.
|
||||
- The ItemBlock processing loop implemented in Phase 1 will be modified to send each newly-created ItemBlock to the shared channel rather than calling `BackupItemBlock` inline.
|
||||
- Users will be able to configure the number of workers available for concurrent `BackupItemBlock` operations.
|
||||
- Access to the BackedUpItems map must be synchronized
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Phase 1: ItemBlock processing
|
||||
|
||||
#### New ItemBlockAction plugin type
|
||||
|
||||
In order for Velero to identify groups of items to back up together in an ItemBlock, we need a way to identify items which need to be backed up along with the current item. While the current `Execute` BackupItemAction method does return a list of additional items which are required by the current item, we need to know this *before* we start the item backup. To support this, we need a new plugin type, `ItemBlockAction` (IBA), with an API method, `GetRelatedItems`, which Velero will call on each item as it is processed. The expectation is that the registered IBA plugins will return the same items as are returned as additional items by the BIA `Execute` method, with the exception that items which are not created until `Execute` is called should not be returned here, as they don't exist yet.
|
||||
|
||||
#### Proto definition (compiled into golang by protoc)
|
||||
|
||||
The ItemBlockAction plugin type is defined as follows:
|
||||
```
|
||||
service ItemBlockAction {
|
||||
rpc AppliesTo(ItemBlockActionAppliesToRequest) returns (ItemBlockActionAppliesToResponse);
|
||||
rpc GetRelatedItems(ItemBlockActionGetRelatedItemsRequest) returns (ItemBlockActionGetRelatedItemsResponse);
|
||||
}
|
||||
|
||||
message ItemBlockActionAppliesToRequest {
|
||||
string plugin = 1;
|
||||
}
|
||||
|
||||
message ItemBlockActionAppliesToResponse {
|
||||
ResourceSelector ResourceSelector = 1;
|
||||
}
|
||||
|
||||
message ItemBlockActionGetRelatedItemsRequest {
|
||||
string plugin = 1;
|
||||
bytes item = 2;
|
||||
bytes backup = 3;
|
||||
}
|
||||
|
||||
message ItemBlockActionGetRelatedItemsResponse {
|
||||
repeated generated.ResourceIdentifier relatedItems = 1;
|
||||
}
|
||||
```
|
||||
|
||||
A new PluginKind, `ItemBlockAction`, will be created, and the backup process will be modified to use this plugin kind.
|
||||
|
||||
Any BIA plugin which returns additional items from `Execute()` that need to be backed up at the same time as, or sequentially in the same worker thread as, the current item should have a corresponding IBA plugin added to return these same items (minus any which won't exist before BIA `Execute()` is called).
|
||||
This mainly applies to plugins that operate on pods which reference resources that must be backed up along with the pod and are potentially affected by pod hooks, or to plugins which connect multiple pods whose volumes should be backed up at the same time.
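For reference, here is a minimal sketch of the Go interface an IBA plugin would implement, inferred from the proto definition above and the `PodAction` example later in this document; the package name, import paths, and exact generated interface are assumptions and may differ from the final implementation:
```go
package ibav1 // illustrative package name

import (
	"k8s.io/apimachinery/pkg/runtime"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// ItemBlockAction is the assumed shape of the IBA plugin interface.
type ItemBlockAction interface {
	// Name returns the registered name of this plugin.
	Name() string

	// AppliesTo returns the resources this plugin acts on.
	AppliesTo() (velero.ResourceSelector, error)

	// GetRelatedItems returns identifiers of the items that must be grouped
	// into the same ItemBlock as the given item.
	GetRelatedItems(item runtime.Unstructured, backup *velerov1api.Backup) ([]velero.ResourceIdentifier, error)
}
```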
|
||||
|
||||
### Changes to processing item list from the Item Collector
|
||||
|
||||
#### New structs BackupItemBlock, ItemBlock, and ItemBlockItem
|
||||
```go
|
||||
package backup
|
||||
|
||||
type BackupItemBlock struct {
|
||||
itemblock.ItemBlock
|
||||
// This is a reference to the shared itemBackupper for the backup
|
||||
itemBackupper *itemBackupper
|
||||
}
|
||||
|
||||
package itemblock
|
||||
|
||||
type ItemBlock struct {
|
||||
Log logrus.FieldLogger
|
||||
Items []ItemBlockItem
|
||||
}
|
||||
|
||||
type ItemBlockItem struct {
|
||||
Gr schema.GroupResource
|
||||
Item *unstructured.Unstructured
|
||||
PreferredGVR schema.GroupVersionResource
|
||||
}
|
||||
```
|
||||
|
||||
#### Current workflow
|
||||
In the `BackupWithResolvers` func, the current Velero implementation iterates over the list of items for backup returned by the Item Collector. For each item, Velero loads the item from the file created by the Item Collector, calls `backupItem`, updates the GR map if successful, removes the (temporary) file containing the item metadata, and updates progress for the backup.
|
||||
|
||||
#### Modifications to the loop over ItemCollector results
|
||||
The `kubernetesResource` struct used by the item collector will be modified to add an `orderedResource` bool which will be set true for all of the resources moved to the beginning for each GroupResource as a result of being ordered resources.
|
||||
In addition, an `inItemBlock` bool is added to the struct, which will be set to true later, while processing the list, as each item is added to an ItemBlock.
|
||||
While the item collector already puts ordered resources first for each GR, there is no indication in the list which of these initial items are from the ordered resources list and which are the remaining (unordered) items.
|
||||
Velero needs to know which resources are ordered because when we process them later, the ordered resources for each GroupResource must be processed sequentially in a single ItemBlock.
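A sketch of the modified struct is shown below; only `orderedResource` and `inItemBlock` are the additions proposed by this design, and the remaining field names are illustrative stand-ins for the existing item collector fields:
```go
// kubernetesResource is the item collector's per-item record (sketch only).
type kubernetesResource struct {
	groupResource schema.GroupResource
	preferredGVR  schema.GroupVersionResource
	namespace     string
	name          string
	path          string // temp file the item collector wrote the item content to

	// New fields proposed by this design:
	orderedResource bool // item came from the user-specified ordered-resources list
	inItemBlock     bool // set once the item has been added to an ItemBlock
}
```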
|
||||
|
||||
The current workflow within each iteration of the ItemCollector.items loop will be replaced with the following (a simplified Go sketch follows the list):
|
||||
- (note that some of the below should be pulled out into a helper func to facilitate calling it recursively for items returned from `GetRelatedItems`.)
|
||||
- Before loop iteration, create a pointer to a `BackupItemBlock` which will represent the current ItemBlock being processed.
|
||||
- If `item` has `inItemBlock==true`, continue. This one has already been processed.
|
||||
- If current `itemBlock` is nil, create it.
|
||||
- Add `item` to `itemBlock`.
|
||||
- Load item from ItemCollector file. Close/remove file after loading (on error return or not, possibly with similar anonymous func to current impl)
|
||||
- If other versions of the same item exist (via EnableAPIGroupVersions), add these to the `itemBlock` as well (and load from ItemCollector file)
|
||||
- Get matching IBA plugins for item, call `GetRelatedItems` for each. For each item returned, get full item content from ItemCollector (if present in item list, pulling from file, removing file when done) or from cluster (if not present in item list), add item to the current block, add item to `itemsInBlock` map, and then recursively apply current step to each (i.e. call IBA method, add to block, etc.)
|
||||
- If current item and next item are both ordered items for the same GR, then continue to next item, adding to current `itemBlock`.
|
||||
- Once the full item list for the ItemBlock is generated, call the new func `backupItemBlock(block ItemBlock)`.
|
||||
- Add `backupItemBlock` return values to `backedUpGroupResources` map
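Below is a simplified Go sketch of the loop described above. `addItemAndRelatedItems` is an illustrative helper standing in for the logic that loads an item's content, calls the matching IBA plugins' `GetRelatedItems`, and recursively adds the returned items to the block; `items`, `itemBackupper`, and `backedUpGroupResources` are assumed to be in scope as in the current implementation:
```go
for i := 0; i < len(items); i++ {
	if items[i].inItemBlock {
		continue // already pulled into an earlier block via a related-items lookup
	}
	block := &BackupItemBlock{itemBackupper: itemBackupper}
	addItemAndRelatedItems(block, &items[i])

	// Ordered resources for the same GroupResource stay in one block, in order.
	for i+1 < len(items) &&
		items[i].orderedResource && items[i+1].orderedResource &&
		items[i+1].groupResource == items[i].groupResource {
		i++
		addItemAndRelatedItems(block, &items[i])
	}

	for _, gr := range kb.backupItemBlock(*block) {
		backedUpGroupResources[gr] = true
	}
}
```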
|
||||
|
||||
|
||||
#### New func `backupItemBlock`
|
||||
|
||||
Method signature for new func `backupItemBlock` is as follows:
|
||||
```go
|
||||
func (kb *kubernetesBackupper) backupItemBlock(block BackupItemBlock) []schema.GroupResource
|
||||
```
|
||||
The return value is a slice of GRs for resources which were backed up. Velero tracks these to determine which CRDs need to be included in the backup. Note that we need to make sure this includes not only those resources that were backed up directly, but also those backed up indirectly via the additional items returned by BIA `Execute` calls.
|
||||
|
||||
In order to handle backup hooks, this func will first take the input item list (`block.items`) and get a list of included pods, filtered to include only those not yet backed up (using `block.itemBackupper.backupRequest.BackedUpItems`). Iterate over this list and execute pre hooks (pulled out of `itemBackupper.backupItemInternal`) for each item.
|
||||
Now iterate over the full list (`block.items`) and call `backupItem` for each. After the first item, the later items should already have been backed up, but calling a second time is harmless, since the first thing Velero does is check the `BackedUpItems` map and exit if the item is already backed up. We still need this call in case there's a plugin which returns something in `GetRelatedItems` but forgets to return it in the `Execute` additional items return value. If we don't do this, we could end up missing items.
|
||||
|
||||
After backing up the items in the block, we then execute post hooks using the same filtered item list we used for pre hooks, again taking the logic from `itemBackupper.backupItemInternal`.
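A hedged sketch of how `backupItemBlock` might be structured based on the description above; `getPodsNotYetBackedUp`, `executePreHooks`, and `executePostHooks` are illustrative names for the hook logic to be pulled out of `itemBackupper.backupItemInternal`, and the exact `backupItem` signature is also assumed:
```go
func (kb *kubernetesBackupper) backupItemBlock(block BackupItemBlock) []schema.GroupResource {
	// Run pre hooks for pods in the block that have not been backed up yet.
	hookPods := getPodsNotYetBackedUp(block) // filters on block.itemBackupper.backupRequest.BackedUpItems
	for _, pod := range hookPods {
		executePreHooks(block.itemBackupper, pod)
	}

	var backedUp []schema.GroupResource
	for _, item := range block.Items {
		// backupItem exits early if the item is already in BackedUpItems.
		if kb.backupItem(block.Log, item.Gr, block.itemBackupper, item.Item, item.PreferredGVR) {
			backedUp = append(backedUp, item.Gr)
		}
	}

	// Run post hooks for the same pods after every item in the block is backed up.
	for _, pod := range hookPods {
		executePostHooks(block.itemBackupper, pod)
	}
	return backedUp
}
```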
|
||||
|
||||
#### `itemBackupper.backupItemInternal` cleanup
|
||||
|
||||
After implementing backup hooks in `backupItemBlock`, hook processing should be removed from `itemBackupper.backupItemInternal`.
|
||||
|
||||
### Phase 2: Process ItemBlocks for a single backup in multiple threads
|
||||
|
||||
#### New input field for number of ItemBlock workers
|
||||
|
||||
The velero installer and server CLIs will get a new input field `itemBlockWorkerCount`, which will be passed along to the `backupReconciler`.
|
||||
The `backupReconciler` struct will also have this new field added.
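Assuming the flag is spelled `--item-block-worker-count`, as referenced by #8380, usage would look like:
```
velero install --item-block-worker-count 5
velero server --item-block-worker-count 5
```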
|
||||
|
||||
#### Worker pool for item block processing
|
||||
|
||||
A new type, `ItemBlockWorkerPool`, will be added which will manage a pool of worker goroutines that process ItemBlocks, a shared input channel for passing blocks to workers, and a WaitGroup to shut down cleanly when the reconciler exits.
|
||||
```go
|
||||
type ItemBlockWorkerPool struct {
|
||||
itemBlockChannel chan ItemBlockInput
|
||||
wg *sync.WaitGroup
|
||||
logger logrus.FieldLogger
|
||||
}
|
||||
|
||||
type ItemBlockInput struct {
|
||||
itemBlock *BackupItemBlock
|
||||
returnChan chan ItemBlockReturn
|
||||
}
|
||||
|
||||
type ItemBlockReturn struct {
|
||||
itemBlock *BackupItemBlock
|
||||
resources []schema.GroupResource
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *ItemBlockWorkerPool) getInputChannel() chan ItemBlockInput
|
||||
func StartItemBlockWorkerPool(context context.Context, workers int, logger logrus.FieldLogger) ItemBlockWorkerPool
|
||||
func processItemBlockWorker(context context.Context, itemBlockChannel chan ItemBlockInput, logger logrus.FieldLogger, wg *sync.WaitGroup)
|
||||
```
|
||||
|
||||
The worker pool will be started by calling `StartItemBlockWorkerPool` in `NewBackupReconciler()`, passing in the worker count and reconciler context.
|
||||
`backupreconciler.prepareBackupRequest` will also add the input channel to the `backupRequest` so that it will be available during backup processing.
|
||||
The func `StartItemBlockWorkerPool` will create the `ItemBlockWorkerPool` with a shared buffered input channel (fixed buffer size) and start `workers` goroutines which will each call `processItemBlockWorker`.
|
||||
The `processItemBlockWorker` func (run by the worker goroutines) will read from `itemBlockChannel`, call `BackupItemBlock` on the retrieved `ItemBlock`, send the return value to the retrieved `returnChan`, and then process the next block.
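An illustrative sketch of this worker loop is shown below; `backupBlock` is an assumed helper standing in for however the worker invokes `BackupItemBlock` on the retrieved block:
```go
func processItemBlockWorker(ctx context.Context, itemBlockChannel chan ItemBlockInput, logger logrus.FieldLogger, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case input := <-itemBlockChannel:
			logger.Debug("ItemBlock worker picked up a block")
			// Back up the block and report the result to this backup's return channel.
			resources, err := backupBlock(input.itemBlock)
			input.returnChan <- ItemBlockReturn{
				itemBlock: input.itemBlock,
				resources: resources,
				err:       err,
			}
		case <-ctx.Done():
			logger.Info("stopping ItemBlock worker")
			return
		}
	}
}
```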
|
||||
|
||||
#### Modify ItemBlock processing loop to send ItemBlocks to the worker pool rather than backing them up directly
|
||||
|
||||
The ItemBlock processing loop implemented in Phase 1 will be modified to send each newly-created ItemBlock to the shared channel rather than calling `BackupItemBlock` inline, using a WaitGroup to manage in-process items. A separate goroutine will be created to process returns for this backup. After completion of the ItemBlock processing loop, velero will use the WaitGroup to wait for all ItemBlock processing to complete before moving forward.
|
||||
|
||||
A simplified example of what this response goroutine might look like:
|
||||
```go
|
||||
// omitting cancel handling, context, etc
|
||||
ret := make(chan ItemBlockReturn)
|
||||
wg := &sync.WaitGroup{}
|
||||
// Handle returns
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case response := <-ret: // process each BackupItemBlock response
|
||||
func() {
|
||||
defer wg.Done()
|
||||
responses = append(responses, response)
|
||||
}()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Simplified illustration, looping over and assumed already-determined ItemBlock list
|
||||
for _, itemBlock := range itemBlocks {
|
||||
wg.Add(1)
|
||||
inputChan <- ItemBlockInput{itemBlock: itemBlock, returnChan: ret}
|
||||
}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
wg.Wait()
|
||||
}()
|
||||
// Wait for all the ItemBlocks to be processed
|
||||
select {
|
||||
case <-done:
|
||||
logger.Info("done processing ItemBlocks")
|
||||
}
|
||||
// responses from BackupItemBlock calls are in responses
|
||||
```
|
||||
|
||||
When processing the responses, the main thing is to set `backedUpGroupResources[item.groupResource]=true` for each GR returned, which will give the same result as the current implementation calling items one-by-one and setting that field as needed.
|
||||
|
||||
The ItemBlock processing loop described above will be split into two separate iterations. For the first iteration, velero will only process those items at the beginning of the loop identified as `orderedResources` -- when the groups generated from these resources are passed to the worker channel, velero will wait for the response before moving on to the next ItemBlock.
|
||||
This is to ensure that the ordered resources are processed in the required order. Once the last ordered resource is processed, the remaining ItemBlocks will be processed and sent to the worker channel without waiting for a response, in order to allow these ItemBlocks to be processed in parallel.
|
||||
The reason we must execute `ItemBlocks` with ordered resources first (and one at a time) is that this is a list of resources identified by the user as resources which must be backed up first, and in a particular order.
|
||||
|
||||
#### Synchronize access to the BackedUpItems map
|
||||
|
||||
Velero uses a map of BackedUpItems to track which items have already been backed up. This prevents velero from attempting to back up an item more than once, as well as guarding against creating infinite loops due to circular dependencies in the additional items returns. Since velero will now be accessing this map from the parallel goroutines, access to the map must be synchronized with mutexes.
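A minimal sketch of one way to guard the map is shown below; `itemKey` stands in for the existing key type used by the backup request, and Velero's actual implementation may differ:
```go
// backedUpItemsMap wraps the BackedUpItems map with a mutex so that multiple
// ItemBlock worker goroutines can safely check and record items.
type backedUpItemsMap struct {
	lock  sync.RWMutex
	items map[itemKey]struct{}
}

func (m *backedUpItemsMap) Has(key itemKey) bool {
	m.lock.RLock()
	defer m.lock.RUnlock()
	_, ok := m.items[key]
	return ok
}

func (m *backedUpItemsMap) Add(key itemKey) {
	m.lock.Lock()
	defer m.lock.Unlock()
	m.items[key] = struct{}{}
}
```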
|
||||
|
||||
### Backup Finalize phase
|
||||
|
||||
The finalize phase will not be affected by the ItemBlock design, since this is just updating resources after async operations are completed on the items and there is no need to run these updates in parallel.
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
### BackupItemAction v3 API
|
||||
|
||||
Instead of adding a new `ItemBlockAction` plugin type, we could add a `GetAdditionalItems` method to BackupItemAction.
|
||||
This was rejected because the new plugin type provides a cleaner interface, and keeps the function of grouping related items separate from the function of modifying item content for the backup.
|
||||
|
||||
### Per-backup worker pool
|
||||
|
||||
The current design makes use of a permanent worker pool, started at backup controller startup time. With this design, when we follow on with running multiple backups in parallel, the same set of workers will take ItemBlock inputs from more than one backup. Another approach that was initially considered was a temporary worker pool, created while processing a backup, and deleted upon backup completion.
|
||||
|
||||
#### User-visible API differences between the two approaches
|
||||
|
||||
The main user-visible difference here is in the configuration API. For the permanent worker approach, the worker count represents the total worker count for all backups. The concurrent backup count represents the number of backups running at the same time. At any given time, though, the maximum number of worker threads backing up items concurrently is equal to the worker count. If worker count is 15 and the concurrent backup count is 3, then there will be, at most, 15 items being processed at the same time, split among up to three running backups.
|
||||
|
||||
For the per-backup worker approach, the worker count represents the worker count for each backup. The concurrent backup count, as before, represents the number of backups running at the same time. If worker count is 15 and the concurrent backup count is 3, then there will be, at most, 45 items being processed at the same time, up to 15 for each of up to three running backups.
|
||||
#### Comparison of the two approaches
|
||||
|
||||
- Permanent worker pool advantages:
|
||||
- This is the more commonly-followed Kubernetes pattern. It's generally better to follow standard practices, unless there are genuine reasons for the use case to go in a different way.
|
||||
- It's easier for users to understand the maximum number of concurrent items processed, which will have performance impact and impact on the resource requirements for the Velero pod. Users will not have to multiply the config numbers in their heads when working out how many total workers are present.
|
||||
- It will give us more flexibility for future enhancements around concurrent backups. One possible use case: backup priority. Maybe a user wants scheduled backups to have a lower priority than user-generated backups, since a user is sitting there waiting for completion -- a shared worker pool could react to the priority by taking ItemBlocks for the higher priority backup first, which would allow a large lower-priority backup's items to be preempted by a higher-priority backup's items without needing to explicitly stop the main controller flow for that backup.
|
||||
- Per-backup worker pool advantages:
|
||||
- Lower memory consumption than permanent worker pool, but the total memory used by a worker blocked on input will be pretty low, so if we're talking only 10-20 workers, the impact will be minimal.
|
||||
|
||||
## Compatibility
|
||||
|
||||
### Example IBA implementation for BIA plugins which return additional items
|
||||
|
||||
Included below is an example of what might be required for a BIA plugin which returns additional items.
|
||||
The code is taken from the internal velero `pod_action.go` which identifies the items required for a given pod.
|
||||
|
||||
In this particular case, the only function of pod_action is to return additional items, so we can really just convert this plugin to an IBA plugin. If there were other actions, such as modifying the pod content on backup, then we would still need the pod action, and the related items vs. content manipulation functions would need to be separated.
|
||||
|
||||
```go
|
||||
// PodAction implements ItemBlockAction.
|
||||
type PodAction struct {
|
||||
log logrus.FieldLogger
|
||||
}
|
||||
|
||||
// NewPodAction creates a new ItemAction for pods.
|
||||
func NewPodAction(logger logrus.FieldLogger) *PodAction {
|
||||
return &PodAction{log: logger}
|
||||
}
|
||||
|
||||
// AppliesTo returns a ResourceSelector that applies only to pods.
|
||||
func (a *PodAction) AppliesTo() (velero.ResourceSelector, error) {
|
||||
return velero.ResourceSelector{
|
||||
IncludedResources: []string{"pods"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetRelatedItems scans the pod's spec.volumes for persistentVolumeClaim volumes and returns a
|
||||
// ResourceIdentifier list containing references to all of the persistentVolumeClaim volumes used by
|
||||
// the pod, plus the pod's PriorityClass if one is set. This ensures that when a pod is backed up, all related items are backed up too.
|
||||
func (a *PodAction) GetRelatedItems(item runtime.Unstructured, backup *v1.Backup) ([]velero.ResourceIdentifier, error) {
|
||||
pod := new(corev1api.Pod)
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), pod); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
var relatedItems []velero.ResourceIdentifier
|
||||
if pod.Spec.PriorityClassName != "" {
|
||||
a.log.Infof("Adding priorityclass %s to relatedItems", pod.Spec.PriorityClassName)
|
||||
relatedItems = append(relatedItems, velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.PriorityClasses,
|
||||
Name: pod.Spec.PriorityClassName,
|
||||
})
|
||||
}
|
||||
|
||||
if len(pod.Spec.Volumes) == 0 {
|
||||
a.log.Info("pod has no volumes")
|
||||
return relatedItems, nil
|
||||
}
|
||||
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName != "" {
|
||||
a.log.Infof("Adding pvc %s to relatedItems", volume.PersistentVolumeClaim.ClaimName)
|
||||
|
||||
relatedItems = append(relatedItems, velero.ResourceIdentifier{
|
||||
GroupResource: kuberesource.PersistentVolumeClaims,
|
||||
Namespace: pod.Namespace,
|
||||
Name: volume.PersistentVolumeClaim.ClaimName,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return relatedItems, nil
|
||||
}
|
||||
|
||||
// Name returns the name of this plugin.
|
||||
func (a *PodAction) Name() string {
|
||||
return "PodAction"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
## Implementation
|
||||
Phase 1 and Phase 2 could be implemented within the same Velero release cycle, but they need not be.
|
||||
Phase 1 is expected to be implemented in Velero 1.15.
|
||||
Phase 2 is expected to be implemented in Velero 1.16.
|
|
@ -0,0 +1,94 @@
|
|||
# Backup PVC Configuration Design
|
||||
|
||||
## Glossary & Abbreviation
|
||||
|
||||
**Velero Generic Data Path (VGDP)**: VGDP is the collective set of modules introduced in the [Unified Repository design][1]. Velero uses these modules to perform data transfer for various purposes (e.g., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include the uploaders and the backup repository.
|
||||
|
||||
**Exposer**: Exposer is a module that is introduced in [Volume Snapshot Data Movement Design][2]. Velero uses this module to expose the volume snapshots to Velero node-agent pods or node-agent associated pods so as to complete the data movement from the snapshots.
|
||||
|
||||
**backupPVC**: The intermediate PVC created by the exposer for VGDP to access data from; see the [Volume Snapshot Data Movement Design][2] for more details.
|
||||
|
||||
**backupPod**: The pod that consumes the backupPVC so that VGDP can access data from it; see the [Volume Snapshot Data Movement Design][2] for more details.
|
||||
|
||||
**sourcePVC**: The PVC to be backed up; see the [Volume Snapshot Data Movement Design][2] for more details.
|
||||
|
||||
## Background
|
||||
|
||||
As elaborated in the [Volume Snapshot Data Movement Design][2], a backupPVC may be created by the Exposer, and the VGDP reads data from the backupPVC.
|
||||
In some scenarios, users may need to configure some advanced settings of the backupPVC so that the data movement achieves the best performance in their environments. Specifically:
|
||||
- Some storage providers can create a read-only volume from a snapshot very quickly, whereas creating a writable volume from the snapshot requires cloning the entire disk data, which is time consuming. If the backupPVC's `accessModes` is set to `ReadOnlyMany`, the volume driver can tell the storage to create a read-only volume, which may dramatically shorten the snapshot expose time. On the other hand, `ReadOnlyMany` is not supported by all volumes. Therefore, users should be allowed to configure the `accessModes` of the backupPVC.
|
||||
- Some storage providers create one or more replicas when creating a volume; the number of replicas is defined in the storage class. However, it doesn't make sense to keep replicas for an intermediate volume used only by the backup. Therefore, users should be allowed to configure a different storage class specifically for the backupPVC.
|
||||
|
||||
## Goals
|
||||
|
||||
- Create a mechanism for users to specify various configurations for backupPVC
|
||||
|
||||
## Non-Goals
|
||||
|
||||
## Solution
|
||||
|
||||
We will use the ConfigMap specified by `velero node-agent` CLI's parameter `--node-agent-configmap` to host the backupPVC configurations.
|
||||
This configMap is not created by Velero; users should create it manually on demand. The configMap must be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace, and it applies only to the node-agent in that namespace.
|
||||
The node-agent server reads these configurations at startup time and uses them to initialize the related Exposer modules. Users can edit this configMap at any time, but the node-agent server needs to be restarted for the changes to take effect.
|
||||
Inside the ConfigMap we will add one new kind of configuration to the configMap's data, named ```backupPVC```.
|
||||
Users may want to set different backupPVC configurations for different volumes; therefore, we define the configurations as a map and allow users to specify configurations per storage class. Specifically, the key of each map element is the storage class name used by the sourcePVC, and the value is the set of configurations for the backupPVC created for that sourcePVC.
|
||||
|
||||
The data structure is as below:
|
||||
```go
|
||||
type Configs struct {
|
||||
// LoadConcurrency is the config for data path load concurrency per node.
|
||||
LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"`
|
||||
|
||||
// LoadAffinity is the config for data path load affinity.
|
||||
LoadAffinity []*LoadAffinity `json:"loadAffinity,omitempty"`
|
||||
|
||||
// BackupPVC is the config for backupPVC of snapshot data movement.
|
||||
BackupPVC map[string]BackupPVC `json:"backupPVC,omitempty"`
|
||||
}
|
||||
|
||||
type BackupPVC struct {
|
||||
// StorageClass is the name of storage class to be used by the backupPVC.
|
||||
StorageClass string `json:"storageClass,omitempty"`
|
||||
|
||||
// ReadOnly sets the backupPVC's access mode as read only.
|
||||
ReadOnly bool `json:"readOnly,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
### Sample
|
||||
A sample of the ConfigMap is shown below:
|
||||
```json
|
||||
{
|
||||
"backupPVC": {
|
||||
"storage-class-1": {
|
||||
"storageClass": "snapshot-storage-class",
|
||||
"readOnly": true
|
||||
},
|
||||
"storage-class-2": {
|
||||
"storageClass": "snapshot-storage-class"
|
||||
},
|
||||
"storage-class-3": {
|
||||
"readOnly": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To create the configMap, users need to save something like the above sample to a JSON file and then run the command below:
|
||||
```
|
||||
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
### Implementation
|
||||
The `backupPVC` configuration is passed to the exposer, which sets the related specification and creates the backupPVC.
|
||||
If `backupPVC.storageClass` doesn't exist or is set to empty, the sourcePVC's storage class will be used.
|
||||
If `backupPVC.readOnly` is set to true, `ReadOnlyMany` will be the only value set in the backupPVC's `accessModes`; otherwise, `ReadWriteOnce` is used.
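To make the mapping concrete, here is a sketch of the backupPVC the exposer might create for a sourcePVC whose storage class matches the `storage-class-1` entry in the sample above; the names, size, and snapshot data source are illustrative:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-backuppvc             # illustrative; the real name is generated by the exposer
  namespace: velero
spec:
  accessModes:
  - ReadOnlyMany                      # because readOnly: true for the source storage class
  storageClassName: snapshot-storage-class   # from backupPVC.storageClass
  dataSource:                         # illustrative; the exposer creates the volume from the snapshot
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: example-volumesnapshot
  resources:
    requests:
      storage: 10Gi                   # illustrative; matches the sourcePVC size
```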
|
||||
|
||||
Once `backupPVC.storageClass` is set, users must make sure that the specified storage class exists in the cluster and can be used by the backupPVC; otherwise, the corresponding DataUpload CR will stay in the `Accepted` phase until the prepare timeout (30 minutes by default).
|
||||
Once `backupPVC.readOnly` is set to true, users must make sure that the storage supports creating a `ReadOnlyMany` PVC from a snapshot; otherwise, the corresponding DataUpload CR will stay in the `Accepted` phase until the prepare timeout (30 minutes by default).
|
||||
|
||||
If one of the above problems happens, the DataUpload CR is cancelled after the prepare timeout and the backupPVC and backupPod are deleted, so there is no way to tell whether the cause was one of the above problems or something else.
|
||||
To help with troubleshooting, we can add a diagnostic mechanism that records the status of the backupPod before it is deleted as a result of the prepare timeout.
|
||||
|
||||
[1]: unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: volume-snapshot-data-movement/volume-snapshot-data-movement.md
|
|
@ -0,0 +1,123 @@
|
|||
# Backup Repository Configuration Design
|
||||
|
||||
## Glossary & Abbreviation
|
||||
|
||||
**Backup Storage**: The storage to store the backup data. Check [Unified Repository design][1] for details.
|
||||
**Backup Repository**: The backup repository is layered between the BR data movers and the Backup Storage to provide BR related features; it is introduced in the [Unified Repository design][1].
|
||||
|
||||
## Background
|
||||
|
||||
According to the [Unified Repository design][1], Velero uses selectable backup repositories for various backup/restore methods, e.g., fs-backup, volume snapshot data movement, etc. To achieve the best performance, backup repositories may need to be configured according to the running environment.
|
||||
For example, if there are sufficient CPU and memory resources in the environment, users may enable the compression feature provided by the backup repository so as to achieve the best backup throughput.
|
||||
As another example, if local disk space is limited, users may want to constrain the backup repository's cache size so as to prevent the repository from running out of disk space.
|
||||
Therefore, it is worthwhile to allow users to configure some essential parameters of the backup repositories, and the configuration may vary across backup repositories.
|
||||
|
||||
## Goals
|
||||
|
||||
- Create a mechanism for users to specify configurations for backup repositories
|
||||
|
||||
## Non-Goals
|
||||
|
||||
## Solution
|
||||
|
||||
### BackupRepository CRD
|
||||
|
||||
After a backup repository is initialized, a BackupRepository CR is created to represent the instance of the backup repository. The BackupRepository's spec is a core parameter used by the Unified Repo modules when interacting with the backup repository. Therefore, we can add the configurations to the BackupRepository CR in a new field called ```repositoryConfig```.
|
||||
The configurations may differ across backup repositories; therefore, we will not define each of the configurations explicitly. Instead, we add a map to the BackupRepository's spec to take any configuration to be set on the backup repository.
|
||||
|
||||
During various operations on the backup repository, the Unified Repo modules retrieve from the map the specific configuration that is required at that time. So even though it is specified, a configuration may not be visited/honored if the operations don't require it for the specific backup repository; this won't cause any issue. When and how a configuration is honored is decided by the configuration itself and should be clarified in the configuration's specification.
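As an illustration, below is a minimal sketch of how a Unified Repo module might look up one such configuration at the point it is needed; the helper name and the default value are hypothetical:
```go
// getCacheLimitMB is a hypothetical example of reading an optional
// configuration from the repositoryConfig map, falling back to a hard-coded
// default when the configuration is absent or malformed.
func getCacheLimitMB(repo *velerov1api.BackupRepository) int {
	const defaultCacheLimitMB = 5120 // hypothetical hard-coded default

	if v, found := repo.Spec.RepositoryConfig["cacheLimitMB"]; found {
		if limit, err := strconv.Atoi(v); err == nil {
			return limit
		}
	}
	return defaultCacheLimitMB
}
```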
|
||||
|
||||
Below is the new BackupRepository's spec after adding the configuration map:
|
||||
```yaml
|
||||
spec:
|
||||
description: BackupRepositorySpec is the specification for a BackupRepository.
|
||||
properties:
|
||||
backupStorageLocation:
|
||||
description: |-
|
||||
BackupStorageLocation is the name of the BackupStorageLocation
|
||||
that should contain this repository.
|
||||
type: string
|
||||
maintenanceFrequency:
|
||||
description: MaintenanceFrequency is how often maintenance should
|
||||
be run.
|
||||
type: string
|
||||
repositoryConfig:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: RepositoryConfig contains configurations for the specific
|
||||
repository.
|
||||
type: object
|
||||
repositoryType:
|
||||
description: RepositoryType indicates the type of the backend repository
|
||||
enum:
|
||||
- kopia
|
||||
- restic
|
||||
- ""
|
||||
type: string
|
||||
resticIdentifier:
|
||||
description: |-
|
||||
ResticIdentifier is the full restic-compatible string for identifying
|
||||
this repository.
|
||||
type: string
|
||||
volumeNamespace:
|
||||
description: |-
|
||||
VolumeNamespace is the namespace this backup repository contains
|
||||
pod volume backups for.
|
||||
type: string
|
||||
required:
|
||||
- backupStorageLocation
|
||||
- maintenanceFrequency
|
||||
- resticIdentifier
|
||||
- volumeNamespace
|
||||
type: object
|
||||
```
|
||||
|
||||
### BackupRepository configMap
|
||||
|
||||
The BackupRepository CR is not created explicitly by a Velero CLI, but created as part of the backup/restore/maintenance operation if the CR doesn't exist. As a result, users don't have any way to specify the configurations before the BackupRepository CR is created.
|
||||
Therefore, a BackupRepository configMap is introduced as a template of the configurations to be applied to the backup repository CR.
|
||||
When the backup repository CR is created by the BackupRepository controller, the configurations in the configMap are copied to the ```repositoryConfig``` field.
|
||||
For an existing BackupRepository CR, the configMap is never visited; if users want to modify a configuration value, they should directly edit the BackupRepository CR.
|
||||
|
||||
The BackupRepository configMap is created by users in the Velero installation namespace. The configMap name must be specified in the velero server parameter ```--backup-repository-configmap```, otherwise it won't take effect.
|
||||
If the configMap name is specified but the configMap doesn't exist by the time a backup repository is created, the configMap name is ignored.
|
||||
If, for any reason, the configMap doesn't take effect, nothing is specified in the backup repository CR, so the Unified Repo modules use the hard-coded values to configure the backup repository.
|
||||
|
||||
The BackupRepository configMap supports backup repository type specific configurations, even though users can only specify one configMap.
|
||||
So in the configMap struct, multiple entries are supported, indexed by the backup repository type. During the backup repository creation, the configMap is searched by the repository type.
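Below is a minimal sketch, with hypothetical helper and variable names, of how the BackupRepository controller could pick the entry for the repository type and copy it into ```repositoryConfig```:
```go
// applyRepositoryConfig is a hypothetical helper showing how the entry for the
// repository type could be copied from the configMap into the new
// repositoryConfig field when the BackupRepository CR is created.
func applyRepositoryConfig(repo *velerov1api.BackupRepository, cm *corev1.ConfigMap) error {
	data, found := cm.Data[repo.Spec.RepositoryType]
	if !found {
		// No entry for this repository type: leave repositoryConfig empty so
		// the Unified Repo modules fall back to their hard-coded values.
		return nil
	}

	configs := map[string]interface{}{}
	if err := json.Unmarshal([]byte(data), &configs); err != nil {
		return errors.Wrapf(err, "error unmarshalling config for repository type %s", repo.Spec.RepositoryType)
	}

	repo.Spec.RepositoryConfig = map[string]string{}
	for k, v := range configs {
		// repositoryConfig is a map of strings, so non-string values from the
		// JSON (numbers, booleans) are stored in their string form.
		repo.Spec.RepositoryConfig[k] = fmt.Sprintf("%v", v)
	}
	return nil
}
```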
|
||||
|
||||
### Configurations
|
||||
|
||||
With the above mechanisms, any kind of configuration could be added. The configurations defined at present are listed below:
|
||||
```cacheLimitMB```: specifies the size limit (in MB) for the local data cache. The more data is cached locally, the less data may need to be downloaded from the backup storage, so the better the performance that may be achieved. Practically, users can specify any size that is smaller than the free space so that the disk space won't run out. This parameter applies to each repository connection, that is, users could change it before connecting to the repository. If a backup repository doesn't use a local cache, this parameter will be ignored. For the Kopia repository, this parameter is supported.
|
||||
```enableCompression```: specifies whether to enable/disable compression for a backup repository. Most backup repositories support the data compression feature; if it is not supported by a backup repository, this parameter is ignored. Most backup repositories also support dynamically enabling/disabling compression, so this parameter is defined to be used whenever creating a write connection to the backup repository; if dynamic changes are not supported, this parameter will be honored only when initializing the backup repository. For the Kopia repository, this parameter is supported and can be dynamically modified.
|
||||
|
||||
### Sample
|
||||
Below is an example of the BackupRepository configMap with the configurations:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: <config-name>
|
||||
namespace: velero
|
||||
data:
|
||||
<repository-type-1>: |
|
||||
{
|
||||
"cacheLimitMB": 2048,
|
||||
"enableCompression": true
|
||||
}
|
||||
<repository-type-2>: |
|
||||
{
|
||||
"cacheLimitMB": 1,
|
||||
"enableCompression": false
|
||||
}
|
||||
```
|
||||
|
||||
To create the configMap, users need to save something like the above sample to a file and then run the below command:
|
||||
```
|
||||
kubectl apply -f <yaml file name>
|
||||
```
|
||||
|
||||
|
||||
|
||||
[1]: unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
|
@ -86,7 +86,7 @@ volumePolicies:
|
|||
# capacity condition matches the volumes whose capacity falls into the range
|
||||
capacity: "0,100Gi"
|
||||
csi:
|
||||
driver: aws.ebs.csi.driver
|
||||
driver: ebs.csi.aws.com
|
||||
fsType: ext4
|
||||
storageClass:
|
||||
- gp2
|
||||
|
@ -174,7 +174,7 @@ data:
|
|||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
csi:
|
||||
driver: aws.ebs.csi.driver
|
||||
driver: ebs.csi.aws.com
|
||||
fsType: ext4
|
||||
storageClass:
|
||||
- gp2
|
||||
|
|
|
@ -65,7 +65,7 @@ This page contains a pre-migration checklist for ensuring a repo migration goes
|
|||
|
||||
#### Updating Netlify
|
||||
|
||||
The settings for Netflify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
|
||||
The settings for Netlify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
|
||||
|
||||
#### Communication strategy
|
||||
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
# Multi-arch Build and Windows Build Support
|
||||
|
||||
## Background
|
||||
|
||||
At present, Velero images can be built for linux-amd64 and linux-arm64. We need to support other platforms, i.e., windows-amd64.
|
||||
At present, for the Linux image build, we leverage Buildkit's `--platform` option to create the image manifest list in one build call. However, this approach is limited and doesn't fully support all multi-arch scenarios. Specifically, since the build is done in one call with the same parameters, it is impossible to build images with different configurations (e.g., the Windows build requires a different Dockerfile).
|
||||
At present, Velero by default builds images locally, i.e., no image or manifest is pushed to a registry. However, docker doesn't support multi-arch builds locally. We need to clarify the behavior of the local build.
|
||||
|
||||
## Goals
|
||||
- Refactor the `make container` process to fully support multi-arch build
|
||||
- Add Windows build to the existing build process
|
||||
- Clarify the behavior of local build with multi-arch build capabilities
|
||||
- Don't change the pattern of the final image tag to be used by users
|
||||
|
||||
## Non-Goals
|
||||
- There may be some workarounds to make the multi-arch image/manifest fully available locally. These workarounds will not be adopted, so the local build always builds single-arch images
|
||||
|
||||
## Local Build
|
||||
|
||||
For local build, two values of `--output` parameter for `docker buildx build` are supported:
|
||||
- `docker`: a docker format image is built, but the image is only built for the same platform (`<os>/<arch>`) as the building env. E.g., when building from a linux-amd64 env, a single manifest of linux-amd64 is created regardless of how the input parameters are configured.
|
||||
- `tar`: one or more images are built as tarballs according to the input platform (`<os>/<arch>`) parameters. Specifically, one tarball is generated for each platform. The build process is the same as the `Build Separate Manifests` step of `Push Build` as detailed below; only the `--output` parameter differs, as `type=tar;dest=<tarball generated path>`. The tarball is generated in the `_output` folder and named with the platform info, e.g., `_output/velero-main-linux-amd64.tar`.
|
||||
|
||||
## Push Build
|
||||
|
||||
For push build, the `--output` parameter for `docker buildx build` is always `registry`. The build proceeds according to the input parameters and creates multi-arch manifest lists.
|
||||
|
||||
### Step 1: Build Separate Manifests
|
||||
|
||||
Instead of specifying multiple platforms (`<os>/<arch>`) in the `--platform` option, we add multiple `container-%` targets in the Makefile, and each target builds one platform respectively.
|
||||
|
||||
The goal here is to build multiple manifests through the multiple targets. However, `docker buildx build` by default creates a manifest list even when there is only one element in `--platform`. Therefore, two flags `--provenance=false` and `--sbom=false` will be set additionally to force `docker buildx build` to create manifests.
|
||||
|
||||
Each manifest has a unique tag; the OS type and arch are added to the tag in the pattern `$(REGISTRY)/$(BIN):$(VERSION)-$(OS)-$(ARCH)`. For example, `velero/velero:main-linux-amd64`.
|
||||
|
||||
All the created manifests will be pushed to the registry so that the all-in-one manifest list can be created.
|
||||
|
||||
### Step 2: Create All-In-One Manifest List
|
||||
|
||||
The next step is to create a manifest list that includes all the created manifests. This is done by the `docker manifest create` command; the tags created and pushed at Step 1 are passed to this command.
|
||||
A tag is also created for the manifest list, in the pattern `$(REGISTRY)/$(BIN):$(VERSION)`. For example, `velero/velero:main`.
|
||||
|
||||
### Step 3: Push All-In-One Manifest List
|
||||
|
||||
The created manifest list will be pushed to the registry by the `docker manifest push` command.
|
||||
|
||||
## Input Parameters
|
||||
|
||||
Below are the input parameters that are configurable to meet different build purposes during Dev and release cycle:
|
||||
- BUILD_OUTPUT_TYPE: the type of output for the build, i.e., `docker`, `tar`, or `registry`; `docker` and `tar` are for local build, while `registry` means push build. Default value is `docker`
|
||||
- BUILD_OS: which types of OS should be built for. Multiple values are accepted, e.g., `linux,windows`. Default value is `linux`
|
||||
- BUILD_ARCH: which types of architecture should be built for. Multiple values are accepted, e.g., `amd64,arm64`. Default value is `amd64`
|
||||
- BUILDX_INSTANCE: an existing buildx instance to be used by the build. Default value is <empty>, which indicates that the build should create a new buildx instance
|
||||
|
||||
## Windows Build
|
||||
|
||||
Windows container images vary across Windows OS versions, e.g., `ltsc2022` for Windows Server 2022 and `1809` for Windows Server 2019. Images for different OS versions should be built separately.
|
||||
Therefore, separate build targets are added for each OS version, like `container-windows-%`.
|
||||
For the same reason, a new input parameter is added, `BUILD_WINDOWS_VERSION`. The default value is `ltsc2022`. Windows Server 2022 is the only base image we will deliver officially; Windows Server 2019 is not supported. In the future, we may need to support the Windows Server 2025 base image.
|
||||
For local build to tar, the Windows OS version is also added to the name of the tarball, e.g., `_output/velero-main-windows-ltsc2022-amd64.tar`.
|
||||
|
||||
At present, Windows container images only support `amd64` as the architecture, so `BUILD_ARCH` is ignored for Windows.
|
||||
|
||||
The Windows manifests need to be annotated with os type, arch, and os version. This will be done through `docker manifest annotate` command.
|
||||
|
||||
## Use Multi-arch Images
|
||||
|
||||
In order to use the images, the manifest list's tag should be provided to the `velero install` command or helm; the individual manifests are covered by the manifest list. At launch time, the container engine will load the right image into the container according to the platform of the running node.
|
||||
|
||||
## Build Samples
|
||||
|
||||
**Local build to docker**
|
||||
```
|
||||
make container
|
||||
```
|
||||
The built image could be listed by `docker image ls`.
|
||||
|
||||
**Local build for linux-amd64 and windows-amd64 to tar**
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=tar BUILD_OS=linux,windows make container
|
||||
```
|
||||
Under `_output` directory, below files are generated:
|
||||
```
|
||||
velero-main-linux-amd64.tar
|
||||
velero-main-windows-ltsc2022-amd64.tar
|
||||
```
|
||||
|
||||
**Local build for linux-amd64, linux-arm64 and windows-amd64 to tar**
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=tar BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
|
||||
```
|
||||
Under `_output` directory, below files are generated:
|
||||
```
|
||||
velero-main-linux-amd64.tar
|
||||
velero-main-linux-arm64.tar
|
||||
velero-main-windows-ltsc2022-amd64.tar
|
||||
```
|
||||
|
||||
**Push build for linux-amd64 and windows-amd64**
|
||||
Prerequisite: login to registry, e.g., through `docker login`
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows make container
|
||||
```
|
||||
Nothing is available locally; in the registry, 3 tags are available:
|
||||
```
|
||||
velero/velero:main
|
||||
velero/velero:main-windows-ltsc2022-amd64
|
||||
velero/velero:main-linux-amd64
|
||||
```
|
||||
|
||||
**Push build for linux-amd64, linux-arm64 and windows-amd64**
|
||||
Prerequisite: login to registry, e.g., through `docker login`
|
||||
```
|
||||
BUILD_OUTPUT_TYPE=registry REGISTRY=<registry> BUILD_OS=linux,windows BUILD_ARCH=amd64,arm64 make container
|
||||
```
|
||||
Nothing is available locally; in the registry, 4 tags are available:
|
||||
```
|
||||
velero/velero:main
|
||||
velero/velero:main-windows-ltsc2022-amd64
|
||||
velero/velero:main-linux-amd64
|
||||
velero/velero:main-linux-arm64
|
||||
```
|
|
@ -26,18 +26,18 @@ Therefore, in order to improve the compatibility, it is worthy to configure the
|
|||
|
||||
## Non-Goals
|
||||
- It is also beneficial to support VGDP instances affinity for PodVolume backup/restore, however, it is not possible since VGDP instances for PodVolume backup/restore should always run in the node where the source/target pods are created.
|
||||
- It is also beneficial to support VGDP instances affinity for data movement restores, however, it is not possible in some cases. For example, when the `volumeBindingMode` in the storageclass is `WaitForFirstConsumer`, the restore volume must be mounted in the node where the target pod is scheduled, so the VGDP instance must run in the same node. On the other hand, considering the fact that restores may not frequently and centrally run, we will not support data movement restores.
|
||||
- As elaberated in the [Volume Snapshot Data Movement Design][2], the Exposer may take different ways to expose snapshots, i.e., through backup pods (this is the only way supported at present). The implementation section below only considers this approach currently, if a new expose method is introduced in future, the definition of the affinity configurations and behaviors should still work, but we may need a new implementation.
|
||||
- It is also beneficial to support VGDP instances affinity for data movement restores, however, it is not possible in some cases. For example, when the `volumeBindingMode` in the StorageClass is `WaitForFirstConsumer`, the restore volume must be mounted in the node where the target pod is scheduled, so the VGDP instance must run in the same node. On the other hand, considering the fact that restores may not frequently and centrally run, we will not support data movement restores.
|
||||
- As elaborated in the [Volume Snapshot Data Movement Design][2], the Exposer may take different ways to expose snapshots, i.e., through backup pods (this is the only way supported at present). The implementation section below only considers this approach currently, if a new expose method is introduced in future, the definition of the affinity configurations and behaviors should still work, but we may need a new implementation.
|
||||
|
||||
## Solution
|
||||
|
||||
We will use the ```node-agent-config``` configMap to host the node affinity configurations.
|
||||
We will use the ConfigMap specified by `velero node-agent` CLI's parameter `--node-agent-configmap` to host the node affinity configurations.
|
||||
This configMap is not created by Velero, users should create it manually on demand. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only.
|
||||
Node-agent server checks these configurations at startup time and use it to initiate the related VGDP modules. Therefore, users could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
|
||||
Inside ```node-agent-config``` configMap we will add one new kind of configuration as the data in the configMap, the name is ```loadAffinity```.
|
||||
Inside the ConfigMap we will add one new kind of configuration as the data in the configMap, the name is ```loadAffinity```.
|
||||
Users may want to set different LoadAffinity configurations according to different conditions (i.e., for different storages represented by StorageClass, CSI driver, etc.), so we define ```loadAffinity``` as an array. This is for extensibility consideration, at present, we don't implement multiple configurations support, so if there are multiple configurations, we always take the first one in the array.
|
||||
|
||||
The data structure for ```node-agent-config``` is as below:
|
||||
The data structure is as below:
|
||||
```go
|
||||
type Configs struct {
|
||||
// LoadConcurrency is the config for load concurrency per node.
|
||||
|
@ -63,7 +63,7 @@ Anti-affinity configuration means preventing VGDP instances running in the nodes
|
|||
- It could be defined by `MatchExpressions` of `metav1.LabelSelector`. The labels are defined in `Key` and `Values` of `MatchExpressions` and the `Operator` should be defined as `LabelSelectorOpNotIn` or `LabelSelectorOpDoesNotExist`.
|
||||
|
||||
### Sample
|
||||
A sample of the ```node-agent-config``` configMap is as below:
|
||||
A sample of the ConfigMap is as below:
|
||||
```json
|
||||
{
|
||||
"loadAffinity": [
|
||||
|
@ -101,7 +101,7 @@ This sample showcases one anti-affinity configuration:
|
|||
|
||||
To create the configMap, users need to save something like the above sample to a json file and then run below command:
|
||||
```
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
@ -111,15 +111,7 @@ It is possible that node-agent pods, as a daemonset, don't run in every worker n
|
|||
Otherwise, if a backupPod are scheduled to a node where node-agent pod is absent, the corresponding DataUpload CR will stay in `Accepted` phase until the prepare timeout (by default 30min).
|
||||
|
||||
At present, as part of the expose operations, the exposer creates a volume, represented by backupPVC, from the snapshot. The backupPVC uses the same storageClass with the source volume. If the `volumeBindingMode` in the storageClass is `Immediate`, the volume is immediately allocated from the underlying storage without waiting for the backupPod. On the other hand, the loadAffinity is set to the backupPod's affinity. If the backupPod is scheduled to a node where the snapshot volume is not accessible, e.g., because of storage topologies, the backupPod won't get into Running state, concequently, the data movement won't complete.
|
||||
Once this problem happens, the backupPod stays in `Pending` phase, and the corresponding DataUpload CR stays in `Accepted` phase until the prepare timeout (by default 30min).
|
||||
|
||||
There is a common solution for the both problems:
|
||||
- We have an existing logic to periodically enqueue the dataupload CRs which are in the `Accepted` phase for timeout and cancel checks
|
||||
- We add a new logic to this existing logic to check if the corresponding backupPods are in unrecoverable status
|
||||
- The above problems could be covered by this check, because in both cases the backupPods are in abnormal and unrecoverable status
|
||||
- If a backupPod is unrecoverable, the dataupload controller cancels the dataupload and deletes the backupPod
|
||||
|
||||
Specifically, when the above problems happen, the status of a backupPod is like below:
|
||||
Once this problem happens, the backupPod stays in `Pending` phase, and the corresponding DataUpload CR stays in `Accepted` phase until the prepare timeout (by default 30min). Below is an example of the backupPod's status when the problem happens:
|
||||
```
|
||||
status:
|
||||
conditions:
|
||||
|
@ -133,5 +125,8 @@ Specifically, when the above problems happen, the status of a backupPod is like
|
|||
phase: Pending
|
||||
```
|
||||
|
||||
On the other hand, the backupPod is deleted after the prepare timeout, so there is no way to tell the cause is one of the above problems or others.
|
||||
To help the troubleshooting, we can add some diagnostic mechanism to discover the status of the backupPod and node-agent in the same node before deleting it as a result of the prepare timeout.
|
||||
|
||||
[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: volume-snapshot-data-movement/volume-snapshot-data-movement.md
|
|
@ -26,11 +26,11 @@ Therefore, in order to gain the optimized performance with the limited resources
|
|||
|
||||
## Solution
|
||||
|
||||
We introduce a configMap named ```node-agent-config``` for users to specify the node-agent related configurations. This configMap is not created by Velero, users should create it manually on demand. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only.
|
||||
We introduce a ConfigMap specified by `velero node-agent` CLI's parameter `--node-agent-configmap` for users to specify the node-agent related configurations. This configMap is not created by Velero, users should create it manually on demand. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only.
|
||||
Node-agent server checks these configurations at startup time and use it to initiate the related VGDP modules. Therefore, users could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
|
||||
The ```node-agent-config``` configMap may be used for other purpose of configuring node-agent in future, at present, there is only one kind of configuration as the data in the configMap, the name is ```loadConcurrency```.
|
||||
The ConfigMap may be used for other purpose of configuring node-agent in future, at present, there is only one kind of configuration as the data in the configMap, the name is ```loadConcurrency```.
|
||||
|
||||
The data structure for ```node-agent-config``` is as below:
|
||||
The data structure is as below:
|
||||
```go
|
||||
type Configs struct {
|
||||
// LoadConcurrency is the config for load concurrency per node.
|
||||
|
@ -82,7 +82,7 @@ At least one node is expected to have a label with the specified ```RuledConfigs
|
|||
If one node falls into more than one rules, e.g., if node1 also has the label ```beta.kubernetes.io/instance-type=Standard_B4ms```, the smallest number (3) will be used.
|
||||
|
||||
### Sample
|
||||
A sample of the ```node-agent-config``` configMap is as below:
|
||||
A sample of the ConfigMap is as below:
|
||||
```json
|
||||
{
|
||||
"loadConcurrency": {
|
||||
|
@ -110,7 +110,7 @@ A sample of the ```node-agent-config``` configMap is as below:
|
|||
```
|
||||
To create the configMap, users need to save something like the above sample to a json file and then run below command:
|
||||
```
|
||||
kubectl create cm node-agent-config -n velero --from-file=<json file name>
|
||||
kubectl create cm <ConfigMap name> -n velero --from-file=<json file name>
|
||||
```
|
||||
|
||||
### Global data path manager
|
||||
|
|
|
@ -241,7 +241,7 @@ In cases where the methods signatures remain the same, the adaptation layer will
|
|||
Examples where an adaptation may be safe:
|
||||
- A method signature is being changed to add a new parameter but the parameter could be optional (for example, adding a context parameter). The adaptation could call through to the method provided in the previous version but omit the parameter.
|
||||
- A method signature is being changed to remove a parameter, but it is safe to pass a default value to the previous version. The adaptation could call through to the method provided in the previous version but use a default value for the parameter.
|
||||
- A new method is being added but does not impact any existing behaviour of Velero (for example, a new method which will allow Velero to [wait for additional items to be ready](https://github.com/vmware-tanzu/velero/blob/main/design/wait-for-additional-items.md)). The adaptation would return a value which allows the existing behaviour to be performed.
|
||||
- A new method is being added but does not impact any existing behaviour of Velero (for example, a new method which will allow Velero to [wait for additional items to be ready](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/wait-for-additional-items.md)). The adaptation would return a value which allows the existing behaviour to be performed.
|
||||
- A method is being deleted as it is no longer used. The adaptation would call through to any methods which are still included but would omit the deleted method in the adaptation.
|
||||
|
||||
Examples where an adaptation may not be safe:
|
||||
|
|
|
@ -0,0 +1,311 @@
|
|||
# Repository maintenance job configuration design
|
||||
|
||||
## Abstract
|
||||
This design makes the repository maintenance job read its configuration from a dedicated ConfigMap and makes the Job's necessary parts configurable, e.g. `PodSpec.Affinity` and `PodSpec.Resources`.
|
||||
|
||||
## Background
|
||||
Repository maintenance was split from the Velero server into a k8s Job in v1.14 by the design [repository maintenance job](Implemented/repository-maintenance.md).
|
||||
The repository maintenance Job configuration was read from the Velero server CLI parameters, and it inherits most of the Velero server Deployment's PodSpec to fill un-configured fields.
|
||||
|
||||
This design introduces a new way to let the user customize the repository maintenance behavior instead of inheriting from the Velero server Deployment or reading from `velero server` CLI parameters.
|
||||
The configurations added in this design include the resource limitations and node selection.
|
||||
It's possible that new configurations will be introduced in future releases based on this design.
|
||||
|
||||
For node selection, the repository maintenance Job previously also inherited from the Velero server deployment, but the Job may last for a while and consume non-negligible resources, especially memory.
|
||||
Users need to be able to choose which k8s node runs the maintenance Job.
|
||||
This design reuses the data structure introduced by the design [node-agent affinity configuration](Implemented/node-agent-affinity.md) so that the repository maintenance job can choose which node to run on.
|
||||
|
||||
## Goals
|
||||
- Unify the repository maintenance Job configuration in one place.
|
||||
- Let users choose which nodes the repository maintenance Job runs on.
|
||||
|
||||
## Non Goals
|
||||
- There was an [issue](https://github.com/vmware-tanzu/velero/issues/7911) requiring that the whole Job's PodSpec should be configurable. That's not in the scope of this design.
|
||||
- Please note that this new configuration is dedicated to repository maintenance. The repository's own configuration is not covered.
|
||||
|
||||
|
||||
## Compatibility
|
||||
v1.14 uses the `velero server` CLI's parameter to pass the repository maintenance job configuration.
|
||||
In v1.15, those parameters are still kept, including `--maintenance-job-cpu-request`, `--maintenance-job-mem-request`, `--maintenance-job-cpu-limit`, `--maintenance-job-mem-limit`, and `--keep-latest-maintenance-jobs`.
|
||||
But the parameters read from the ConfigMap specified by `velero server` CLI parameter `--repo-maintenance-job-configmap` introduced by this design have a higher priority.
|
||||
|
||||
If `--repo-maintenance-job-configmap` is not specified, then the `velero server` parameters are used if provided.
|
||||
|
||||
If the `velero server` parameters are not specified either, then the default values are used.
|
||||
* `--keep-latest-maintenance-jobs` default value is 3.
|
||||
* `--maintenance-job-cpu-request` default value is 0.
|
||||
* `--maintenance-job-mem-request` default value is 0.
|
||||
* `--maintenance-job-cpu-limit` default value is 0.
|
||||
* `--maintenance-job-mem-limit` default value is 0.
|
||||
|
||||
## Deprecation
|
||||
Propose to deprecate the `velero server` parameters `--maintenance-job-cpu-request`, `--maintenance-job-mem-request`, `--maintenance-job-cpu-limit`, `--maintenance-job-mem-limit`, and `--keep-latest-maintenance-jobs` in release-1.15.
|
||||
That means those parameters will be deleted in release-1.17.
|
||||
After deletion, those resource-related parameters are replaced by the ConfigMap specified by the `velero server` CLI's parameter `--repo-maintenance-job-configmap`.
|
||||
`--keep-latest-maintenance-jobs` is deleted from `velero server` CLI. It turns into a non-configurable internal parameter, and its value is 3.
|
||||
Please check [issue 7923](https://github.com/vmware-tanzu/velero/issues/7923) for more information on why this parameter is being deleted.
|
||||
|
||||
## Design
|
||||
This design introduces a new ConfigMap specified by `velero server` CLI parameter `--repo-maintenance-job-configmap` as the source of the repository maintenance job configuration. The specified ConfigMap is read from the namespace where Velero is installed.
|
||||
If the ConfigMap doesn't exist, the internal default values are used.
|
||||
|
||||
Example of using the parameter `--repo-maintenance-job-configmap`:
|
||||
```
|
||||
velero server \
|
||||
...
|
||||
--repo-maintenance-job-configmap repo-job-config
|
||||
...
|
||||
```
|
||||
|
||||
**Notice**
|
||||
* Velero doesn't own this ConfigMap. If the user wants to customize the repository maintenance job, the user needs to create this ConfigMap.
|
||||
* Velero reads this ConfigMap content when starting a new repository maintenance job, so a ConfigMap change will not take effect until the next created job.
|
||||
|
||||
### Structure
|
||||
The data structure is as below:
|
||||
```go
|
||||
type Configs struct {
|
||||
// LoadAffinity is the config for data path load affinity.
|
||||
LoadAffinity []*LoadAffinity `json:"loadAffinity,omitempty"`
|
||||
|
||||
// PodResources is the config for the CPU and memory resources setting.
|
||||
PodResources *kube.PodResources `json:"podResources,omitempty"`
|
||||
}
|
||||
|
||||
type LoadAffinity struct {
|
||||
// NodeSelector specifies the label selector to match nodes
|
||||
NodeSelector metav1.LabelSelector `json:"nodeSelector"`
|
||||
}
|
||||
|
||||
type PodResources struct {
|
||||
CPURequest string `json:"cpuRequest,omitempty"`
|
||||
MemoryRequest string `json:"memoryRequest,omitempty"`
|
||||
CPULimit string `json:"cpuLimit,omitempty"`
|
||||
MemoryLimit string `json:"memoryLimit,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
The ConfigMap content is a map.
|
||||
If there is a key named `global` in the map, the key's value is applied to all BackupRepositories' maintenance jobs that cannot find their own specific configuration in the ConfigMap.
|
||||
The other keys in the map are the combination of three elements of a BackupRepository:
|
||||
* The namespace in which BackupRepository backs up volume data.
|
||||
* The BackupRepository referenced BackupStorageLocation's name.
|
||||
* The BackupRepository's type. Possible values are `kopia` and `restic`.
|
||||
|
||||
Those three keys can identify a [unique BackupRepository](https://github.com/vmware-tanzu/velero/blob/2fc6300f2239f250b40b0488c35feae59520f2d3/pkg/repository/backup_repo_op.go#L32-L37).
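A minimal sketch of how such a key could be derived from a BackupRepository is shown below; the helper name is hypothetical:
```go
// repoJobConfigKey is a hypothetical helper that builds the ConfigMap key for
// a BackupRepository from the three identifying elements described above,
// e.g. "test-default-kopia" for volumeNamespace=test, BSL=default, type=kopia.
func repoJobConfigKey(repo *velerov1api.BackupRepository) string {
	return fmt.Sprintf("%s-%s-%s",
		repo.Spec.VolumeNamespace,
		repo.Spec.BackupStorageLocation,
		repo.Spec.RepositoryType,
	)
}
```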
|
||||
|
||||
If there is a key matching the BackupRepository, the key's value is applied to the BackupRepository's maintenance jobs.
|
||||
In this way, users can provide the configuration before the BackupRepository is created.
|
||||
This is especially convenient for administrators who configure this during the Velero installation.
|
||||
For example, the following BackupRepository's key should be `test-default-kopia`.
|
||||
|
||||
``` yaml
|
||||
- apiVersion: velero.io/v1
|
||||
kind: BackupRepository
|
||||
metadata:
|
||||
generateName: test-default-kopia-
|
||||
labels:
|
||||
velero.io/repository-type: kopia
|
||||
velero.io/storage-location: default
|
||||
velero.io/volume-namespace: test
|
||||
name: test-default-kopia-kgt6n
|
||||
namespace: velero
|
||||
spec:
|
||||
backupStorageLocation: default
|
||||
maintenanceFrequency: 1h0m0s
|
||||
repositoryType: kopia
|
||||
resticIdentifier: gs:jxun:/restic/test
|
||||
volumeNamespace: test
|
||||
```
|
||||
|
||||
The `LoadAffinity` structure is reused from design [node-agent affinity configuration](Implemented/node-agent-affinity.md).
|
||||
It's possible that the users want to choose nodes that match condition A or condition B to run the job.
|
||||
For example, the user wants nodes of a specified machine type or nodes located in the us-central1-x zones to run the job.
|
||||
This can be done by adding multiple entries in the `LoadAffinity` array.
|
||||
|
||||
### Affinity Example
|
||||
A sample of the ConfigMap is as below:
|
||||
``` bash
|
||||
cat <<EOF > repo-maintenance-job-config.json
|
||||
{
|
||||
"global": {
|
||||
"podResources": {
|
||||
"cpuRequest": "100m",
|
||||
"cpuLimit": "200m",
|
||||
"memoryRequest": "100Mi",
|
||||
"memoryLimit": "200Mi"
|
||||
},
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "cloud.google.com/machine-family",
|
||||
"operator": "In",
|
||||
"values": [
|
||||
"e2"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "topology.kubernetes.io/zone",
|
||||
"operator": "In",
|
||||
"values": [
|
||||
"us-central1-a",
|
||||
"us-central1-b",
|
||||
"us-central1-c"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
This sample showcases two affinity configurations:
|
||||
- matchExpressions: the maintenance job runs on nodes with label key `cloud.google.com/machine-family` and value `e2`.
|
||||
- matchExpressions: the maintenance job runs on nodes located in `us-central1-a`, `us-central1-b` and `us-central1-c`.
|
||||
The nodes matching one of the two conditions are selected.
|
||||
|
||||
To create the configMap, users need to save something like the above sample to a json file and then run below command:
|
||||
```
|
||||
kubectl create cm repo-maintenance-job-config -n velero --from-file=repo-maintenance-job-config.json
|
||||
```
|
||||
|
||||
### Value assigning rules
|
||||
If the Velero BackupRepositoryController cannot find the introduced ConfigMap, the following default values are used for repository maintenance job:
|
||||
``` go
|
||||
config := Configs {
|
||||
// LoadAffinity is the config for data path load affinity.
|
||||
LoadAffinity: nil,
|
||||
|
||||
// Resources is the config for the CPU and memory resources setting.
|
||||
PodResources: &kube.PodResources{
|
||||
// The repository maintenance job CPU request setting
|
||||
CPURequest: "0m",
|
||||
|
||||
// The repository maintenance job memory request setting
|
||||
MemoryRequest: "0Mi",
|
||||
|
||||
// The repository maintenance job CPU limit setting
|
||||
CPULimit: "0m",
|
||||
|
||||
// The repository maintenance job memory limit setting
|
||||
MemoryLimit: "0Mi",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
If the Velero BackupRepositoryController finds the introduced ConfigMap with only `global` element, the `global` value is used.
|
||||
|
||||
If the Velero BackupRepositoryController finds the introduced ConfigMap with only an element matching the BackupRepository, the matched element's value is used.
|
||||
|
||||
|
||||
If the Velero BackupRepositoryController finds the introduced ConfigMap with both a `global` element and an element matching the BackupRepository, the values defined in the matched element overwrite the `global` values, and the `global` values are still used for the values the matched element leaves undefined.
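A minimal sketch of this merge rule, using the `Configs` structure defined above (the helper name is hypothetical):
```go
// mergeConfigs is a hypothetical helper showing how the matched element's
// defined fields overwrite the global ones, while fields the matched element
// leaves unset keep the global values.
func mergeConfigs(global, matched *Configs) *Configs {
	if matched == nil {
		return global
	}
	if global == nil {
		return matched
	}

	merged := *global
	if matched.LoadAffinity != nil {
		merged.LoadAffinity = matched.LoadAffinity
	}
	if matched.PodResources != nil {
		merged.PodResources = matched.PodResources
	}
	return &merged
}
```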
|
||||
|
||||
For example, the ConfigMap content has two elements.
|
||||
``` json
|
||||
{
|
||||
"global": {
|
||||
"loadAffinity": [
|
||||
{
|
||||
"nodeSelector": {
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "cloud.google.com/machine-family",
|
||||
"operator": "In",
|
||||
"values": [
|
||||
"e2"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
],
|
||||
"podResources": {
|
||||
"cpuRequest": "100m",
|
||||
"cpuLimit": "200m",
|
||||
"memoryRequest": "100Mi",
|
||||
"memoryLimit": "200Mi"
|
||||
}
|
||||
},
|
||||
"ns1-default-kopia": {
|
||||
"podResources": {
|
||||
"memoryRequest": "400Mi",
|
||||
"memoryLimit": "800Mi"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
The config value used for a BackupRepository backing up volume data in namespace `ns1`, referencing BSL `default`, with type `kopia`:
|
||||
``` go
|
||||
config := Configs {
|
||||
// LoadAffinity is the config for data path load affinity.
|
||||
LoadAffinity: []*kube.LoadAffinity{
|
||||
{
|
||||
NodeSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "cloud.google.com/machine-family",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"e2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PodResources: &kube.PodResources{
|
||||
// The repository maintenance job CPU request setting
|
||||
CPURequest: "",
|
||||
// The repository maintenance job memory request setting
|
||||
MemoryRequest: "400Mi",
|
||||
// The repository maintenance job CPU limit setting
|
||||
CPULimit: "",
|
||||
// The repository maintenance job memory limit setting
|
||||
MemoryLimit: "800Mi",
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Implementation
|
||||
When the Velero repository controller starts to maintain a repository, it calls the repository manager's `PruneRepo` function to build the maintenance Job.
|
||||
The ConfigMap specified by the `velero server` CLI parameter `--repo-maintenance-job-configmap` is retrieved to reinitialize the repository `MaintenanceConfig` setting.
|
||||
|
||||
``` go
|
||||
jobConfig, err := getMaintenanceJobConfig(
|
||||
context.Background(),
|
||||
m.client,
|
||||
m.log,
|
||||
m.namespace,
|
||||
m.repoMaintenanceJobConfig,
|
||||
repo,
|
||||
)
|
||||
if err != nil {
|
||||
log.Infof("Cannot find the ConfigMap %s with error: %s. Use default value.",
|
||||
m.namespace+"/"+m.repoMaintenanceJobConfig,
|
||||
err.Error(),
|
||||
)
|
||||
}
|
||||
|
||||
log.Info("Start to maintenance repo")
|
||||
|
||||
maintenanceJob, err := m.buildMaintenanceJob(
|
||||
jobConfig,
|
||||
param,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error to build maintenance job")
|
||||
}
|
||||
```
|
||||
|
||||
## Alternatives Considered
|
||||
Another option is creating a separate ConfigMap for each BackupRepository.
|
||||
This is not ideal for scenarios that have a lot of BackupRepositories in the cluster.
|
|
@ -0,0 +1,113 @@
|
|||
# Allow Object-Level Resource Status Restore in Velero
|
||||
|
||||
## Abstract
|
||||
This design proposes a way to enhance Velero’s restore functionality by enabling object-level resource status restoration through annotations.
|
||||
Currently, Velero allows restoring resource statuses only at the resource type level, which lacks the granularity to restore the status of specific resources.
|
||||
By introducing an annotation that controllers can set on individual resource objects, this design aims to improve flexibility and autonomy for users/resource-controllers, providing a more granular way
|
||||
to enable resource status restore.
|
||||
|
||||
|
||||
## Background
|
||||
Velero provides the `restoreStatus` field in the Restore API to specify resource types for status restoration. However, this feature is limited to resource types as a whole, lacking the granularity needed to restore specific objects of a resource type. Resource controllers, especially those managing custom resources with external dependencies, may need to restore status on a per-object basis based on internal logic and dependencies.
|
||||
|
||||
This design adds an annotation-based approach to allow controllers to specify status restoration at the object level, enabling Velero to handle status restores more flexibly.
|
||||
|
||||
## Goals
|
||||
- Provide a mechanism to specify the restoration of a resource’s status at an object level.
|
||||
- Maintain backwards compatibility with existing functionality, allowing gradual adoption of this feature.
|
||||
- Integrate the new annotation-based objects-level status restore with Velero’s existing resource-type-level `restoreStatus` configuration.
|
||||
|
||||
## Non-Goals
|
||||
- Alter Velero’s existing resource type-level status restoration mechanism for resources without annotations.
|
||||
|
||||
## Use-Cases/Scenarios
|
||||
|
||||
1. Controller managing specific Resources
|
||||
- A resource controller identifies that a specific object of a resource should have its status restored due to particular dependencies
|
||||
- The controller automatically sets the `velero.io/restore-status: true` annotation on the resource.
|
||||
- During restore, Velero restores the status of this object, while leaving other resources unaffected.
|
||||
- The status for the annotated object will be restored regardless of its inclusion/exclusion in `restoreStatus.includedResources`
|
||||
|
||||
2. A specific object must not have its status restored even if it is included in `restoreStatus.includedResources`
|
||||
- A user specifies a resource type in the `restoreStatus.includedResources` field within the Restore custom resource.
|
||||
- A particular object of that resource type is annotated with `velero.io/restore-status: false` by the user.
|
||||
- The status of the annotated object will not be restored even though it is included in `restoreStatus.includedResources`, because the annotation is `false` and it takes precedence.
|
||||
|
||||
3. Default Behavior for Objects Without the Annotation
|
||||
- Objects without the `velero.io/restore-status` annotation behave as they currently do: Velero skips their status restoration unless the resource type is specified in the `restoreStatus.includedResources` field.
|
||||
|
||||
## High-Level Design
|
||||
|
||||
- Object-Level Status Restore Annotation: We are introducing the `velero.io/restore-status` annotation at the resource object level to mark specific objects for status restoration.
|
||||
- `true`: Indicates that the status should be restored for this object
|
||||
- `false`: Skip restoring status for this specific object
|
||||
- Invalid or missing annotations defer to the existing resource type-level logic.
|
||||
|
||||
- Restore logic precedence:
|
||||
- Annotations take precedence when they exist with valid values (`true` or `false`).
|
||||
- Restore spec `restoreStatus.includedResources` is only used when annotations are invalid or missing.
|
||||
|
||||
- Velero Restore Logic Update: During a restore operation, Velero will:
|
||||
- Extend the existing restore logic to parse and prioritize annotations introduced in this design.
|
||||
- Update resource objects accordingly based on their annotation values or fallback configuration.
|
||||
|
||||
|
||||
## Detailed Design
|
||||
|
||||
- Annotation for object-Level Status Restore: The `velero.io/restore-status` annotation will be set on individual resource objects by users/controllers as needed:
|
||||
```yaml
|
||||
metadata:
|
||||
annotations:
|
||||
velero.io/restore-status: "true"
|
||||
```
|
||||
|
||||
- Restore Logic Modifications: During the restore operation, the restore controller will follow these steps:
|
||||
- Parse the `restoreStatus.includedResources` spec to determine resource types eligible for status restoration.
|
||||
- For each resource object:
|
||||
- Check for the `velero.io/restore-status` annotation.
|
||||
- If the annotation value is:
|
||||
- `true`: Restore the status of the object
|
||||
- `false`: Skip restoring the status of the object
|
||||
- If the annotation is invalid or missing:
|
||||
- Default to the `restoreStatus.includedResources` configuration
|
||||
|
||||
|
||||
## Implementation
|
||||
|
||||
We are targeting the implementation of this design for Velero 1.16 release.
|
||||
|
||||
Current restoreStatus logic resides here: https://github.com/vmware-tanzu/velero/blob/32a8c62920ad96c70f1465252c0197b83d5fa6b6/pkg/restore/restore.go#L1652
|
||||
|
||||
The modified logic would look somewhat like:
|
||||
|
||||
```go
|
||||
// Determine whether to restore status from resource type configuration
|
||||
shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
|
||||
|
||||
// Check for object-level annotation
|
||||
annotations := obj.GetAnnotations()
|
||||
objectAnnotation := annotations["velero.io/restore-status"]
|
||||
annotationValid := objectAnnotation == "true" || objectAnnotation == "false"
|
||||
|
||||
// Determine restore behavior based on annotation precedence
|
||||
shouldRestoreStatus = (annotationValid && objectAnnotation == "true") || (!annotationValid && shouldRestoreStatus)
|
||||
|
||||
ctx.log.Debugf("status field for %s: exists: %v, should restore: %v (by annotation: %v)", newGR, statusFieldExists, shouldRestoreStatus, annotationValid)
|
||||
|
||||
if shouldRestoreStatus && statusFieldExists {
|
||||
if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
|
||||
ctx.log.Errorf("Could not set status field %s: %v", kube.NamespaceAndName(obj), err)
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs, itemExists
|
||||
}
|
||||
obj.SetResourceVersion(createdObj.GetResourceVersion())
|
||||
updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
ctx.log.Infof("Status field update failed %s: %v", kube.NamespaceAndName(obj), err)
|
||||
warnings.Add(namespace, err)
|
||||
} else {
|
||||
createdObj = updated
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
# Backup Restore Status Patch Retrying Configuration
|
||||
|
||||
## Abstract
|
||||
When a backup/restore completes, we want to ensure that the custom resource progresses to the correct status.
|
||||
If a patch call fails to update status to completion, it should be retried up to a certain time limit.
|
||||
|
||||
This design proposes a way to configure timeout for this retry time limit.
|
||||
|
||||
## Background
|
||||
Original Issue: https://github.com/vmware-tanzu/velero/issues/7207
|
||||
|
||||
Velero was performing a restore when the API server was rolling out to a new version.
|
||||
It had trouble connecting to the API server, but eventually, the restore was successful.
|
||||
However, since the API server was still in the middle of rolling out, Velero failed to update the restore CR status and gave up.
|
||||
|
||||
After the connection was restored, it didn't attempt to update, causing the restore CR to be stuck at "In progress" indefinitely.
|
||||
This can lead to incorrect decisions for other components that rely on the backup/restore CR status to determine completion.
|
||||
|
||||
## Goals
|
||||
- Make timeout configurable for retry patching by reusing existing [`--resource-timeout` server flag](https://github.com/vmware-tanzu/velero/blob/d9ca14747925630664c9e4f85a682b5fc356806d/pkg/cmd/server/server.go#L245)
|
||||
|
||||
## Non Goals
|
||||
- Create a new timeout flag
|
||||
- Refactor backup/restore workflow
|
||||
|
||||
|
||||
## High-Level Design
|
||||
We will add retries with a timeout to existing patch calls that move a backup/restore from InProgress to a different status phase such as
|
||||
- FailedValidation (final)
|
||||
- Failed (final)
|
||||
- WaitingForPluginOperations
|
||||
- WaitingForPluginOperationsPartiallyFailed
|
||||
- Finalizing
|
||||
- FinalizingPartiallyFailed
|
||||
|
||||
and from above non final phases to
|
||||
- Completed
|
||||
- PartiallyFailed
|
||||
|
||||
Once the backup/restore is in one of the following phases, it will already be reconciled periodically and does not need an additional retry:
|
||||
- WaitingForPluginOperations
|
||||
- WaitingForPluginOperationsPartiallyFailed
|
||||
|
||||
## Detailed Design
|
||||
Relevant reconcilers will have `resourceTimeout time.Duration` added to their structs and to the parameters of the New[Backup|Restore]XReconciler functions.
|
||||
|
||||
In pkg/cmd/server/server.go, `func (s *server) runControllers(..) error` will also update the New[Backup|Restore]XReconciler calls with the added duration parameter, using the value from the existing `--resource-timeout` server flag.
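For illustration, below is a minimal sketch of what threading the timeout through a reconciler could look like; the reconciler and field names are hypothetical:
```go
// backupOperationsReconciler is a hypothetical example of a reconciler struct
// carrying the resourceTimeout value wired in from --resource-timeout.
type backupOperationsReconciler struct {
	client          kbclient.Client
	log             logrus.FieldLogger
	resourceTimeout time.Duration
}

func NewBackupOperationsReconciler(client kbclient.Client, log logrus.FieldLogger, resourceTimeout time.Duration) *backupOperationsReconciler {
	return &backupOperationsReconciler{
		client:          client,
		log:             log,
		resourceTimeout: resourceTimeout,
	}
}
```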
|
||||
|
||||
Current calls to kube.PatchResource involving a status patch will be replaced with kube.PatchResourceWithRetriesOnErrors, added to package `kube` below.
|
||||
|
||||
Calls where there is a ...client.Patch() will be wrapped with client.RetryOnErrorMaxBackOff(), added to package `client` below.
|
||||
|
||||
pkg/util/kube/client.go
|
||||
```go
|
||||
// PatchResourceWithRetries patches the original resource with the updated resource, retrying when the provided retriable function returns true.
|
||||
func PatchResourceWithRetries(maxDuration time.Duration, original, updated client.Object, kbClient client.Client, retriable func(error) bool) error {
|
||||
return veleroPkgClient.RetryOnRetriableMaxBackOff(maxDuration, func() error { return PatchResource(original, updated, kbClient) }, retriable)
|
||||
}
|
||||
|
||||
// PatchResourceWithRetriesOnErrors patches the original resource with the updated resource, retrying when the operation returns an error.
|
||||
func PatchResourceWithRetriesOnErrors(maxDuration time.Duration, original, updated client.Object, kbClient client.Client) error {
|
||||
return PatchResourceWithRetries(maxDuration, original, updated, kbClient, func(err error) bool {
|
||||
// retry using DefaultBackoff to resolve connection refused error that may occur when the server is under heavy load
|
||||
// TODO: consider using a more specific error type to retry, for now, we retry on all errors
|
||||
// specific errors:
|
||||
// - connection refused: https://pkg.go.dev/syscall#:~:text=Errno(0x67)-,ECONNREFUSED,-%3D%20Errno(0x6f
|
||||
return err != nil
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
pkg/client/retry.go
|
||||
```go
|
||||
// CapBackoff provides a backoff with a set backoff cap
|
||||
func CapBackoff(cap time.Duration) wait.Backoff {
|
||||
if cap < 0 {
|
||||
cap = 0
|
||||
}
|
||||
return wait.Backoff{
|
||||
Steps: math.MaxInt,
|
||||
Duration: 10 * time.Millisecond,
|
||||
Cap: cap,
|
||||
Factor: retry.DefaultBackoff.Factor,
|
||||
Jitter: retry.DefaultBackoff.Jitter,
|
||||
}
|
||||
}
|
||||
|
||||
// RetryOnRetriableMaxBackOff accepts a patch function param, retrying when the provided retriable function returns true.
|
||||
func RetryOnRetriableMaxBackOff(maxDuration time.Duration, fn func() error, retriable func(error) bool) error {
|
||||
return retry.OnError(CapBackoff(maxDuration), func(err error) bool { return retriable(err) }, fn)
|
||||
}
|
||||
|
||||
// RetryOnErrorMaxBackOff accepts a patch function param, retrying when the error is not nil.
|
||||
func RetryOnErrorMaxBackOff(maxDuration time.Duration, fn func() error) error {
|
||||
return RetryOnRetriableMaxBackOff(maxDuration, fn, func(err error) bool { return err != nil })
|
||||
}
|
||||
```
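As a usage illustration, a status patch in a reconciler could then be wrapped as sketched below; the surrounding variable names (`r`, `backup`, `original`) are hypothetical:
```go
// Sketch of replacing a direct status patch with the retrying helper, using
// the resourceTimeout value taken from the --resource-timeout server flag.
original := backup.DeepCopy()
backup.Status.Phase = velerov1api.BackupPhaseCompleted
backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()}

if err := kube.PatchResourceWithRetriesOnErrors(r.resourceTimeout, original, backup, r.client); err != nil {
	r.log.WithError(err).Error("error updating backup's final status")
}
```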
|
||||
|
||||
## Alternatives Considered
|
||||
- Requeuing as failed the InProgress backups that are not known by the current velero instance to still be in progress (attempted in [#7863](https://github.com/vmware-tanzu/velero/pull/7863))
|
||||
- It was deemed to make the backup/restore flow hard to enhance for future reconciler updates such as adding cancellation or parallel backups.
|
||||
|
||||
## Security Considerations
|
||||
None
|
||||
|
||||
## Compatibility
|
||||
Retries should only be triggered for a restore or backup that is already in progress in the current instance and not patching successfully. Prior InProgress backups/restores will not be re-processed and will remain stuck InProgress until there is another velero server (re)start.
|
||||
|
||||
## Implementation
|
||||
There is a past implementation in [#7845](https://github.com/vmware-tanzu/velero/pull/7845/) upon which the implementation of this design will be based.
|
||||
|
|
@ -71,6 +71,20 @@ type ScheduleSpec struct {
|
|||
}
|
||||
```
|
||||
|
||||
**Note:** The Velero server automatically patches the `skipImmediately` field back to `false` after it's been used. This is because `skipImmediately` is designed to be a one-time operation rather than a persistent state. When the controller detects that `skipImmediately` is set to `true`, it:
|
||||
1. Sets the flag back to `false`
|
||||
2. Records the current time in `schedule.Status.LastSkipped`
|
||||
|
||||
This "consume and reset" pattern ensures that after skipping one immediate backup, the schedule returns to normal behavior for subsequent runs. The `LastSkipped` timestamp is then used to determine when the next backup should run.
|
||||
|
||||
```go
|
||||
// From pkg/controller/schedule_controller.go
|
||||
if schedule.Spec.SkipImmediately != nil && *schedule.Spec.SkipImmediately {
|
||||
*schedule.Spec.SkipImmediately = false
|
||||
schedule.Status.LastSkipped = &metav1.Time{Time: c.clock.Now()}
|
||||
}
|
||||
```
|
||||
|
||||
`LastSkipped` will be added to `ScheduleStatus` struct to track the last time a schedule was skipped.
|
||||
```diff
|
||||
// ScheduleStatus captures the current state of a Velero schedule
|
||||
|
@ -97,6 +111,8 @@ type ScheduleStatus struct {
|
|||
}
|
||||
```
|
||||
|
||||
The `LastSkipped` field is crucial for the schedule controller to determine the next run time. When a backup is skipped, this timestamp is used instead of `LastBackup` to calculate when the next backup should occur, ensuring the schedule maintains its intended cadence even after skipping a backup.
|
||||
|
||||
When `schedule.spec.SkipImmediately` is `true`, `LastSkipped` will be set to the current time, and `schedule.spec.SkipImmediately` is set back to `false` so it can be used again.
|
||||
|
||||
The `getNextRunTime()` function below is updated so that `LastSkipped`, when it is after `LastBackup`, will be used to determine the next run time.
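Since the full function is not shown here, below is a minimal sketch (not the actual implementation) of how `getNextRunTime()` could take `LastSkipped` into account; the surrounding types follow the `Schedule` API described above:
```go
// getNextRunTime sketches how the baseline for the cron calculation could be
// the later of LastBackup and LastSkipped, so a skipped run keeps the cadence.
func getNextRunTime(schedule *velerov1api.Schedule, cronSchedule cron.Schedule, asOf time.Time) (bool, time.Time) {
	var lastRun time.Time
	if schedule.Status.LastBackup != nil {
		lastRun = schedule.Status.LastBackup.Time
	}
	// If the last skip happened after the last backup, use it as the baseline.
	if schedule.Status.LastSkipped != nil && schedule.Status.LastSkipped.Time.After(lastRun) {
		lastRun = schedule.Status.LastSkipped.Time
	}
	if lastRun.IsZero() {
		lastRun = schedule.CreationTimestamp.Time
	}

	nextRun := cronSchedule.Next(lastRun)
	return asOf.After(nextRun), nextRun
}
```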
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
# Adding Support For VolumeAttributes in Resource Policy
|
||||
|
||||
## Abstract
|
||||
Currently, [Velero resource policies](https://velero.io/docs/main/resource-filtering/#creating-resource-policies) only support filtering on `driver` for [CSI volume conditions](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28).
|
||||
|
||||
If users want to skip certain CSI volumes based on other volume attributes such as protocol or SKU, they can't do it with the current Velero resource policies. It would be convenient if resource policies could be extended to filter on volume attributes, along with the existing driver filter in the policy `conditions`, so that backups of volumes can be controlled by specific volume attribute conditions.
|
||||
|
||||
## Background
|
||||
Today, Velero resource policies already provide a way to filter volumes based on the `driver` name, but that is not enough to handle volumes based on other volume attributes such as protocol or SKU.
|
||||
|
||||
## Example:
|
||||
- Provision Azure NFS: Define the Storage class with `protocol: nfs` under storage class parameters to provision [CSI NFS Azure File Shares](https://learn.microsoft.com/en-us/azure/aks/azure-files-csi#nfs-file-shares).
|
||||
- The user wants to back up AFS (Azure file shares) but only wants to back up `SMB` file share volumes and not `NFS` file share volumes.
|
||||
|
||||
## Goals
|
||||
- We are only adding support in the resource policy for handling volumes during backup.
|
||||
- Introducing support for `VolumeAttributes` filter along with `driver` filter in CSI volume conditions to handle volumes.
|
||||
|
||||
## Non-Goals
|
||||
- This design only handles volumes; it does not cover other resources.
|
||||
|
||||
## Use-cases/Scenarios
|
||||
### Skip backup volumes by some volume attributes:
|
||||
Users want to skip PV with the requirements:
|
||||
- option to skip specified PV on volume attributes type (like Protocol as NFS, SMB, etc)
|
||||
|
||||
### Sample Storage Class Used to create such Volumes
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: azurefile-csi-nfs
|
||||
provisioner: file.csi.azure.com
|
||||
allowVolumeExpansion: true
|
||||
parameters:
|
||||
protocol: nfs
|
||||
```
|
||||
|
||||
## High-Level Design
|
||||
Modifying the existing Resource Policies code for [csiVolumeSource](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources_validator.go#L28C6-L28C22) to add the new `VolumeAttributes` filter for CSI volumes and adding validations in existing [csiCondition](https://github.com/vmware-tanzu/velero/blob/8e23752a6ea83f101bd94a69dcf17f519a805388/internal/resourcepolicies/volume_resources.go#L150) to match with volume attributes in the conditions from Resource Policy config map and original persistent volume.
|
||||
|
||||
## Detailed Design
|
||||
The volume resource policies contain a list of policies, each a combination of conditions and a related `action`; when a target volume meets the conditions, the related `action` takes effect.
|
||||
|
||||
Below is the API Design for the user configuration:
|
||||
|
||||
### API Design
|
||||
```go
|
||||
type csiVolumeSource struct {
|
||||
Driver string `yaml:"driver,omitempty"`
|
||||
// [NEW] CSI volume attributes
|
||||
VolumeAttributes map[string]string `yaml:"volumeAttributes,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
The policies YAML config file would look like this:
|
||||
```yaml
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disk.csi.azure.com
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: file.csi.azure.com
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
        type: skip
|
||||
```
|
||||
|
||||
### New Supported Conditions
|
||||
#### VolumeAttributes
|
||||
The existing CSI volume condition can now include `volumeAttributes`, which is a set of key/value pairs.
|
||||
|
||||
Specify details for the related volume source (previously the CSI `driver` was the only supported filter):
|
||||
```yaml
|
||||
csi: # match volumes provisioned by `file.csi.azure.com` with the volumeAttribute protocol set to nfs
|
||||
driver: file.csi.azure.com
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
```
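To illustrate the validation change, the matching logic of the existing `csiCondition` could be extended roughly as follows (a sketch only; the concrete code lives in `internal/resourcepolicies/volume_resources.go`):

```go
func (c *csiCondition) match(v *structuredVolume) bool {
	if c.csi == nil || c.csi.Driver == "" {
		return true // no CSI condition configured
	}
	if v.csi == nil {
		return false // the volume is not a CSI volume
	}
	if c.csi.Driver != v.csi.Driver {
		return false
	}
	// [NEW] every configured volume attribute must exist on the volume
	// with exactly the same value
	for key, value := range c.csi.VolumeAttributes {
		if v.csi.VolumeAttributes[key] != value {
			return false
		}
	}
	return true
}
```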
|
|
@ -0,0 +1,223 @@
|
|||
# VGDP Micro Service For Volume Snapshot Data Movement
|
||||
|
||||
## Glossary & Abbreviation
|
||||
|
||||
**VGDP**: Velero Generic Data Path. The collection of modules introduced in the [Unified Repository design][1]. Velero uses these modules to complete data transmission for various purposes. It includes the uploaders and the backup repository.
|
||||
**Volume Snapshot Data Movement**: The backup/restore method introduced in [Volume Snapshot Data Movement design][2]. It backs up snapshot data from the volatile and limited production environment into the durable, heterogeneous and scalable backup storage.
|
||||
**VBDM**: Velero Built-in Data Mover as introduced in [Volume Snapshot Data Movement design][2], it is the built-in data mover shipped along with Velero.
|
||||
**Exposer**: Exposer is introduced in [Volume Snapshot Data Movement design][2] and is used to expose the volume snapshots/target volumes for VGDP to access locally.
|
||||
|
||||
## Background
|
||||
As described in the architecture of the [Volume Snapshot Data Movement design][2], VGDP instances run inside the node-agent pods. However, more and more use cases require running the VGDP instances in dedicated pods, in other words, making them micro services. The benefits are as below:
|
||||
- This avoids VGDP accessing volume data through the host path; host path access requires privilege escalation in some environments (e.g., running in privileged mode), which creates challenges for users.
|
||||
- This enables users to control resource (i.e., CPU, memory) requests/limits in a granular manner, e.g., per backup/restore of a volume.
|
||||
- This increases resilience: a crash of one VGDP activity won't affect others.
|
||||
- In cases where the backup storage must be represented by a Kubernetes persistent volume (e.g., NFS storage, [COSI][3]), this avoids dynamically mounting the persistent volume to node-agent pods and causing the node-agent pods to restart (which is not acceptable, since node-agent loses its current state after its pods restart).
|
||||
- This prevents unnecessary full backups. Velero's fs uploaders support file-level incremental backup by comparing file names and metadata. However, at present files are accessed via the host path, and the pod and PVC IDs are part of that path, so once the pod is recreated, the same file is regarded as a different file because the pod's ID has changed. If the fs uploader runs in a dedicated pod and files are accessed via the pod's volume path, files' full paths do not change after the pod restarts, so incremental backups can continue.
|
||||
|
||||
## Goals
|
||||
- Create a solution to run VGDP instances as micro services
|
||||
- Modify the VBDM to offload the VGDP work from node-agent to the VGDP micro service
|
||||
- Create the mechanism for VBDM to control and monitor the VGDP micro services in various scenarios
|
||||
|
||||
## Non-Goals
|
||||
- The current solution covers the Volume Snapshot Data Movement backup/restore type only, even though VGDP is also used by pod volume backup. It is less feasible to do this for pod volume backup, since it must run inside the source workload pods.
|
||||
- The current solution covers VBDM only. 3rd data movers still follow the **Replacement** section of [Volume Snapshot Data Movement design][2]. That is, 3rd data movers handle the DUCR/DDCR on their own and they are free to make themselves micro service style or monolith service style.
|
||||
|
||||
|
||||
## Overview
|
||||
The solution is based on the [Volume Snapshot Data Movement design][2]; the architecture is followed as is, and existing components are not changed unless necessary.
|
||||
Below lists the changed components, why and how:
|
||||
**Exposer**: The exposer exposes the snapshot/target volume as a path/device name/endpoint recognizable by VGDP. Depending on the type of snapshot/target volume, a pod may be created as part of the expose. Now, since we run the VGDP instance in a separate pod, a pod is created in every case, so we assume the exposer always creates a pod and makes the appropriate exposing configurations on the pod so that the VGDP instance can access the snapshot/target volume locally inside the pod. The pod is still called the backupPod or restorePod.
|
||||
Then we need to change the command the backupPod/restorePod runs; the command launches VGDP-MS (VGDP Micro Service, see below) when the container starts up.
|
||||
For CSI snapshots, the backupPod/restorePod is already created as the result of the expose, so the only thing left is to change the backupPod/restorePod's image.
|
||||
**VBDM**: VBDM contains the data mover controller, and the controller calls the Exposer and launches the VGDP instances. Now, since the VGDP instance is launched by the backupPod/restorePod, the controller should not launch the VGDP instance again. However, the controller still needs to monitor and control the VGDP instance. Moreover, in order to avoid any contention, the controller remains the only place where DUCRs and DDCRs are updated.
|
||||
|
||||
Besides the changes to the above existing components, we need to add the new components below:
|
||||
**VGDP Watcher**: We create a new module to help the data mover controller watch activities of the VGDP instance in the backupPod/restorePod. VGDP Watcher is a part of VBDM.
|
||||
**VGDP-MS**: VGDP Micro Service is the binary for the command backupPod/restorePod runs. It accepts the parameters and then launches the VGDP instance according to the request type, specifically, backup or restore. VGDP-MS also runs other modules to sync-up with the data mover controller. VGDP-MS is also a part of VBDM.
|
||||
|
||||
Below diagram shows how these components work together:
|
||||

|
||||
|
||||
The [Node-agent concurrency][4] mechanism is still used to control the concurrency of VGDP micro services. When there are too many volumes in a backup/restore, which would take too many computing resources (CPU, memory, etc.) or Kubernetes resources (pods, PVCs, PVs, etc.), users can set the concurrency on each node so as to control the total number of concurrent VGDP micro services in the cluster.
|
||||
|
||||
## Detailed Design
|
||||
### Exposer
|
||||
At present, the exposer creates backupPod/restorePod and sets ```velero-helper pause``` as the command run by backupPod/restorePod.
|
||||
Now, the VGDP-MS command will be used, and the ```velero``` image will run inside the backupPod/restorePod. The command looks like below:
|
||||
```velero data-mover backup --volume-path xxx --volume-mode xxx --data-upload xxx --resource-timeout xxx --log-format xxx --log-level xxx```
|
||||
Or:
|
||||
```velero data-mover restore --volume-path xxx --volume-mode xxx --data-download xxx --resource-timeout xxx --log-format xxx --log-level xxx```
|
||||
|
||||
The first one is for backup and the other one is for restore.
|
||||
Below are the parameters of the commands:
|
||||
**volume-path**: Deliver the full path inside the backupPod/restorePod for the volume to be backed up/restored.
|
||||
**volume-mode**: Deliver the mode for the volume to be backed up/restored, at present either ```Filesystem``` mode or ```Block``` mode.
|
||||
**data-upload**: DUCR for this backup.
|
||||
**data-download**: DDCR for this restore.
|
||||
**resource-timeout**: resource-timeout is used to control the timeout for operations related to resources. It has the same meaning as the resource-timeout for node-agent.
|
||||
**log-format** and **log-level**: This is to control the behavior of log generation inside VGDP-MS.
|
||||
|
||||
In order to have the same capabilities and permissions as node-agent, the pod configurations below are inherited from node-agent and set in the backupPod/restorePod's spec:
|
||||
- Volumes: Some configMaps are mapped as volumes into node-agent, so we add the same volumes to the backupPod/restorePod
|
||||
- Environment Variables
|
||||
- Security Contexts
|
||||
We may not actually need all the node-agent capabilities in VGDP-MS. At present, we simply duplicate all of them; if we find any problem in the future, we can filter out the capabilities that are not required by VGDP-MS.
|
||||
The backupPod/restorePod does not run in privileged mode, since it is not required: the volumes are accessed via the pod path.
|
||||
The root user is still required, especially by the restore (in order to restore the file system attributes, owners, etc.), so we will use root user for backupPod/restorePod.
|
||||
We set the backupPod/restorePod's ```RestartPolicy``` to ```RestartPolicyNever```, so that once VGDP-MS terminates for any reason, the backupPod/restorePod won't restart and the DUCR/DDCR is marked as one of the terminal phases (Completed/Failed/Cancelled) accordingly.
|
||||
|
||||
|
||||
### VGDP Watcher
|
||||
#### Dual mode event watch
|
||||
The primary task of VGDP Watcher is to watch status changes from the backupPod/restorePod or the VGDP instance, so as to inform the data mover controller in the situations below:
|
||||
- backupPod/restorePod starts
|
||||
- VGDP instance starts
|
||||
- Progress update
|
||||
- VGDP instance completes/fails/cancelled
|
||||
- backupPod/restorePod stops
|
||||
|
||||
We use two mechanisms to implement the watch:
|
||||
**Pod Phases**: VGDP Watcher watches the backupPod/restorePod's phases updated by Kubernetes. That is, VGDP Watcher creates an informer to watch the pod resource for the backupPod/restorePod and detects that the pod reaches one of the terminal phases (i.e., PodSucceeded, PodFailed). We also check the availability & status of the backupPod/restorePod at the beginning of the watch so as to detect the start of the backupPod/restorePod.
|
||||
**Custom Kubernetes Events**: VGDP-MS generates Kubernetes events and associates them with the DUCR/DDCR when the VGDP instance starts/stops and on progress updates; VGDP Watcher then creates another informer to watch the Event resources associated with the DUCR/DDCR.
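For illustration, the event generation could be as simple as the following (the recorder, CR variable, and reason string are placeholders, not the final names):

```go
// Emit a progress event associated with the DataUpload CR so the VGDP
// Watcher's event informer can pick it up.
recorder.Eventf(ducr, corev1.EventTypeNormal, "DataUploadProgress",
	"totalBytes=%d, bytesDone=%d", progress.TotalBytes, progress.BytesDone)
```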
|
||||
|
||||
The Pod Phases watch covers the entire lifecycle of the backupPod/restorePod, but we can't learn the status of the VGDP instance through it, and it can only deliver information by the end of the pod lifecycle.
|
||||
The Custom Event watch delivers details of the VGDP instances, and the events can be generated at any time; but it cannot generate notifications before VGDP starts, or in the case that VGDP crashes or shuts down abnormally.
|
||||
|
||||
Therefore, we adopt both mechanisms in VGDP Watcher. In the end, there will be two sources generating the result of VGDP-MS:
|
||||
- The termination message of backupPod/restorePod
|
||||
- The message along with the VGDP Instance Completes/Fails/Cancelled event
|
||||
|
||||
On the one hand, in some cases only the backupPod/restorePod's termination message is available, e.g., when the backupPod/restorePod crashes or quits before the VGDP instance is started. So we rely on the first mechanism to get those notifications.
|
||||
On the other hand, if both are available, we can use their results for mutual verification.
|
||||
|
||||
In conclusion, with the help of VGDP Watcher, the data mover controller starts VGDP-MS in a controlled manner and waits until VGDP-MS ends under any circumstance.
|
||||
|
||||
#### AsyncBR adapter
|
||||
VGDP Watcher needs to notify the data mover controller when one of the watched events happens, so that the controller can perform the same operations as if it received the callbacks directly from VGDP, matching the current behavior. In order not to break the existing code logic of the data mover controllers, we make VGDP Watcher an adapter of AsyncBR, which is the interface implemented by VGDP and called by the data mover controller.
|
||||
Since the parameters for calling VGDP Watcher differ from the ones for calling VGDP, we change the AsyncBR interface to hide some parameters from one another. The new interface is as below:
|
||||
```go
|
||||
type AsyncBR interface {
|
||||
// Init initializes an asynchronous data path instance
|
||||
Init(ctx context.Context, res *exposer.ExposeResult, param interface{}) error
|
||||
|
||||
// StartBackup starts an asynchronous data path instance for backup
|
||||
StartBackup(dataMoverConfig map[string]string, param interface{}) error
|
||||
|
||||
// StartRestore starts an asynchronous data path instance for restore
|
||||
StartRestore(snapshotID string, dataMoverConfig map[string]string) error
|
||||
|
||||
// Cancel cancels an asynchronous data path instance
|
||||
Cancel()
|
||||
|
||||
// Close closes an asynchronous data path instance
|
||||
Close(ctx context.Context)
|
||||
}
|
||||
```
|
||||
Some parameters are hidden inside ```param```, but the functions and calling logic are not changed.
|
||||
|
||||
VGDP Watcher should be launched by the data mover controller before the VGDP instance starts; otherwise, several corner-case problems may happen. E.g., VGDP-MS may run the VGDP instance immediately after the backupPod/restorePod is launched and complete it before the data mover controller starts VGDP Watcher; as a result, multiple notifications from VGDP Watcher would be missed.
|
||||
Therefore, the controller launches VGDP Watcher first and then sets the DUCR/DDCR to ```InProgress```; on the other hand, VGDP-MS waits until the DUCR/DDCR turns to ```InProgress``` before running the VGDP instance.
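A sketch of the waiting side in VGDP-MS, assuming a controller-runtime client and the existing DataUpload phase constants (the variable names are illustrative of the idea, not the exact code):

```go
// Poll the DUCR until the data mover controller marks it InProgress,
// which also guarantees that VGDP Watcher is already running.
err := wait.PollUntilContextTimeout(ctx, time.Second, resourceTimeout, true,
	func(ctx context.Context) (bool, error) {
		du := &velerov2alpha1.DataUpload{}
		if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: duName}, du); err != nil {
			return false, errors.Wrap(err, "error getting DataUpload")
		}
		return du.Status.Phase == velerov2alpha1.DataUploadPhaseInProgress, nil
	})
```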
|
||||
|
||||
### VGDP-MS
|
||||
VGDP-MS is represented by the ```velero data-mover``` subcommand and has its own subcommands ```backup``` and ```restore```.
|
||||
Below diagram shows the VGDP-MS workflow:
|
||||

|
||||
|
||||
**Start DUCR/DDCR Watcher**: VGDP-MS needs to watch the corresponding DUCR/DDCR so as to react to events happening to it. E.g., when the data movement is cancelled, a ```Cancel``` flag is set on the DUCR/DDCR; by watching the DUCR/DDCR, VGDP-MS is able to see it and cancel the VGDP instance.
|
||||
**Wait DUCR/DDCR InProgress**: As mentioned above, VGDP-MS won't start the VGDP instance until DUCR/DDCR turns to ```InProgress```, by which time VGDP Watcher has been started.
|
||||
**Record VGDP Starts**: This generates the VGDP Instance Starts event.
|
||||
**VGDP Callbacks**: When VGDP comes to one of the terminal states (i.e., completed, failed, cancelled), the corresponding callback is called.
|
||||
**Record VGDP Ends**: This generates the VGDP Instance Completes/Fails/Cancelled event, and also generates backupPod/restorePod termination message.
|
||||
**Record VGDP Progress**: This periodically generates/updates the Progress event with totalBytes/bytesDone to indicate the progress of the data movement.
|
||||
**Set VGDP Output**: This writes the termination message to the backupPod/restorePod's termination log (by default, it is written to ```/dev/termination-log```).
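A minimal sketch of this step (error handling abbreviated; the path is the Kubernetes default termination log location):

```go
// Marshal the result and write it to the container termination log so the
// data mover controller can read it back from the pod status.
message, err := json.Marshal(result)
if err != nil {
	return errors.Wrap(err, "error marshalling data mover result")
}
if err := os.WriteFile("/dev/termination-log", message, 0644); err != nil {
	return errors.Wrap(err, "error writing termination message")
}
```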
|
||||
|
||||
If VGDP completes, the VGDP Instance Completes event and the backupPod/restorePod termination share the same message, as below:
|
||||
```go
|
||||
type BackupResult struct {
|
||||
SnapshotID string `json:"snapshotID"`
|
||||
EmptySnapshot bool `json:"emptySnapshot"`
|
||||
Source exposer.AccessPoint `json:"source,omitempty"`
|
||||
}
|
||||
```
|
||||
```go
|
||||
type RestoreResult struct {
|
||||
Target exposer.AccessPoint `json:"target,omitempty"`
|
||||
}
|
||||
```
|
||||
```go
|
||||
type AccessPoint struct {
|
||||
ByPath string `json:"byPath"`
|
||||
VolMode uploader.PersistentVolumeMode `json:"volumeMode"`
|
||||
}
|
||||
```
|
||||
|
||||
The existing VGDP result structures are reused; we just add the JSON tags so that they can be marshalled.
|
||||
|
||||
As mentioned above, once VGDP-MS ends in any way, the backupPod/restorePod terminates and never restarts, so the end of VGDP-MS means the end of DU/DD.
|
||||
|
||||
For Progress update, the existing Progress structure is being reused:
|
||||
```go
|
||||
type Progress struct {
|
||||
TotalBytes int64 `json:"totalBytes,omitempty"`
|
||||
BytesDone int64 `json:"doneBytes,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
### Log Collection
|
||||
While the VGDP instance runs, logs that are important for troubleshooting are generated, including all the logs produced by the uploader and the repository. Therefore, it is important to collect these logs.
|
||||
On the other hand, the logs are now generated in the backupPod/restorePod, while the backupPod/restorePod is deleted immediately after the data movement completes. Therefore, by default, ```velero debug``` is not able to collect these logs.
|
||||
|
||||
As a solution, we use logrus's hook mechanism to redirect the backupPod/restorePod's logs into node-agent's log, so that ```velero debug``` could collect VGDP logs as is without any changes.
|
||||
|
||||
Below diagram shows how VGDP logs are redirected:
|
||||

|
||||
|
||||
This log redirection mechanism is thread safe since the hook acquires a write lock before writing to the log buffer; this guarantees that the node-agent log is not corrupted by the redirection, and the redirected logs and the original node-agent logs are not interleaved into each other's entries.
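For illustration, the redirection can be sketched as a logrus hook that re-emits, under a lock, every entry read from the backupPod/restorePod's log stream through node-agent's own logger (the type and field names below are assumptions, not the actual implementation):

```go
// logRedirectHook forwards every received entry to the node-agent logger.
type logRedirectHook struct {
	mu     sync.Mutex
	target *logrus.Logger // node-agent's logger
}

func (h *logRedirectHook) Levels() []logrus.Level { return logrus.AllLevels }

func (h *logRedirectHook) Fire(entry *logrus.Entry) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	// Re-emit through the node-agent logger at the original level with the
	// original fields, so VGDP logs appear inline in the node-agent log.
	h.target.WithFields(entry.Data).Log(entry.Level, entry.Message)
	return nil
}
```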
|
||||
|
||||
### Resource Control
|
||||
The CPU/memory resources of the backupPod/restorePod are configurable, which means users can configure resources per volume backup/restore.
|
||||
By default, the [Best Effort policy][5] is used, and users are allowed to change it through the ConfigMap specified by `velero node-agent` CLI's parameter `--node-agent-configmap`. Specifically, we add below structures to the ConfigMap:
|
||||
```go
|
||||
type Configs struct {
|
||||
// PodResources is the resource config for various types of pods launched by node-agent, i.e., data mover pods.
|
||||
PodResources *PodResources `json:"podResources,omitempty"`
|
||||
}
|
||||
|
||||
type PodResources struct {
|
||||
CPURequest string `json:"cpuRequest,omitempty"`
|
||||
MemoryRequest string `json:"memoryRequest,omitempty"`
|
||||
CPULimit string `json:"cpuLimit,omitempty"`
|
||||
MemoryLimit string `json:"memoryLimit,omitempty"`
|
||||
}
|
||||
```
|
||||
The string values must match Kubernetes Quantity expressions; for each resource, the "request" value must not be larger than the "limit" value. Otherwise, if any one of the values fails validation, all the resource configurations will be ignored.
|
||||
|
||||
The configurations are loaded by node-agent at start time, so users can change the values in the configMap at any time, but the changes won't take effect until node-agent restarts.
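For reference, the `podResources` section of such a ConfigMap might look like the following (illustrative values only; the wrapping of the JSON document follows the node-agent configuration conventions):

```json
{
    "podResources": {
        "cpuRequest": "1000m",
        "cpuLimit": "2000m",
        "memoryRequest": "1Gi",
        "memoryLimit": "4Gi"
    }
}
```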
|
||||
|
||||
|
||||
## node-agent
|
||||
node-agent is still required. Even though VGDP no longer runs inside node-agent, node-agent still hosts the data mover controller, which reconciles DUCRs/DDCRs and operates on them in the steps before the VGDP instance is started, i.e., Accept, Expose, etc.
|
||||
Privileged mode and the root user are no longer required for node-agent by Volume Snapshot Data Movement; however, they are still required by PVB (PodVolumeBackup) and PVR (PodVolumeRestore). Therefore, we will keep the node-agent daemonset as is; users who don't use PVB/PVR and have concerns about the privileged mode/root user need to manually modify the daemonset spec to remove those dependencies.
|
||||
|
||||
## CRD Changes
|
||||
There are no changes to any CRD.
|
||||
|
||||
## Installation Changes
|
||||
No changes to installation; the backupPod/restorePod's configurations are all inherited from node-agent.
|
||||
|
||||
## Upgrade
|
||||
Upgrade is not impacted.
|
||||
|
||||
## CLI
|
||||
CLI is not changed.
|
||||
|
||||
|
||||
|
||||
[1]: ../unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: ../volume-snapshot-data-movement/volume-snapshot-data-movement.md
|
||||
[3]: https://kubernetes.io/blog/2022/09/02/cosi-kubernetes-object-storage-management/
|
||||
[4]: ../node-agent-concurrency.md
|
||||
[5]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
|
||||
|
|
@ -0,0 +1,202 @@
|
|||
# Add Label Selector as a criteria for Volume Policy
|
||||
|
||||
## Abstract
|
||||
Velero’s volume policies currently support several criteria (such as capacity, storage class, and volume source type) to select volumes for backup. This update extends the design by allowing users to specify required labels on the associated PersistentVolumeClaim (PVC) via a simple key/value map. At runtime, Velero looks up the PVC (when a PV has a ClaimRef), extracts its labels, and compares them with the user-specified map. If all key/value pairs match, the volume qualifies for backup.
|
||||
|
||||
## Background
|
||||
PersistentVolumes (PVs) in Kubernetes are typically bound to PersistentVolumeClaims (PVCs) that include labels (for example, indicating environment, application, or region). Basing backup policies on these PVC labels enables more precise control over which volumes are processed.
|
||||
|
||||
## Goals
|
||||
- Allow users to specify a simple key/value mapping in the volume policy YAML so that only volumes whose associated PVCs contain those labels are selected.
|
||||
- Support policies that target volumes based on criteria such as environment=production or region=us-west.
|
||||
|
||||
## Non-Goals
|
||||
- No changes will be made to the actions (skip, snapshot, fs-backup) of the volume policy engine. This update focuses solely on how volumes are selected.
|
||||
- The design does not support other label selector operations (e.g., NotIn, Exists, DoesNotExist) and only allows for exact key/value matching.
|
||||
|
||||
## Use-cases/scenarios
|
||||
1. Environment-Specific Backup:
|
||||
- A user wishes to back up only those volumes whose associated PVCs have labels such as `environment=production` and `app=database`.
|
||||
- The volume policy specifies a pvcLabels map with those key/value pairs; only volumes whose PVCs match are processed.
|
||||
```yaml
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: database
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
2. Region-Specific Backup:
|
||||
- A user operating in multiple regions wants to back up only volumes in the `us-west` region.
|
||||
- The policy includes `pvcLabels: { region: us-west }`, so only PVs bound to PVCs with that label are selected.
|
||||
```yaml
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
region: us-west
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
3. Automated Label-Based Backups:
|
||||
- An external system automatically labels new PVCs (for example, `backup: true`).
|
||||
- A volume policy with `pvcLabels: { backup: true }` ensures that any new volume whose PVC contains that label is included in backup operations.
|
||||
```yaml
|
||||
version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
backup: true
|
||||
action:
|
||||
type: snapshot
|
||||
```
|
||||
## High-Level Design
|
||||
|
||||
1. Extend Volume Policy Schema:
|
||||
- The YAML schema for volume conditions is extended to include an optional field pvcLabels of type `map[string]string`.
|
||||
2. Implement New Condition Type:
|
||||
- A new condition, `pvcLabelsCondition`, is created. It implements the `volumeCondition` interface and simply compares the user-specified key/value pairs with the actual PVC labels (populated at runtime).
|
||||
3. Update Structured Volume:
|
||||
- The internal representation of a volume (`structuredVolume`) is extended with a new field `pvcLabels map[string]string` to store the labels from the associated PVC.
|
||||
- A new helper function (or an updated parsing function) is used to perform a PVC lookup when a PV has a ClaimRef, populating the pvcLabels field.
|
||||
4. Integrate with Policy Engine:
|
||||
- The policy builder is updated to create and add a `pvcLabelsCondition` if the policy YAML contains a `pvcLabels` entry.
|
||||
- The matching entry point uses the updated `structuredVolume` (populated with PVC labels) to evaluate all conditions, including the new PVC labels condition.
|
||||
## Detailed Design
|
||||
|
||||
1. Update Volume Conditions Schema: Define the conditions struct with a simple map for PVC labels:
|
||||
```go
|
||||
// volumeConditions defines the current format of conditions we parse.
|
||||
type volumeConditions struct {
|
||||
Capacity string `yaml:"capacity,omitempty"`
|
||||
StorageClass []string `yaml:"storageClass,omitempty"`
|
||||
NFS *nFSVolumeSource `yaml:"nfs,omitempty"`
|
||||
CSI *csiVolumeSource `yaml:"csi,omitempty"`
|
||||
VolumeTypes []SupportedVolume `yaml:"volumeTypes,omitempty"`
|
||||
// New field: pvcLabels for simple exact-match filtering.
|
||||
PVCLabels map[string]string `yaml:"pvcLabels,omitempty"`
|
||||
}
|
||||
```
|
||||
2. New Condition: `pvcLabelsCondition`: Implement a condition that compares expected labels with those on the PVC:
|
||||
```go
|
||||
// pvcLabelsCondition defines a condition that matches if the PVC's labels contain all the specified key/value pairs.
|
||||
type pvcLabelsCondition struct {
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) match(v *structuredVolume) bool {
|
||||
if len(c.labels) == 0 {
|
||||
return true // No label condition specified; always match.
|
||||
}
|
||||
if v.pvcLabels == nil {
|
||||
return false // No PVC labels found.
|
||||
}
|
||||
for key, expectedVal := range c.labels {
|
||||
if actualVal, exists := v.pvcLabels[key]; !exists || actualVal != expectedVal {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *pvcLabelsCondition) validate() error {
|
||||
// No extra validation needed for a simple map.
|
||||
return nil
|
||||
}
|
||||
```
|
||||
3. Update `structuredVolume`: Extend the internal volume representation with a field for PVC labels:
|
||||
```go
|
||||
// structuredVolume represents a volume with parsed fields.
|
||||
type structuredVolume struct {
|
||||
capacity resource.Quantity
|
||||
storageClass string
|
||||
// New field: pvcLabels stores labels from the associated PVC.
|
||||
pvcLabels map[string]string
|
||||
nfs *nFSVolumeSource
|
||||
csi *csiVolumeSource
|
||||
volumeType SupportedVolume
|
||||
}
|
||||
```
|
||||
4. Update PVC Lookup – `parsePVWithPVC`: Modify the PV parsing function to perform a PVC lookup:
|
||||
```go
|
||||
func (s *structuredVolume) parsePVWithPVC(pv *corev1.PersistentVolume, client crclient.Client) error {
|
||||
s.capacity = *pv.Spec.Capacity.Storage()
|
||||
s.storageClass = pv.Spec.StorageClassName
|
||||
|
||||
if pv.Spec.NFS != nil {
|
||||
s.nfs = &nFSVolumeSource{
|
||||
Server: pv.Spec.NFS.Server,
|
||||
Path: pv.Spec.NFS.Path,
|
||||
}
|
||||
}
|
||||
if pv.Spec.CSI != nil {
|
||||
s.csi = &csiVolumeSource{
|
||||
Driver: pv.Spec.CSI.Driver,
|
||||
VolumeAttributes: pv.Spec.CSI.VolumeAttributes,
|
||||
}
|
||||
}
|
||||
s.volumeType = getVolumeTypeFromPV(pv)
|
||||
|
||||
// If the PV is bound to a PVC, look it up and store its labels.
|
||||
if pv.Spec.ClaimRef != nil {
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
err := client.Get(context.Background(), crclient.ObjectKey{
|
||||
Namespace: pv.Spec.ClaimRef.Namespace,
|
||||
Name: pv.Spec.ClaimRef.Name,
|
||||
}, pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get PVC for PV")
|
||||
}
|
||||
s.pvcLabels = pvc.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
5. Update the Policy Builder: Add the new condition to the policy if pvcLabels is provided:
|
||||
```go
|
||||
func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
|
||||
for _, vp := range resPolicies.VolumePolicies {
|
||||
con, err := unmarshalVolConditions(vp.Conditions)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
volCap, err := parseCapacity(con.Capacity)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
var volP volPolicy
|
||||
volP.action = vp.Action
|
||||
volP.conditions = append(volP.conditions, &capacityCondition{capacity: *volCap})
|
||||
volP.conditions = append(volP.conditions, &storageClassCondition{storageClass: con.StorageClass})
|
||||
volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
|
||||
volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
|
||||
volP.conditions = append(volP.conditions, &volumeTypeCondition{volumeTypes: con.VolumeTypes})
|
||||
// If a pvcLabels map is provided, add the pvcLabelsCondition.
|
||||
if con.PVCLabels != nil && len(con.PVCLabels) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
|
||||
}
|
||||
p.volumePolicies = append(p.volumePolicies, volP)
|
||||
}
|
||||
p.version = resPolicies.Version
|
||||
return nil
|
||||
}
|
||||
```
|
||||
6. Update the Matching Entry Point: Use the updated PV parsing that performs a PVC lookup:
|
||||
```go
|
||||
func (p *Policies) GetMatchAction(res interface{}, client crclient.Client) (*Action, error) {
|
||||
volume := &structuredVolume{}
|
||||
switch obj := res.(type) {
|
||||
case *corev1.PersistentVolume:
|
||||
if err := volume.parsePVWithPVC(obj, client); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse PV with PVC lookup")
|
||||
}
|
||||
case *corev1.Volume:
|
||||
volume.parsePodVolume(obj)
|
||||
default:
|
||||
return nil, errors.New("failed to convert object")
|
||||
}
|
||||
return p.match(volume), nil
|
||||
}
|
||||
```
|
||||
|
||||
Note: The matching loop (p.match(volume)) iterates over all conditions (including our new pvcLabelsCondition) and returns the corresponding action if all conditions match.
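For completeness, a simplified sketch of that loop (the existing engine already works this way; only the condition list grows):

```go
func (p *Policies) match(res *structuredVolume) *Action {
	for _, policy := range p.volumePolicies {
		isAllConditionsMatch := true
		for _, con := range policy.conditions {
			if !con.match(res) {
				isAllConditionsMatch = false
				break
			}
		}
		if isAllConditionsMatch {
			return &policy.action
		}
	}
	return nil
}
```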
|
|
@ -27,7 +27,7 @@ Moreover, we would like to create a general workflow to variations during the da
|
|||
- Support different data accesses, i.e., file system level and block level
|
||||
- Support different snapshot types, i.e., CSI snapshot, volume snapshot API from storage vendors
|
||||
- Support different snapshot accesses, i.e., through PV generated from snapshots, and through direct access API from storage vendors
|
||||
- Reuse the existing Velero generic data path as creatd in [Unified Repository design][1]
|
||||
- Reuse the existing Velero generic data path as created in [Unified Repository design][1]
|
||||
|
||||
## Non-Goals
|
||||
|
||||
|
@ -37,7 +37,7 @@ Moreover, we would like to create a general workflow to variations during the da
|
|||
|
||||
## Architecture of Volume Snapshot Data Movement
|
||||
|
||||
## Workflows
|
||||
### Workflows
|
||||
|
||||
Here are the diagrams that illustrate components and workflows for backup and restore respectively.
|
||||
For backup, we intend to create an extensive architecture for various snapshot types, snapshot accesses and various data accesses. For example, the snapshot specific operations are isolated in Data Mover Plugin and Exposer. In this way, we only need to change the two modules for variations. Likewise, the data access details are isolated into uploaders, so different uploaders could be plugged into the workflow seamlessly.
|
||||
|
@ -52,7 +52,7 @@ Below is the backup workflow:
|
|||
Below is the restore workflow:
|
||||

|
||||
|
||||
## Components
|
||||
### Components
|
||||
Below are the generic components in the data movement workflow:
|
||||
|
||||
**Velero**: Velero controls the backup/restore workflow, it calls BIA/RIA V2 to backup/restore an object that involves data movement, specifically, a PVC or a PV.
|
||||
|
@ -69,13 +69,13 @@ DMs take the responsibility to handle DUCR/DDCRs, Velero provides a built-in DM
|
|||
**Velero Generic Data Path (VGDP)**: VGDP is the collective of modules that is introduced in [Unified Repository design][1]. Velero uses these modules to finish data transmission for various purposes. It includes uploaders and the backup repository.
|
||||
**Uploader**: The uploader is the module in VGDP that reads data from the source and writes to the backup repository for backup, and reads data from the backup repository and writes to the restore target for restore. At present, only the file system uploader is supported; a block-level uploader will be added in the future. For the file system and basic block uploader, only the Kopia uploader will be used; Restic will not be integrated with VBDM.
|
||||
|
||||
## Replacement
|
||||
### Replacement
|
||||
3rd parties could integrate their own data movement into Velero by replacing VBDM with their own DMs. The DMs should process DUCR/DDCRs appropriately and finally put them into one of the terminal states as shown in the DataUpload CRD and DataDownload CRD sections.
|
||||
Theoretically, replacing the DMP is also allowed. In this way, the entire workflow is customized, so this is out of the scope of this design.
|
||||
|
||||
# Detailed Design
|
||||
## Detailed Design
|
||||
|
||||
## Backup Sequence
|
||||
### Backup Sequence
|
||||
Below are the data movement actions and sequences during backup:
|
||||

|
||||
|
||||
|
@ -150,7 +150,7 @@ We keep VGDP reused for VBDM, so everything inside VGDP are kept as is. For deta
|
|||
When VGDP completes a backup, it returns an ID that represents the root object saved into the backup repository for this backup; through the root object, we will be able to enumerate the entire backup data.
|
||||
This Repo Snapshot ID will be saved along with the DUCR.
|
||||
|
||||
## DataUpload CRD
|
||||
### DataUpload CRD
|
||||
Below are the essential fields of DataUpload CRD. The CRD covers below information:
|
||||
- The information to manipulate the specified snapshot
|
||||
- The information to manipulate the specified data mover
|
||||
|
@ -351,7 +351,7 @@ spec:
|
|||
|
||||
```
|
||||
|
||||
## Restore Sequence
|
||||
### Restore Sequence
|
||||
|
||||
Below are the data movement actions sequences during restore:
|
||||

|
||||
|
@ -387,7 +387,7 @@ This also means that Velero should not restore the PV if a data movement restore
|
|||
|
||||
For restore, VBDM doesn’t need to persist anything.
|
||||
|
||||
## DataDownload CRD
|
||||
### DataDownload CRD
|
||||
Below are the essential fields of DataDownload CRD. The CRD covers below information:
|
||||
- The information to manipulate the target volume
|
||||
- The information to manipulate the specified data mover
|
||||
|
@ -977,6 +977,6 @@ Restore command is kept as is.
|
|||
|
||||
|
||||
|
||||
[1]: ../Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: ../Implemented/general-progress-monitoring.md
|
||||
[1]: ../unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
|
||||
[2]: ../general-progress-monitoring.md
|
||||
[3]: ../node-agent-concurrency.md
|
|
@ -0,0 +1,374 @@
|
|||
# Design to clean the artifacts generated in the CSI backup and restore workflows
|
||||
|
||||
## Terminology
|
||||
|
||||
* VSC: VolumeSnapshotContent
|
||||
* VS: VolumeSnapshot
|
||||
|
||||
## Abstract
|
||||
* The design aims to delete the unnecessary VSs and VSCs generated during the CSI backup and restore process.
|
||||
* The design stops creating related VSCs during backup syncing.
|
||||
|
||||
## Background
|
||||
In the current CSI backup and restore workflows (note that CSI B/R here means using only CSI snapshots in backup/restore, not the CSI snapshot data movement workflows), some generated artifacts are kept after the backup or restore process completes.
|
||||
|
||||
Some of them are kept due to design, for example, the VolumeSnapshotContents generated during the backup are kept to make sure the backup deletion can clean the snapshots in the storage providers.
|
||||
|
||||
Some of them are kept by accident, for example, after restore, two VolumeSnapshotContents are generated for the same VolumeSnapshot. One is from the backup content, and one is dynamically generated from the restore's VolumeSnapshot.
|
||||
|
||||
The design aims to clean the unnecessary artifacts, and make the CSI B/R workflow more concise and reliable.
|
||||
|
||||
## Goals
|
||||
- Clean the redundant VSC generated during CSI backup and restore.
|
||||
- Remove the VSCs in the backup sync process.
|
||||
|
||||
## Non Goals
|
||||
- There was some discussion about whether the Velero backup should include VSs and VSCs not generated during the backup. So far, the conclusion is that not including them is the better option. Although that would be a useful enhancement, it is not included in this design.
|
||||
- Deleting all the CSI-related metadata files in the BSL is not the aim of this design.
|
||||
|
||||
## Detailed Design
|
||||
### Backup
|
||||
During backup, the main change is that the backup-generated VSCs are no longer kept.
|
||||
|
||||
The reason is that we don't need them to ensure snapshot cleanup during backup deletion. Please refer to the [Backup Deletion](#backup-deletion) section for details.
|
||||
|
||||
As a result, we can simplify the VS deletion logic in the backup. Before, we needed to not only delete the VS, but also recreate a static VSC pointing to a non-existing VS.
|
||||
|
||||
The deletion code in the VS BackupItemAction can be simplified to the following:
|
||||
|
||||
``` go
|
||||
if backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
|
||||
backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
|
||||
p.log.
|
||||
WithField("Backup", fmt.Sprintf("%s/%s", backup.Namespace, backup.Name)).
|
||||
WithField("BackupPhase", backup.Status.Phase).Debugf("Cleaning VolumeSnapshots.")
|
||||
|
||||
if vsc == nil {
|
||||
vsc = &snapshotv1api.VolumeSnapshotContent{}
|
||||
}
|
||||
|
||||
csi.DeleteReadyVolumeSnapshot(*vs, *vsc, p.crClient, p.log)
|
||||
return item, nil, "", nil, nil
|
||||
}
|
||||
|
||||
|
||||
func DeleteReadyVolumeSnapshot(
|
||||
vs snapshotv1api.VolumeSnapshot,
|
||||
vsc snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
) {
|
||||
logger.Infof("Deleting Volumesnapshot %s/%s", vs.Namespace, vs.Name)
|
||||
if vs.Status == nil ||
|
||||
vs.Status.BoundVolumeSnapshotContentName == nil ||
|
||||
len(*vs.Status.BoundVolumeSnapshotContentName) <= 0 {
|
||||
logger.Errorf("VolumeSnapshot %s/%s is not ready. This is not expected.",
|
||||
vs.Namespace, vs.Name)
|
||||
return
|
||||
}
|
||||
|
||||
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil {
|
||||
// Patch the DeletionPolicy of the VolumeSnapshotContent to set it to Retain.
|
||||
// This ensures that the volume snapshot in the storage provider is kept.
|
||||
if err := SetVolumeSnapshotContentDeletionPolicy(
|
||||
vsc.Name,
|
||||
client,
|
||||
snapshotv1api.VolumeSnapshotContentRetain,
|
||||
); err != nil {
|
||||
logger.Warnf("Failed to patch DeletionPolicy of volume snapshot %s/%s",
|
||||
vs.Namespace, vs.Name)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.Delete(context.TODO(), &vsc); err != nil {
|
||||
logger.Warnf("Failed to delete the VSC %s: %s", vsc.Name, err.Error())
|
||||
}
|
||||
}
|
||||
if err := client.Delete(context.TODO(), &vs); err != nil {
|
||||
logger.Warnf("Failed to delete volumesnapshot %s/%s: %v", vs.Namespace, vs.Name, err)
|
||||
} else {
|
||||
logger.Infof("Deleted volumesnapshot with volumesnapshotContent %s/%s",
|
||||
vs.Namespace, vs.Name)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Restore
|
||||
|
||||
#### Restore the VolumeSnapshotContent
|
||||
The current behavior of VSC restoration is that the VSC from the backup is restored, and the restored VS also triggers creating a new VSC dynamically.
|
||||
|
||||
Creating two VSCs for the same VS in one restore does not seem right.
|
||||
|
||||
Skipping restore of the VSC from the backup is not a viable alternative, because the VSC may reference a [snapshot create secret](https://kubernetes-csi.github.io/docs/secrets-and-credentials-volume-snapshot-class.html?highlight=snapshotter-secret-name#createdelete-volumesnapshot-secret).
|
||||
|
||||
If the `SkipRestore` is set true in the restore action's result, the secret returned in the additional items is ignored too.
|
||||
|
||||
As a result, restoring the VSC from the backup and setting up the relation between the VSC and the VS is a better choice.
|
||||
|
||||
Another consideration is that the VSC name should not be the same as the backed-up VSC's, because older versions of Velero's backup and restore keep the VSC after completion.
|
||||
|
||||
There's a high possibility that the restore will fail because the VSC already exists in the cluster.
|
||||
|
||||
Multiple restores of the same backup will also meet the same problem.
|
||||
|
||||
The proposed solution is to use the restore's UID and the VS's UID to generate a sha256 hash value as the new VSC name. Both the VS and VSC RestoreItemAction can access those UIDs, and this avoids the conflict issues.
|
||||
|
||||
The restored VS name also shares the same generated name.
|
||||
|
||||
The VSC name referenced by the VS and the VSC's snapshot handle are stored in their status fields.
|
||||
|
||||
The Velero restore process purges the restored resources' metadata and status before running the RestoreItemActions.
|
||||
|
||||
As a result, we cannot read this information in the VS and VSC RestoreItemActions.
|
||||
|
||||
Fortunately, the RestoreItemAction input parameters include `ItemFromBackup`. The status is intact in `ItemFromBackup`.
|
||||
|
||||
``` go
|
||||
func (p *volumeSnapshotRestoreItemAction) Execute(
|
||||
input *velero.RestoreItemActionExecuteInput,
|
||||
) (*velero.RestoreItemActionExecuteOutput, error) {
|
||||
p.log.Info("Starting VolumeSnapshotRestoreItemAction")
|
||||
|
||||
if boolptr.IsSetToFalse(input.Restore.Spec.RestorePVs) {
|
||||
p.log.Infof("Restore %s/%s did not request for PVs to be restored.",
|
||||
input.Restore.Namespace, input.Restore.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{SkipRestore: true}, nil
|
||||
}
|
||||
|
||||
var vs snapshotv1api.VolumeSnapshot
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(), &vs); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
var vsFromBackup snapshotv1api.VolumeSnapshot
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.ItemFromBackup.UnstructuredContent(), &vsFromBackup); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
			errors.Wrapf(err, "failed to convert input.ItemFromBackup from unstructured")
|
||||
}
|
||||
|
||||
// If cross-namespace restore is configured, change the namespace
|
||||
// for VolumeSnapshot object to be restored
|
||||
newNamespace, ok := input.Restore.Spec.NamespaceMapping[vs.GetNamespace()]
|
||||
if !ok {
|
||||
// Use original namespace
|
||||
newNamespace = vs.Namespace
|
||||
}
|
||||
|
||||
if csiutil.IsVolumeSnapshotExists(newNamespace, vs.Name, p.crClient) {
|
||||
p.log.Debugf("VolumeSnapshot %s already exists in the cluster. Return without change.", vs.Namespace+"/"+vs.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{UpdatedItem: input.Item}, nil
|
||||
}
|
||||
|
||||
newVSCName := generateSha256FromRestoreAndVsUID(string(input.Restore.UID), string(vsFromBackup.UID))
|
||||
// Reset Spec to convert the VolumeSnapshot from using
|
||||
// the dynamic VolumeSnapshotContent to the static one.
|
||||
resetVolumeSnapshotSpecForRestore(&vs, &newVSCName)
|
||||
|
||||
// Reset VolumeSnapshot annotation. By now, only change
|
||||
// DeletionPolicy to Retain.
|
||||
resetVolumeSnapshotAnnotation(&vs)
|
||||
|
||||
vsMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&vs)
|
||||
if err != nil {
|
||||
p.log.Errorf("Fail to convert VS %s to unstructured", vs.Namespace+"/"+vs.Name)
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
p.log.Infof(`Returning from VolumeSnapshotRestoreItemAction with
|
||||
no additionalItems`)
|
||||
|
||||
return &velero.RestoreItemActionExecuteOutput{
|
||||
UpdatedItem: &unstructured.Unstructured{Object: vsMap},
|
||||
AdditionalItems: []velero.ResourceIdentifier{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// generateSha256FromRestoreAndVsUID Use the restore UID and the VS UID to generate the new VSC name.
|
||||
// By this way, VS and VSC RIA action can get the same VSC name.
|
||||
func generateSha256FromRestoreAndVsUID(restoreUID string, vsUID string) string {
|
||||
sha256Bytes := sha256.Sum256([]byte(restoreUID + "/" + vsUID))
|
||||
return "vsc-" + hex.EncodeToString(sha256Bytes[:])
|
||||
}
|
||||
```
|
||||
|
||||
#### Restore the VolumeSnapshot
|
||||
``` go
|
||||
// Execute restores a VolumeSnapshotContent object, adjusting its name, source, and VolumeSnapshotRef,
|
||||
// and returning the snapshot delete secret, if any, as an additional item to restore.
|
||||
func (p *volumeSnapshotContentRestoreItemAction) Execute(
|
||||
input *velero.RestoreItemActionExecuteInput,
|
||||
) (*velero.RestoreItemActionExecuteOutput, error) {
|
||||
if boolptr.IsSetToFalse(input.Restore.Spec.RestorePVs) {
|
||||
p.log.Infof("Restore did not request for PVs to be restored %s/%s",
|
||||
input.Restore.Namespace, input.Restore.Name)
|
||||
return &velero.RestoreItemActionExecuteOutput{SkipRestore: true}, nil
|
||||
}
|
||||
|
||||
p.log.Info("Starting VolumeSnapshotContentRestoreItemAction")
|
||||
|
||||
var vsc snapshotv1api.VolumeSnapshotContent
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(), &vsc); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
var vscFromBackup snapshotv1api.VolumeSnapshotContent
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.ItemFromBackup.UnstructuredContent(), &vscFromBackup); err != nil {
|
||||
return &velero.RestoreItemActionExecuteOutput{},
|
||||
			errors.Wrapf(err, "failed to convert input.ItemFromBackup from unstructured")
|
||||
}
|
||||
|
||||
// If cross-namespace restore is configured, change the namespace
|
||||
// for VolumeSnapshot object to be restored
|
||||
newNamespace, ok := input.Restore.Spec.NamespaceMapping[vsc.Spec.VolumeSnapshotRef.Namespace]
|
||||
if ok {
|
||||
// Update the referenced VS namespace to the mapping one.
|
||||
vsc.Spec.VolumeSnapshotRef.Namespace = newNamespace
|
||||
}
|
||||
|
||||
// Reset VSC name to align with VS.
|
||||
vsc.Name = generateSha256FromRestoreAndVsUID(string(input.Restore.UID), string(vscFromBackup.Spec.VolumeSnapshotRef.UID))
|
||||
|
||||
// Reset the ResourceVersion and UID of referenced VolumeSnapshot.
|
||||
vsc.Spec.VolumeSnapshotRef.ResourceVersion = ""
|
||||
vsc.Spec.VolumeSnapshotRef.UID = ""
|
||||
|
||||
	// Set the DeletionPolicy to Retain so that deleting the VS will not trigger snapshot deletion.
|
||||
vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain
|
||||
|
||||
if vscFromBackup.Status != nil && vscFromBackup.Status.SnapshotHandle != nil {
|
||||
vsc.Spec.Source.VolumeHandle = nil
|
||||
vsc.Spec.Source.SnapshotHandle = vscFromBackup.Status.SnapshotHandle
|
||||
} else {
|
||||
p.log.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
|
||||
return nil, errors.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
|
||||
}
|
||||
|
||||
additionalItems := []velero.ResourceIdentifier{}
|
||||
if csi.IsVolumeSnapshotContentHasDeleteSecret(&vsc) {
|
||||
additionalItems = append(additionalItems,
|
||||
velero.ResourceIdentifier{
|
||||
GroupResource: schema.GroupResource{Group: "", Resource: "secrets"},
|
||||
Name: vsc.Annotations[velerov1api.PrefixedSecretNameAnnotation],
|
||||
Namespace: vsc.Annotations[velerov1api.PrefixedSecretNamespaceAnnotation],
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
vscMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&vsc)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
p.log.Infof("Returning from VolumeSnapshotContentRestoreItemAction with %d additionalItems",
|
||||
len(additionalItems))
|
||||
return &velero.RestoreItemActionExecuteOutput{
|
||||
UpdatedItem: &unstructured.Unstructured{Object: vscMap},
|
||||
AdditionalItems: additionalItems,
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Backup Sync
|
||||
csi-volumesnapshotclasses.json, csi-volumesnapshotcontents.json, and csi-volumesnapshots.json are CSI-related metadata files in the BSL for each backup.
|
||||
|
||||
csi-volumesnapshotcontents.json and csi-volumesnapshots.json are not needed anymore, but csi-volumesnapshotclasses.json is still needed.
|
||||
|
||||
One concrete scenario is that a backup is created in cluster-A, then the backup is synced to cluster-B, and the backup is deleted in cluster-B. In this case, we don't have a chance to create the VolumeSnapshotClass needed by the VS and VSC.
|
||||
|
||||
The VSC deletion workflow proposed by this design needs to create the VSC first. If the VSC's referenced VolumeSnapshotClass doesn't exist in the cluster, the creation of the VSC will fail.
|
||||
|
||||
As a result, the VolumeSnapshotClass should still be synced in the backup sync process.
|
||||
|
||||
### Backup Deletion
|
||||
Two factors are worth considering for the backup deletion change:
|
||||
* Because the VSCs generated by the backup are no longer synced, and the VSCs generated during the backup are no longer kept either, the backup deletion needs to create a VSC and then delete it to make sure the snapshots in the storage provider are cleaned up as well.
|
||||
* The VSs generated by the backup are already deleted in the backup process, so we don't need a DeleteItemAction for the VS anymore. As a result, the `velero.io/csi-volumesnapshot-delete` plugin is unneeded.
|
||||
|
||||
For the VSC DeleteItemAction, we need to generate a VSC. Because we only care about the snapshot deletion, we don't need to create a VS associated with the VSC.
|
||||
|
||||
Creating a static VSC that points to a pseudo VS and references the snapshot handle should be enough.
|
||||
|
||||
To avoid the created VSC conflicting with ones generated by older versions of Velero B/R, the VSC name is set to `vsc-uuid`.
|
||||
|
||||
The following is an example of the implementation.
|
||||
``` go
uuid, err := uuid.NewRandom()
if err != nil {
	p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
	return errors.Wrapf(err, "Fail to generate the UUID to create VSC %s", snapCont.Name)
}
snapCont.Name = "vsc-" + uuid.String()

snapCont.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete

snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
	SnapshotHandle: snapCont.Status.SnapshotHandle,
}

snapCont.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
	APIVersion: snapshotv1api.SchemeGroupVersion.String(),
	Kind:       "VolumeSnapshot",
	Namespace:  "ns-" + string(snapCont.UID),
	Name:       "name-" + string(snapCont.UID),
}

snapCont.ResourceVersion = ""

if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
	return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
}

// Read resource timeout from backup annotation, if not set, use default value.
timeout, err := time.ParseDuration(
	input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
if err != nil {
	p.log.Warnf("fail to parse resource timeout annotation %s: %s",
		input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
	timeout = 10 * time.Minute
}
p.log.Debugf("resource timeout is set to %s", timeout.String())

interval := 5 * time.Second

// Wait until VSC created and ReadyToUse is true.
if err := wait.PollUntilContextTimeout(
	context.Background(),
	interval,
	timeout,
	true,
	func(ctx context.Context) (bool, error) {
		tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
		if err := p.crClient.Get(ctx, crclient.ObjectKeyFromObject(&snapCont), tmpVSC); err != nil {
			return false, errors.Wrapf(
				err, "failed to get VolumeSnapshotContent %s", snapCont.Name,
			)
		}

		if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
			return true, nil
		}

		return false, nil
	},
); err != nil {
	return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
}
```
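Once the recreated VSC reports ReadyToUse, the DeleteItemAction deletes it; because the DeletionPolicy was set to `Delete` above, the CSI driver then removes the snapshot from the storage provider as well. The following minimal sketch of that final step reuses the variables from the example above; tolerating a not-found error here is an illustrative assumption, not part of the design text.

``` go
// Deleting the just-created VSC triggers deletion of the snapshot in the
// storage provider, since its DeletionPolicy was set to Delete above.
if err := p.crClient.Delete(context.TODO(), &snapCont); err != nil && !apierrors.IsNotFound(err) {
	return errors.Wrapf(err, "fail to delete VolumeSnapshotContent %s", snapCont.Name)
}
```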
## Security Considerations

Security is not relevant to this design.

## Compatibility

In this design, no new information is added to backup and restore. As a result, this design doesn't have any compatibility issues.

## Open Issues

Please note that the CSI snapshot backup and restore mechanism does not support all file-store-based volumes, e.g. Azure Files, EFS, or vSphere CNS File Volume. Only block-based volumes are supported.
Refer to [this comment](https://github.com/vmware-tanzu/velero/issues/3151#issuecomment-2623507686) for more details.
|
187
go.mod
|
@ -1,14 +1,16 @@
|
|||
module github.com/vmware-tanzu/velero
|
||||
|
||||
go 1.22.0
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.6
|
||||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.40.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
|
||||
cloud.google.com/go/storage v1.50.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.14
|
||||
|
@ -17,10 +19,9 @@ require (
|
|||
github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
|
||||
github.com/bombsimon/logrusr/v3 v3.0.0
|
||||
github.com/evanphx/json-patch/v5 v5.8.0
|
||||
github.com/fatih/color v1.16.0
|
||||
github.com/evanphx/json-patch/v5 v5.9.0
|
||||
github.com/fatih/color v1.18.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/golang/protobuf v1.5.4
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/go-hclog v0.14.1
|
||||
|
@ -28,48 +29,56 @@ require (
|
|||
github.com/joho/godotenv v1.3.0
|
||||
github.com/kopia/kopia v0.16.0
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.30.0
|
||||
github.com/onsi/ginkgo/v2 v2.19.0
|
||||
github.com/onsi/gomega v1.33.1
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/robfig/cron v1.1.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.6.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
|
||||
golang.org/x/mod v0.17.0
|
||||
golang.org/x/net v0.24.0
|
||||
golang.org/x/oauth2 v0.19.0
|
||||
golang.org/x/text v0.14.0
|
||||
google.golang.org/api v0.172.0
|
||||
google.golang.org/grpc v1.63.2
|
||||
google.golang.org/protobuf v1.33.0
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/net v0.38.0
|
||||
golang.org/x/oauth2 v0.27.0
|
||||
golang.org/x/text v0.23.0
|
||||
google.golang.org/api v0.218.0
|
||||
google.golang.org/grpc v1.69.4
|
||||
google.golang.org/protobuf v1.36.3
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.29.0
|
||||
k8s.io/apiextensions-apiserver v0.29.0
|
||||
k8s.io/apimachinery v0.29.0
|
||||
k8s.io/cli-runtime v0.24.0
|
||||
k8s.io/client-go v0.29.0
|
||||
k8s.io/klog/v2 v2.110.1
|
||||
k8s.io/kube-aggregator v0.19.12
|
||||
k8s.io/metrics v0.25.6
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.17.2
|
||||
k8s.io/api v0.31.3
|
||||
k8s.io/apiextensions-apiserver v0.31.3
|
||||
k8s.io/apimachinery v0.31.3
|
||||
k8s.io/cli-runtime v0.31.3
|
||||
k8s.io/client-go v0.31.3
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kube-aggregator v0.31.3
|
||||
k8s.io/metrics v0.31.3
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime v0.19.3
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.112.1 // indirect
|
||||
cloud.google.com/go/compute v1.24.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v1.1.7 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
cel.dev/expr v0.16.2 // indirect
|
||||
cloud.google.com/go v0.116.0 // indirect
|
||||
cloud.google.com/go/auth v0.14.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cloud.google.com/go/iam v1.2.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.21.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
|
||||
|
@ -84,31 +93,38 @@ require (
|
|||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.6 // indirect
|
||||
github.com/aws/smithy-go v1.19.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.1.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.2.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/goccy/go-json v0.10.4 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.2 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
|
@ -117,20 +133,20 @@ require (
|
|||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.1 // indirect
|
||||
github.com/klauspost/reedsolomon v1.12.4 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.69 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.84 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/spdystream v0.4.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
|
@ -141,38 +157,43 @@ require (
|
|||
github.com/oklog/run v1.0.0 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.52.3 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/vladimirvivien/gexe v0.1.1 // indirect
|
||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.25.0 // indirect
|
||||
go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/component-base v0.29.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
)
|
||||
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20240417031915-e07d5b7de567
|
||||
replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20250227051353-20bfabbfc7a0
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
|
||||
FROM --platform=$TARGETPLATFORM golang:1.23-bookworm
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
|
@ -30,7 +30,7 @@ RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/downloa
|
|||
chmod +x /usr/local/kubebuilder/bin/kubebuilder
|
||||
|
||||
# get controller-tools
|
||||
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0
|
||||
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5
|
||||
|
||||
# get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
|
||||
# is not important)
|
||||
|
@ -69,7 +69,8 @@ RUN ARCH=$(go env GOARCH) && \
|
|||
chmod a+x /usr/include/google/protobuf && \
|
||||
chmod a+r -R /usr/include/google && \
|
||||
chmod +x /usr/bin/protoc
|
||||
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.32.0
|
||||
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.33.0 \
|
||||
&& go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
|
||||
|
||||
# get goreleaser
|
||||
# goreleaser name template per arch is basically goarch except for amd64 and 386 https://github.com/goreleaser/goreleaser/blob/ec8819a95c5527fae65e5cb41673f5bbc3245fda/.goreleaser.yaml#L167C1-L173C42
|
||||
|
@ -87,15 +88,18 @@ RUN ARCH=$(go env GOARCH) && \
|
|||
elif [ "$ARCH" = "ppc64le" ] ; then \
|
||||
ARCH="ppc64"; \
|
||||
fi && \
|
||||
wget --quiet "https://github.com/goreleaser/goreleaser/releases/download/v1.15.2/goreleaser_Linux_$ARCH.tar.gz" && \
|
||||
wget --quiet "https://github.com/goreleaser/goreleaser/releases/download/v1.26.2/goreleaser_Linux_$ARCH.tar.gz" && \
|
||||
tar xvf goreleaser_Linux_$ARCH.tar.gz; \
|
||||
mv goreleaser /usr/bin/goreleaser && \
|
||||
chmod +x /usr/bin/goreleaser
|
||||
|
||||
# get golangci-lint
|
||||
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.57.2
|
||||
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v2.1.1
|
||||
|
||||
# install kubectl
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
|
||||
RUN chmod +x ./kubectl
|
||||
RUN mv ./kubectl /usr/local/bin
|
||||
|
||||
# Fix the "dubious ownership" issue from git when running goreleaser.sh
|
||||
RUN echo "[safe] \n\t directory = *" > /.gitconfig
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
set -e
|
||||
|
||||
function uploader {
|
||||
gsutil cp $1 gs://$2/$1
|
||||
gsutil -D setacl public-read gs://$2/$1 &> /dev/null
|
||||
}
|
|
@ -63,7 +63,7 @@ fi
|
|||
if [[ -z $BRANCH && -z $TAG ]]; then
|
||||
echo "Test Velero container build without pushing, when Dockerfile is changed by PR."
|
||||
BRANCH="${GITHUB_BASE_REF}-container"
|
||||
OUTPUT_TYPE="local,dest=."
|
||||
OUTPUT_TYPE="tar"
|
||||
else
|
||||
OUTPUT_TYPE="registry"
|
||||
fi
|
||||
|
@ -88,8 +88,12 @@ else
|
|||
fi
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILDX_PLATFORMS" ]]; then
|
||||
BUILDX_PLATFORMS="linux/amd64,linux/arm64"
|
||||
if [[ -z "$BUILD_OS" ]]; then
|
||||
BUILD_OS="linux,windows"
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILD_ARCH" ]]; then
|
||||
BUILD_ARCH="amd64,arm64"
|
||||
fi
|
||||
|
||||
# Debugging info
|
||||
|
@ -98,13 +102,16 @@ echo "BRANCH: $BRANCH"
|
|||
echo "TAG: $TAG"
|
||||
echo "TAG_LATEST: $TAG_LATEST"
|
||||
echo "VERSION: $VERSION"
|
||||
echo "BUILDX_PLATFORMS: $BUILDX_PLATFORMS"
|
||||
echo "BUILD_OS: $BUILD_OS"
|
||||
echo "BUILD_ARCH: $BUILD_ARCH"
|
||||
|
||||
echo "Building and pushing container images."
|
||||
|
||||
|
||||
VERSION="$VERSION" \
|
||||
TAG_LATEST="$TAG_LATEST" \
|
||||
BUILDX_PLATFORMS="$BUILDX_PLATFORMS" \
|
||||
BUILDX_OUTPUT_TYPE=$OUTPUT_TYPE \
|
||||
make all-containers
|
||||
BUILD_OS="$BUILD_OS" \
|
||||
BUILD_ARCH="$BUILD_ARCH" \
|
||||
BUILD_OUTPUT_TYPE=$OUTPUT_TYPE \
|
||||
BUILD_TAG_GCR=true \
|
||||
make all-containers
|
|
@ -1,5 +1,5 @@
|
|||
diff --git a/go.mod b/go.mod
|
||||
index 5f939c481..1caa51275 100644
|
||||
index 5f939c481..0a0a353a7 100644
|
||||
--- a/go.mod
|
||||
+++ b/go.mod
|
||||
@@ -24,32 +24,32 @@ require (
|
||||
|
@ -9,17 +9,18 @@ index 5f939c481..1caa51275 100644
|
|||
- golang.org/x/crypto v0.5.0
|
||||
- golang.org/x/net v0.5.0
|
||||
- golang.org/x/oauth2 v0.4.0
|
||||
+ golang.org/x/crypto v0.21.0
|
||||
+ golang.org/x/net v0.23.0
|
||||
+ golang.org/x/oauth2 v0.7.0
|
||||
golang.org/x/sync v0.1.0
|
||||
- golang.org/x/sync v0.1.0
|
||||
- golang.org/x/sys v0.4.0
|
||||
- golang.org/x/term v0.4.0
|
||||
- golang.org/x/text v0.6.0
|
||||
- google.golang.org/api v0.106.0
|
||||
+ golang.org/x/sys v0.18.0
|
||||
+ golang.org/x/term v0.18.0
|
||||
+ golang.org/x/text v0.14.0
|
||||
+ golang.org/x/crypto v0.35.0
|
||||
+ golang.org/x/net v0.36.0
|
||||
+ golang.org/x/oauth2 v0.7.0
|
||||
+ golang.org/x/sync v0.11.0
|
||||
+ golang.org/x/sys v0.30.0
|
||||
+ golang.org/x/term v0.29.0
|
||||
+ golang.org/x/text v0.22.0
|
||||
+ google.golang.org/api v0.114.0
|
||||
)
|
||||
|
||||
|
@ -48,7 +49,7 @@ index 5f939c481..1caa51275 100644
|
|||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
|
||||
@@ -63,9 +63,9 @@ require (
|
||||
@@ -63,11 +63,13 @@ require (
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
|
@ -61,11 +62,16 @@ index 5f939c481..1caa51275 100644
|
|||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
-go 1.18
|
||||
+go 1.23.0
|
||||
+
|
||||
+toolchain go1.23.7
|
||||
diff --git a/go.sum b/go.sum
|
||||
index 026e1d2fa..27d4207f4 100644
|
||||
index 026e1d2fa..5ebc8e609 100644
|
||||
--- a/go.sum
|
||||
+++ b/go.sum
|
||||
@@ -1,13 +1,13 @@
|
||||
@@ -1,23 +1,26 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
-cloud.google.com/go v0.108.0 h1:xntQwnfn8oHGX0crLVinvHM+AhXvi3QHQIEcX/2hiWk=
|
||||
-cloud.google.com/go v0.108.0/go.mod h1:lNUfQqusBJp0bgAg6qrHgYFYbTB+dOiob1itwnlD33Q=
|
||||
|
@ -83,10 +89,31 @@ index 026e1d2fa..27d4207f4 100644
|
|||
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
|
||||
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
|
||||
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
|
||||
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
|
||||
cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
|
||||
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI=
|
||||
@@ -70,8 +70,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
|
||||
+github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
|
||||
@@ -54,6 +57,7 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
+github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -70,8 +74,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
|
@ -97,12 +124,13 @@ index 026e1d2fa..27d4207f4 100644
|
|||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -82,17 +82,17 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
@@ -82,17 +86,18 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
-github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
|
@ -120,25 +148,41 @@ index 026e1d2fa..27d4207f4 100644
|
|||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
@@ -172,8 +172,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
@@ -114,6 +119,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6 h1:nz7i1au+nDzgExfqW5Zl6q85XNTvYoGnM5DHiQC0yYs=
|
||||
github.com/kurin/blazer v0.5.4-0.20211030221322-ba894c124ac6/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.46 h1:Vo3tNmNXuj7ME5qrvN4iadO7b4mzu/RSFdUkUhaPldk=
|
||||
@@ -129,6 +135,7 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P
|
||||
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
|
||||
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
|
||||
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
@@ -172,8 +179,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
|
||||
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -189,11 +189,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
@@ -189,17 +196,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
|
||||
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
|
@ -147,31 +191,39 @@ index 026e1d2fa..27d4207f4 100644
|
|||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -214,17 +214,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -214,17 +221,17 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
|
||||
+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -237,8 +237,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
@@ -237,8 +244,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
|
@ -182,7 +234,7 @@ index 026e1d2fa..27d4207f4 100644
|
|||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
@@ -246,15 +246,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
@@ -246,15 +253,15 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
|
@ -202,7 +254,7 @@ index 026e1d2fa..27d4207f4 100644
|
|||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -266,8 +266,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
@@ -266,14 +273,15 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
|
@ -213,3 +265,10 @@ index 026e1d2fa..27d4207f4 100644
|
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@ -19,6 +19,14 @@ HACK_DIR=$(dirname "${BASH_SOURCE}")
|
|||
echo "Updating plugin proto"
|
||||
|
||||
echo protoc --version
|
||||
protoc pkg/plugin/proto/*.proto pkg/plugin/proto/*/*/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ --go_opt=module=github.com/vmware-tanzu/velero/pkg/plugin/generated -I pkg/plugin/proto/ -I /usr/include
|
||||
protoc \
|
||||
-I pkg/plugin/proto/ \
|
||||
-I /usr/include \
|
||||
--go_out=pkg/plugin/generated/ \
|
||||
--go_opt=module=github.com/vmware-tanzu/velero/pkg/plugin/generated \
|
||||
--go-grpc_out=pkg/plugin/generated \
|
||||
--go-grpc_opt=paths=source_relative \
|
||||
--go-grpc_opt=require_unimplemented_servers=false \
|
||||
$(find pkg/plugin/proto -name '*.proto')
|
||||
|
||||
echo "Updating plugin proto - done!"
|
||||
|
|
|
@ -30,23 +30,6 @@ if ! command -v controller-gen > /dev/null; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# get code-generation tools (for now keep in GOPATH since they're not fully modules-compatible yet)
|
||||
mkdir -p ${GOPATH}/src/k8s.io
|
||||
pushd ${GOPATH}/src/k8s.io
|
||||
git clone -b v0.22.2 https://github.com/kubernetes/code-generator
|
||||
popd
|
||||
|
||||
${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
|
||||
all \
|
||||
github.com/vmware-tanzu/velero/pkg/generated \
|
||||
github.com/vmware-tanzu/velero/pkg/apis \
|
||||
"velero:v1,v2alpha1" \
|
||||
--go-header-file ./hack/boilerplate.go.txt \
|
||||
--output-base ../../.. \
|
||||
$@
|
||||
|
||||
# Generate apiextensions.k8s.io/v1
|
||||
|
||||
# Generate CRD for v1.
|
||||
controller-gen \
|
||||
crd:crdVersions=v1 \
|
||||
|
|
|
@ -83,7 +83,7 @@ func TestNamespacedFileStore(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, path, tc.expectedPath)
|
||||
require.Equal(t, tc.expectedPath, path)
|
||||
|
||||
contents, err := fs.ReadFile(path)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -1,120 +0,0 @@
|
|||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
// volumeSnapshotDeleteItemAction is a backup item action plugin for Velero.
|
||||
type volumeSnapshotDeleteItemAction struct {
|
||||
log logrus.FieldLogger
|
||||
crClient crclient.Client
|
||||
}
|
||||
|
||||
// AppliesTo returns information indicating that the
|
||||
// VolumeSnapshotBackupItemAction should be invoked to backup
|
||||
// VolumeSnapshots.
|
||||
func (p *volumeSnapshotDeleteItemAction) AppliesTo() (velero.ResourceSelector, error) {
|
||||
p.log.Debug("VolumeSnapshotBackupItemAction AppliesTo")
|
||||
|
||||
return velero.ResourceSelector{
|
||||
IncludedResources: []string{"volumesnapshots.snapshot.storage.k8s.io"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *volumeSnapshotDeleteItemAction) Execute(
|
||||
input *velero.DeleteItemActionExecuteInput,
|
||||
) error {
|
||||
p.log.Info("Starting VolumeSnapshotDeleteItemAction for volumeSnapshot")
|
||||
|
||||
var vs snapshotv1api.VolumeSnapshot
|
||||
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(
|
||||
input.Item.UnstructuredContent(),
|
||||
&vs,
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "failed to convert input.Item from unstructured")
|
||||
}
|
||||
|
||||
// We don't want this DeleteItemAction plugin to delete VolumeSnapshot
|
||||
// taken outside of Velero. So skip deleting VolumeSnapshot objects
|
||||
// that were not created in the process of creating the Velero
|
||||
// backup being deleted.
|
||||
if !kubeutil.HasBackupLabel(&vs.ObjectMeta, input.Backup.Name) {
|
||||
p.log.Info(
|
||||
"VolumeSnapshot %s/%s was not taken by backup %s, skipping deletion",
|
||||
vs.Namespace, vs.Name, input.Backup.Name,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
p.log.Infof("Deleting VolumeSnapshot %s/%s", vs.Namespace, vs.Name)
|
||||
if vs.Status != nil && vs.Status.BoundVolumeSnapshotContentName != nil {
|
||||
// we patch the DeletionPolicy of the VolumeSnapshotContent
|
||||
// to set it to Delete. This ensures that the volume snapshot
|
||||
// in the storage provider is also deleted.
|
||||
err := csi.SetVolumeSnapshotContentDeletionPolicy(
|
||||
*vs.Status.BoundVolumeSnapshotContentName,
|
||||
p.crClient,
|
||||
)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(
|
||||
err,
|
||||
fmt.Sprintf("failed to patch DeletionPolicy of volume snapshot %s/%s",
|
||||
vs.Namespace, vs.Name),
|
||||
)
|
||||
}
|
||||
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
err := p.crClient.Delete(context.TODO(), &vs)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewVolumeSnapshotDeleteItemAction(f client.Factory) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return &volumeSnapshotDeleteItemAction{
|
||||
log: logger,
|
||||
crClient: crClient,
|
||||
}, nil
|
||||
}
|
||||
}
|
|
@ -1,151 +0,0 @@
|
|||
/*
|
||||
Copyright the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/builder"
|
||||
factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
velerotest "github.com/vmware-tanzu/velero/pkg/test"
|
||||
)
|
||||
|
||||
func TestVSExecute(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
item runtime.Unstructured
|
||||
vs *snapshotv1api.VolumeSnapshot
|
||||
backup *velerov1api.Backup
|
||||
createVS bool
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "VolumeSnapshot doesn't have backup label",
|
||||
item: velerotest.UnstructuredOrDie(
|
||||
`
|
||||
{
|
||||
"apiVersion": "snapshot.storage.k8s.io/v1",
|
||||
"kind": "VolumeSnapshot",
|
||||
"metadata": {
|
||||
"namespace": "ns",
|
||||
"name": "foo"
|
||||
}
|
||||
}
|
||||
`,
|
||||
),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "VolumeSnapshot doesn't exist in the cluster",
|
||||
vs: builder.ForVolumeSnapshot("foo", "bar").
|
||||
ObjectMeta(builder.WithLabelsMap(
|
||||
map[string]string{velerov1api.BackupNameLabel: "backup"},
|
||||
)).Status().
|
||||
BoundVolumeSnapshotContentName("vsc").
|
||||
Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Normal case, VolumeSnapshot should be deleted",
|
||||
vs: builder.ForVolumeSnapshot("foo", "bar").
|
||||
ObjectMeta(builder.WithLabelsMap(
|
||||
map[string]string{velerov1api.BackupNameLabel: "backup"},
|
||||
)).Status().
|
||||
BoundVolumeSnapshotContentName("vsc").
|
||||
Result(),
|
||||
backup: builder.ForBackup("velero", "backup").Result(),
|
||||
expectErr: false,
|
||||
createVS: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
logger := logrus.StandardLogger()
|
||||
|
||||
p := volumeSnapshotDeleteItemAction{log: logger, crClient: crClient}
|
||||
|
||||
if test.vs != nil {
|
||||
vsMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(test.vs)
|
||||
require.NoError(t, err)
|
||||
test.item = &unstructured.Unstructured{Object: vsMap}
|
||||
}
|
||||
|
||||
if test.createVS {
|
||||
require.NoError(t, crClient.Create(context.TODO(), test.vs))
|
||||
}
|
||||
|
||||
err := p.Execute(
|
||||
&velero.DeleteItemActionExecuteInput{
|
||||
Item: test.item,
|
||||
Backup: test.backup,
|
||||
},
|
||||
)
|
||||
|
||||
if test.expectErr == false {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVSAppliesTo(t *testing.T) {
|
||||
p := volumeSnapshotDeleteItemAction{
|
||||
log: logrus.StandardLogger(),
|
||||
}
|
||||
selector, err := p.AppliesTo()
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
velero.ResourceSelector{
|
||||
IncludedResources: []string{"volumesnapshots.snapshot.storage.k8s.io"},
|
||||
},
|
||||
selector,
|
||||
)
|
||||
}
|
||||
|
||||
func TestNewVolumeSnapshotDeleteItemAction(t *testing.T) {
|
||||
logger := logrus.StandardLogger()
|
||||
crClient := velerotest.NewFakeControllerRuntimeClient(t)
|
||||
|
||||
f := &factorymocks.Factory{}
|
||||
f.On("KubebuilderClient").Return(nil, fmt.Errorf(""))
|
||||
plugin := NewVolumeSnapshotDeleteItemAction(f)
|
||||
_, err := plugin(logger)
|
||||
require.Error(t, err)
|
||||
|
||||
f1 := &factorymocks.Factory{}
|
||||
f1.On("KubebuilderClient").Return(crClient, nil)
|
||||
plugin1 := NewVolumeSnapshotDeleteItemAction(f1)
|
||||
_, err1 := plugin1(logger)
|
||||
require.NoError(t, err1)
|
||||
}
|
|
@ -18,19 +18,23 @@ package csi
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1api "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
|
||||
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/csi"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
|
||||
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
|
@ -77,25 +81,55 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
|||
|
||||
p.log.Infof("Deleting VolumeSnapshotContent %s", snapCont.Name)
|
||||
|
||||
if err := csi.SetVolumeSnapshotContentDeletionPolicy(
|
||||
snapCont.Name,
|
||||
p.crClient,
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
p.log.WithError(err).Errorf("Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
return errors.Wrapf(err, "Fail to generate the UUID to create VSC %s", snapCont.Name)
|
||||
}
|
||||
snapCont.Name = "vsc-" + uuid.String()
|
||||
|
||||
snapCont.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentDelete
|
||||
|
||||
snapCont.Spec.Source = snapshotv1api.VolumeSnapshotContentSource{
|
||||
SnapshotHandle: snapCont.Status.SnapshotHandle,
|
||||
}
|
||||
|
||||
snapCont.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
|
||||
APIVersion: snapshotv1api.SchemeGroupVersion.String(),
|
||||
Kind: "VolumeSnapshot",
|
||||
Namespace: "ns-" + string(snapCont.UID),
|
||||
Name: "name-" + string(snapCont.UID),
|
||||
}
|
||||
|
||||
snapCont.ResourceVersion = ""
|
||||
|
||||
if err := p.crClient.Create(context.TODO(), &snapCont); err != nil {
|
||||
return errors.Wrapf(err, "fail to create VolumeSnapshotContent %s", snapCont.Name)
|
||||
}
|
||||
|
||||
// Read resource timeout from backup annotation, if not set, use default value.
|
||||
timeout, err := time.ParseDuration(
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation])
|
||||
if err != nil {
|
||||
p.log.Warnf("fail to parse resource timeout annotation %s: %s",
|
||||
input.Backup.Annotations[velerov1api.ResourceTimeoutAnnotation], err.Error())
|
||||
timeout = 10 * time.Minute
|
||||
}
|
||||
p.log.Debugf("resource timeout is set to %s", timeout.String())
|
||||
|
||||
interval := 5 * time.Second
|
||||
|
||||
// Wait until VSC created and ReadyToUse is true.
|
||||
if err := wait.PollUntilContextTimeout(
|
||||
context.Background(),
|
||||
interval,
|
||||
timeout,
|
||||
true,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
return checkVSCReadiness(ctx, &snapCont, p.crClient)
|
||||
},
|
||||
); err != nil {
|
||||
// #4764: Leave a warning when VolumeSnapshotContent cannot be found for deletion.
|
||||
// Manual deleting VolumeSnapshotContent can cause this.
|
||||
// It's tricky for Velero to handle this inconsistency.
|
||||
// Even if Velero restores the VolumeSnapshotContent, CSI snapshot controller
|
||||
// may not delete it correctly due to the snapshot represented by VolumeSnapshotContent
|
||||
// already deleted on cloud provider.
|
||||
if apierrors.IsNotFound(err) {
|
||||
p.log.Warnf(
|
||||
"VolumeSnapshotContent %s of backup %s cannot be found. May leave orphan snapshot %s on cloud provider.",
|
||||
snapCont.Name, input.Backup.Name, *snapCont.Status.SnapshotHandle)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, fmt.Sprintf(
|
||||
"failed to set DeletionPolicy on volumesnapshotcontent %s. Skipping deletion",
|
||||
snapCont.Name))
|
||||
return errors.Wrapf(err, "fail to wait VolumeSnapshotContent %s becomes ready.", snapCont.Name)
|
||||
}
|
||||
|
||||
if err := p.crClient.Delete(
|
||||
|
@ -109,10 +143,29 @@ func (p *volumeSnapshotContentDeleteItemAction) Execute(
|
|||
return nil
|
||||
}
|
||||
|
||||
var checkVSCReadiness = func(
|
||||
ctx context.Context,
|
||||
vsc *snapshotv1api.VolumeSnapshotContent,
|
||||
client crclient.Client,
|
||||
) (bool, error) {
|
||||
tmpVSC := new(snapshotv1api.VolumeSnapshotContent)
|
||||
if err := client.Get(ctx, crclient.ObjectKeyFromObject(vsc), tmpVSC); err != nil {
|
||||
return false, errors.Wrapf(
|
||||
err, "failed to get VolumeSnapshotContent %s", vsc.Name,
|
||||
)
|
||||
}
|
||||
|
||||
if tmpVSC.Status != nil && boolptr.IsSetToTrue(tmpVSC.Status.ReadyToUse) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func NewVolumeSnapshotContentDeleteItemAction(
|
||||
f client.Factory,
|
||||
) plugincommon.HandlerInitializer {
|
||||
return func(logger logrus.FieldLogger) (interface{}, error) {
|
||||
return func(logger logrus.FieldLogger) (any, error) {
|
||||
crClient, err := f.KubebuilderClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -22,10 +22,13 @@ import (
"testing"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
crclient "sigs.k8s.io/controller-runtime/pkg/client"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"

@ -37,11 +40,15 @@
func TestVSCExecute(t *testing.T) {
snapshotHandleStr := "test"
tests := []struct {
name string
item runtime.Unstructured
vsc *snapshotv1api.VolumeSnapshotContent
backup *velerov1api.Backup
createVSC bool
name string
item runtime.Unstructured
vsc *snapshotv1api.VolumeSnapshotContent
backup *velerov1api.Backup
function func(
ctx context.Context,
vsc *snapshotv1api.VolumeSnapshotContent,
client crclient.Client,
) (bool, error)
expectErr bool
}{
{
@ -62,17 +69,30 @@ func TestVSCExecute(t *testing.T) {
expectErr: false,
},
{
name: "VolumeSnapshotContent doesn't exist in the cluster, no error",
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").Result(),
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: false,
function: func(
ctx context.Context,
vsc *snapshotv1api.VolumeSnapshotContent,
client crclient.Client,
) (bool, error) {
return true, nil
},
},
{
name: "Normal case, VolumeSnapshot should be deleted",
vsc: builder.ForVolumeSnapshotContent("bar").ObjectMeta(builder.WithLabelsMap(map[string]string{velerov1api.BackupNameLabel: "backup"})).Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: &snapshotHandleStr}).Result(),
backup: builder.ForBackup("velero", "backup").Result(),
expectErr: false,
createVSC: true,
backup: builder.ForBackup("velero", "backup").ObjectMeta(builder.WithAnnotationsMap(map[string]string{velerov1api.ResourceTimeoutAnnotation: "5s"})).Result(),
expectErr: true,
function: func(
ctx context.Context,
vsc *snapshotv1api.VolumeSnapshotContent,
client crclient.Client,
) (bool, error) {
return false, errors.Errorf("test error case")
},
},
}

@ -80,6 +100,7 @@ func TestVSCExecute(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
crClient := velerotest.NewFakeControllerRuntimeClient(t)
logger := logrus.StandardLogger()
checkVSCReadiness = test.function

p := volumeSnapshotContentDeleteItemAction{log: logger, crClient: crClient}

@ -89,10 +110,6 @@ func TestVSCExecute(t *testing.T) {
test.item = &unstructured.Unstructured{Object: vscMap}
}

if test.createVSC {
require.NoError(t, crClient.Create(context.TODO(), test.vsc))
}

err := p.Execute(
&velero.DeleteItemActionExecuteInput{
Item: test.item,
@ -140,3 +157,54 @@ func TestNewVolumeSnapshotContentDeleteItemAction(t *testing.T) {
_, err1 := plugin1(logger)
require.NoError(t, err1)
}

func TestCheckVSCReadiness(t *testing.T) {
tests := []struct {
name string
vsc *snapshotv1api.VolumeSnapshotContent
createVSC bool
expectErr bool
ready bool
}{
{
name: "VSC not exist",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "vsc-1",
Namespace: "velero",
},
},
createVSC: false,
expectErr: true,
ready: false,
},
{
name: "VSC not ready",
vsc: &snapshotv1api.VolumeSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: "vsc-1",
Namespace: "velero",
},
},
createVSC: true,
expectErr: false,
ready: false,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.TODO()
crClient := velerotest.NewFakeControllerRuntimeClient(t)
if test.createVSC {
require.NoError(t, crClient.Create(ctx, test.vsc))
}

ready, err := checkVSCReadiness(ctx, test.vsc, crClient)
require.Equal(t, test.ready, ready)
if test.expectErr {
require.Error(t, err)
}
})
}
}
@ -230,7 +230,7 @@ func (h *harness) addResource(t *testing.T, resource *test.APIResource) {
}

// recordResourcesAction is a delete item action that can be configured to run
// for specific resources/namespaces and simply record the items that is is
// for specific resources/namespaces and simply record the items that is
// executed for.
type recordResourcesAction struct {
selector velero.ResourceSelector
@ -37,7 +37,7 @@ type hookKey struct {
// For hooks specified in pod annotation, this field is the pod where hooks are annotated.
podName string
// HookPhase is only for backup hooks, for restore hooks, this field is empty.
hookPhase hookPhase
hookPhase HookPhase
// HookName is only for hooks specified in the backup/restore spec.
// For hooks specified in pod annotation, this field is empty or "<from-annotation>".
hookName string

@ -83,7 +83,7 @@ func NewHookTracker() *HookTracker {
// Add adds a hook to the hook tracker
// Add must precede the Record for each individual hook.
// In other words, a hook must be added to the tracker before its execution result is recorded.
func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase hookPhase) {
func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
ht.lock.Lock()
defer ht.lock.Unlock()

@ -108,7 +108,7 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
// Record records the hook's execution status
// Add must precede the Record for each individual hook.
// In other words, a hook must be added to the tracker before its execution result is recorded.
func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool, hookErr error) error {
func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
ht.lock.Lock()
defer ht.lock.Unlock()

@ -179,7 +179,7 @@ func NewMultiHookTracker() *MultiHookTracker {
}

// Add adds a backup/restore hook to the tracker
func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase hookPhase) {
func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase) {
mht.lock.Lock()
defer mht.lock.Unlock()

@ -190,7 +190,7 @@ func (mht *MultiHookTracker) Add(name, podNamespace, podName, container, source,
}

// Record records a backup/restore hook execution status
func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool, hookErr error) error {
func (mht *MultiHookTracker) Record(name, podNamespace, podName, container, source, hookName string, hookPhase HookPhase, hookFailed bool, hookErr error) error {
mht.lock.RLock()
defer mht.lock.RUnlock()
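With the phase type exported as HookPhase, callers outside this file can pass PhasePre, PhasePost, or an empty phase for restore hooks when registering and recording hooks. A small sketch of the Add-then-Record contract described in the comments above; it is written as if it lived in the tracker's own package, and the package name hook and the demo function are assumptions for illustration only.

package hook

import (
	"errors"
	"fmt"
)

// demoHookTracker sketches the contract stated above: every hook must be
// registered with Add before its outcome is reported with Record, and Record
// returns an error for a hook that was never added.
func demoHookTracker() {
	tracker := NewHookTracker()

	// Register a pre-backup hook sourced from a pod annotation.
	tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "<from-annotation>", PhasePre)

	// Record a failed execution for the hook that was added above.
	if err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "<from-annotation>", PhasePre, true, errors.New("command failed")); err != nil {
		fmt.Println("unexpected:", err)
	}

	// Recording a hook that was never added is rejected.
	err := tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "<from-annotation>", PhasePre, true, errors.New("command failed"))
	fmt.Println("record without add returned an error:", err != nil)
}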
@ -65,13 +65,13 @@ func TestHookTracker_Record(t *testing.T) {
|
|||
info := tracker.tracker[key]
|
||||
assert.True(t, info.hookFailed)
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.NotNil(t, err)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", "", false, nil)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.hookFailed)
|
||||
}
|
||||
|
||||
|
@ -141,13 +141,13 @@ func TestMultiHookTracker_Record(t *testing.T) {
|
|||
info := mht.trackers["restore1"].tracker[key]
|
||||
assert.True(t, info.hookFailed)
|
||||
assert.True(t, info.hookExecuted)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mht.Record("restore1", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.NotNil(t, err)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = mht.Record("restore2", "ns2", "pod2", "container1", HookSourceAnnotation, "h1", "", true, fmt.Errorf("err"))
|
||||
assert.NotNil(t, err)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMultiHookTracker_Stat(t *testing.T) {
|
||||
|
|
|
@ -43,11 +43,11 @@ import (
|
|||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
)
|
||||
|
||||
type hookPhase string
|
||||
type HookPhase string
|
||||
|
||||
const (
|
||||
PhasePre hookPhase = "pre"
|
||||
PhasePost hookPhase = "post"
|
||||
PhasePre HookPhase = "pre"
|
||||
PhasePost HookPhase = "post"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -81,7 +81,7 @@ type ItemHookHandler interface {
|
|||
groupResource schema.GroupResource,
|
||||
obj runtime.Unstructured,
|
||||
resourceHooks []ResourceHook,
|
||||
phase hookPhase,
|
||||
phase HookPhase,
|
||||
hookTracker *HookTracker,
|
||||
) error
|
||||
}
|
||||
|
@ -200,7 +200,7 @@ func (h *DefaultItemHookHandler) HandleHooks(
|
|||
groupResource schema.GroupResource,
|
||||
obj runtime.Unstructured,
|
||||
resourceHooks []ResourceHook,
|
||||
phase hookPhase,
|
||||
phase HookPhase,
|
||||
hookTracker *HookTracker,
|
||||
) error {
|
||||
// We only support hooks on pods right now
|
||||
|
@ -312,27 +312,27 @@ func (h *NoOpItemHookHandler) HandleHooks(
|
|||
groupResource schema.GroupResource,
|
||||
obj runtime.Unstructured,
|
||||
resourceHooks []ResourceHook,
|
||||
phase hookPhase,
|
||||
phase HookPhase,
|
||||
hookTracker *HookTracker,
|
||||
) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func phasedKey(phase hookPhase, key string) string {
|
||||
func phasedKey(phase HookPhase, key string) string {
|
||||
if phase != "" {
|
||||
return fmt.Sprintf("%v.%v", phase, key)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func getHookAnnotation(annotations map[string]string, key string, phase hookPhase) string {
|
||||
func getHookAnnotation(annotations map[string]string, key string, phase HookPhase) string {
|
||||
return annotations[phasedKey(phase, key)]
|
||||
}
|
||||
|
||||
// getPodExecHookFromAnnotations returns an ExecHook based on the annotations, as long as the
|
||||
// 'command' annotation is present. If it is absent, this returns nil.
|
||||
// If there is an error in parsing a supplied timeout, it is logged.
|
||||
func getPodExecHookFromAnnotations(annotations map[string]string, phase hookPhase, log logrus.FieldLogger) *velerov1api.ExecHook {
|
||||
func getPodExecHookFromAnnotations(annotations map[string]string, phase HookPhase, log logrus.FieldLogger) *velerov1api.ExecHook {
|
||||
commandValue := getHookAnnotation(annotations, podBackupHookCommandAnnotationKey, phase)
|
||||
if commandValue == "" {
|
||||
return nil
|
||||
|
@ -561,7 +561,7 @@ func GroupRestoreExecHooks(
|
|||
if hookFromAnnotation.Container == "" {
|
||||
hookFromAnnotation.Container = pod.Spec.Containers[0].Name
|
||||
}
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", hookPhase(""))
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "<from-annotation>", HookPhase(""))
|
||||
byContainer[hookFromAnnotation.Container] = []PodExecRestoreHook{
|
||||
{
|
||||
HookName: "<from-annotation>",
|
||||
|
@ -596,7 +596,7 @@ func GroupRestoreExecHooks(
|
|||
if named.Hook.Container == "" {
|
||||
named.Hook.Container = pod.Spec.Containers[0].Name
|
||||
}
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, hookPhase(""))
|
||||
hookTrack.Add(restoreName, metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, HookPhase(""))
|
||||
byContainer[named.Hook.Container] = append(byContainer[named.Hook.Container], named)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -128,7 +128,7 @@ func TestHandleHooksSkips(t *testing.T) {
|
|||
func TestHandleHooks(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
phase hookPhase
|
||||
phase HookPhase
|
||||
groupResource string
|
||||
item runtime.Unstructured
|
||||
hooks []ResourceHook
|
||||
|
@ -500,7 +500,7 @@ func TestHandleHooks(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetPodExecHookFromAnnotations(t *testing.T) {
|
||||
phases := []hookPhase{"", PhasePre, PhasePost}
|
||||
phases := []HookPhase{"", PhasePre, PhasePost}
|
||||
for _, phase := range phases {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -1199,7 +1199,7 @@ func TestGroupRestoreExecHooks(t *testing.T) {
|
|||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := GroupRestoreExecHooks("restore1", tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger(), hookTracker)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
|
@ -1976,7 +1976,7 @@ func TestValidateContainer(t *testing.T) {
|
|||
expectedError := fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field")
|
||||
|
||||
// valid string should return nil as result.
|
||||
assert.Nil(t, ValidateContainer([]byte(valid)))
|
||||
assert.NoError(t, ValidateContainer([]byte(valid)))
|
||||
|
||||
// noName string should return expected error as result.
|
||||
assert.Equal(t, expectedError, ValidateContainer([]byte(noName)))
|
||||
|
@ -1999,7 +1999,7 @@ func TestBackupHookTracker(t *testing.T) {
|
|||
}
|
||||
test1 := []struct {
|
||||
name string
|
||||
phase hookPhase
|
||||
phase HookPhase
|
||||
groupResource string
|
||||
pods []podWithHook
|
||||
hookTracker *HookTracker
|
||||
|
|
|
@ -116,7 +116,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
|||
// not yet been observed to be running. It relies on the Informer not to be called concurrently.
|
||||
// When a container is observed running and its hooks are executed, the container is deleted
|
||||
// from the byContainer map. When the map is empty the watch is ended.
|
||||
handler := func(newObj interface{}) {
|
||||
handler := func(newObj any) {
|
||||
newPod, ok := newObj.(*v1.Pod)
|
||||
if !ok {
|
||||
return
|
||||
|
@ -169,7 +169,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
|||
hookLog.Error(err)
|
||||
errors = append(errors, err)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
|
@ -195,7 +195,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
|||
hookFailed = true
|
||||
}
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), hookFailed, hookErr)
|
||||
errTracker := multiHookTracker.Record(restoreName, newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), hookFailed, hookErr)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
|
@ -214,18 +214,23 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
|||
|
||||
selector := fields.OneTermEqualSelector("metadata.name", pod.Name)
|
||||
lw := e.ListWatchFactory.NewListWatch(pod.Namespace, selector)
|
||||
|
||||
_, podWatcher := cache.NewInformer(lw, pod, 0, cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: handler,
|
||||
UpdateFunc: func(_, newObj interface{}) {
|
||||
handler(newObj)
|
||||
_, podWatcher := cache.NewInformerWithOptions(cache.InformerOptions{
|
||||
ListerWatcher: lw,
|
||||
ObjectType: pod,
|
||||
ResyncPeriod: 0,
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: handler,
|
||||
UpdateFunc: func(_, newObj any) {
|
||||
handler(newObj)
|
||||
},
|
||||
DeleteFunc: func(obj any) {
|
||||
err := fmt.Errorf("pod %s deleted before all hooks were executed", kube.NamespaceAndName(pod))
|
||||
log.Error(err)
|
||||
cancel()
|
||||
},
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
err := fmt.Errorf("pod %s deleted before all hooks were executed", kube.NamespaceAndName(pod))
|
||||
log.Error(err)
|
||||
cancel()
|
||||
},
|
||||
})
|
||||
},
|
||||
)
|
||||
|
||||
podWatcher.Run(ctx.Done())
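Because the removed and added lines of the informer construction are interleaved in this view, the resulting cache.NewInformerWithOptions call is easier to read in one piece. The sketch below reassembles it under assumed names (package hooks, buildPodWatcher, lw, handler, and onDelete are placeholders for the surrounding variables); the option fields mirror the ones used in the hunk above.

package hooks

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// buildPodWatcher shows the NewInformerWithOptions form: the list/watch source,
// object type, resync period, and event handlers are passed as one options
// struct instead of positional arguments.
func buildPodWatcher(
	pod *v1.Pod,
	lw cache.ListerWatcher,
	handler func(newObj any),
	onDelete func(),
) cache.Controller {
	_, podWatcher := cache.NewInformerWithOptions(cache.InformerOptions{
		ListerWatcher: lw,
		ObjectType:    pod,
		ResyncPeriod:  0,
		Handler: cache.ResourceEventHandlerFuncs{
			AddFunc: handler,
			UpdateFunc: func(_, newObj any) {
				handler(newObj)
			},
			DeleteFunc: func(obj any) {
				// The real handler logs the pod name and cancels the wait context.
				fmt.Println("pod deleted before all hooks were executed")
				onDelete()
			},
		},
	})
	return podWatcher
}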
|
||||
|
||||
|
@ -247,7 +252,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks(
|
|||
},
|
||||
)
|
||||
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true, err)
|
||||
errTracker := multiHookTracker.Record(restoreName, pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, HookPhase(""), true, err)
|
||||
if errTracker != nil {
|
||||
hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker")
|
||||
}
|
||||
|
|
|
@ -732,7 +732,7 @@ func TestWaitExecHandleHooks(t *testing.T) {
|
|||
|
||||
for _, e := range test.expectedExecutions {
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.pod)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error)
|
||||
}
|
||||
|
||||
|
@ -999,11 +999,6 @@ func TestMaxHookWait(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRestoreHookTrackerUpdate(t *testing.T) {
|
||||
type change struct {
|
||||
// delta to wait since last change applied or pod added
|
||||
wait time.Duration
|
||||
updated *v1.Pod
|
||||
}
|
||||
type expectedExecution struct {
|
||||
hook *velerov1api.ExecHook
|
||||
name string
|
||||
|
@ -1012,17 +1007,17 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
hookTracker1 := NewMultiHookTracker()
|
||||
hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", hookPhase(""))
|
||||
hookTracker1.Add("restore1", "default", "my-pod", "container1", HookSourceAnnotation, "<from-annotation>", HookPhase(""))
|
||||
|
||||
hookTracker2 := NewMultiHookTracker()
|
||||
hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase(""))
|
||||
hookTracker2.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
|
||||
hookTracker3 := NewMultiHookTracker()
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase(""))
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", hookPhase(""))
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
hookTracker3.Add("restore1", "default", "my-pod", "container2", HookSourceSpec, "my-hook-2", HookPhase(""))
|
||||
|
||||
hookTracker4 := NewMultiHookTracker()
|
||||
hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase(""))
|
||||
hookTracker4.Add("restore1", "default", "my-pod", "container1", HookSourceSpec, "my-hook-1", HookPhase(""))
|
||||
|
||||
tests1 := []struct {
|
||||
name string
|
||||
|
@ -1269,7 +1264,7 @@ func TestRestoreHookTrackerUpdate(t *testing.T) {
|
|||
|
||||
for _, e := range test.expectedExecutions {
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.pod)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error)
|
||||
}
|
||||
|
||||
|
|
|
@ -181,7 +181,7 @@ func matchConditions(u *unstructured.Unstructured, rules []MatchRule, _ logrus.F
|
|||
p := &JSONPatcher{patches: fixed}
|
||||
_, err := p.applyPatch(u)
|
||||
if err != nil {
|
||||
if errors.Is(err, jsonpatch.ErrTestFailed) {
|
||||
if errors.Is(err, jsonpatch.ErrTestFailed) || errors.Is(err, jsonpatch.ErrMissing) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
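The added ErrMissing check distinguishes "the tested path does not exist" from a real patch failure: both ErrTestFailed and ErrMissing now mean the condition simply did not match, so the rule's patches are skipped instead of failing the restore. A small sketch of the underlying library behaviour; the import path github.com/evanphx/json-patch/v5 is an assumption, since the module version Velero pins is not shown here.

package main

import (
	"errors"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"metadata":{"labels":{"a":"b"}}}`)

	// A "test" op against an existing path with a different value fails the test.
	p1, _ := jsonpatch.DecodePatch([]byte(`[{"op":"test","path":"/metadata/labels/a","value":"c"}]`))
	_, err := p1.Apply(doc)
	fmt.Println("ErrTestFailed:", errors.Is(err, jsonpatch.ErrTestFailed))

	// A "test" op against a path that does not exist surfaces ErrMissing instead,
	// which matchConditions above now also treats as "condition not matched".
	p2, _ := jsonpatch.DecodePatch([]byte(`[{"op":"test","path":"/metadata/labels/a/b","value":"c"}]`))
	_, err = p2.Apply(doc)
	fmt.Println("ErrMissing:", errors.Is(err, jsonpatch.ErrMissing))
}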
|
||||
|
|
|
@ -429,69 +429,69 @@ func TestGetResourceModifiersFromConfig(t *testing.T) {
|
|||
|
||||
func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
||||
pvcStandardSc := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"storageClassName": "standard",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pvcPremiumSc := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"storageClassName": "premium",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pvcGoldSc := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "v1",
|
||||
"kind": "PersistentVolumeClaim",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-pvc",
|
||||
"namespace": "foo",
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"storageClassName": "gold",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
deployNginxOneReplica := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]interface{}{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"replicas": int64(1),
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
|
@ -502,27 +502,27 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
|||
},
|
||||
}
|
||||
deployNginxTwoReplica := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]interface{}{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"replicas": int64(2),
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
|
@ -533,31 +533,31 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
|||
},
|
||||
}
|
||||
deployNginxMysql := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": "test-deployment",
|
||||
"namespace": "foo",
|
||||
"labels": map[string]interface{}{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"replicas": int64(1),
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": map[string]interface{}{
|
||||
"template": map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"labels": map[string]any{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"containers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"containers": []any{
|
||||
map[string]any{
|
||||
"name": "nginx",
|
||||
"image": "nginx:latest",
|
||||
},
|
||||
map[string]interface{}{
|
||||
map[string]any{
|
||||
"name": "mysql",
|
||||
"image": "mysql:latest",
|
||||
},
|
||||
|
@ -568,19 +568,19 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
|
|||
},
|
||||
}
|
||||
cmTrue := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"data": map[string]interface{}{
|
||||
"data": map[string]any{
|
||||
"test": "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
cmFalse := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"data": map[string]interface{}{
|
||||
"data": map[string]any{
|
||||
"test": "false",
|
||||
},
|
||||
},
|
||||
|
@ -1767,6 +1767,35 @@ func TestResourceModifiers_conditional_patches(t *testing.T) {
|
|||
wantErr: false,
|
||||
wantObj: cmWithLabelAToB.DeepCopy(),
|
||||
},
|
||||
{
|
||||
name: "missing condition path and skip patches",
|
||||
rm: &ResourceModifiers{
|
||||
Version: "v1",
|
||||
ResourceModifierRules: []ResourceModifierRule{
|
||||
{
|
||||
Conditions: Conditions{
|
||||
GroupResource: "*",
|
||||
Namespaces: []string{"fake"},
|
||||
Matches: []MatchRule{
|
||||
{
|
||||
Path: "/metadata/labels/a/b",
|
||||
Value: "c",
|
||||
},
|
||||
},
|
||||
},
|
||||
MergePatches: []JSONMergePatch{
|
||||
{
|
||||
PatchData: `{"metadata":{"labels":{"a":"c"}}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
obj: cmWithLabelAToB.DeepCopy(),
|
||||
groupResource: "configmaps",
|
||||
wantErr: false,
|
||||
wantObj: cmWithLabelAToB.DeepCopy(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
|
@ -53,7 +53,7 @@ func (p *StrategicMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.Fie
|
|||
|
||||
// strategicPatchObject applies a strategic merge patch of `patchBytes` to
|
||||
// `originalObject` and stores the result in `objToUpdate`.
|
||||
// It additionally returns the map[string]interface{} representation of the
|
||||
// It additionally returns the map[string]any representation of the
|
||||
// `originalObject` and `patchBytes`.
|
||||
// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned.
|
||||
func strategicPatchObject(
|
||||
|
@ -67,7 +67,7 @@ func strategicPatchObject(
|
|||
return err
|
||||
}
|
||||
|
||||
patchMap := make(map[string]interface{})
|
||||
patchMap := make(map[string]any)
|
||||
var strictErrs []error
|
||||
strictErrs, err = kubejson.UnmarshalStrict(patchBytes, &patchMap)
|
||||
if err != nil {
|
||||
|
@ -84,8 +84,8 @@ func strategicPatchObject(
|
|||
// <originalMap> and stores the result in <objToUpdate>.
|
||||
// NOTE: <objToUpdate> must be a versioned object.
|
||||
func applyPatchToObject(
|
||||
originalMap map[string]interface{},
|
||||
patchMap map[string]interface{},
|
||||
originalMap map[string]any,
|
||||
patchMap map[string]any,
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
strictErrs []error,
|
||||
|
@ -117,14 +117,9 @@ func applyPatchToObject(
|
|||
})
|
||||
}
|
||||
} else if len(strictErrs) > 0 {
|
||||
switch {
|
||||
//case validationDirective == metav1.FieldValidationWarn:
|
||||
// addStrictDecodingWarnings(requestContext, strictErrs)
|
||||
default:
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
}
|
||||
return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -16,11 +16,16 @@ limitations under the License.
|
|||
package resourcepolicies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
crclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
)
|
||||
|
||||
type VolumeActionType string
|
||||
|
@ -41,14 +46,14 @@ type Action struct {
|
|||
// Type defined specific type of action, currently only support 'skip'
|
||||
Type VolumeActionType `yaml:"type"`
|
||||
// Parameters defined map of parameters when executing a specific action
|
||||
Parameters map[string]interface{} `yaml:"parameters,omitempty"`
|
||||
Parameters map[string]any `yaml:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
// volumePolicy defined policy to conditions to match Volumes and related action to handle matched Volumes
|
||||
type VolumePolicy struct {
|
||||
// Conditions defined list of conditions to match Volumes
|
||||
Conditions map[string]interface{} `yaml:"conditions"`
|
||||
Action Action `yaml:"action"`
|
||||
Conditions map[string]any `yaml:"conditions"`
|
||||
Action Action `yaml:"action"`
|
||||
}
|
||||
|
||||
// resourcePolicies currently defined slice of volume policies to handle backup
|
||||
|
@ -71,6 +76,16 @@ func unmarshalResourcePolicies(yamlData *string) (*ResourcePolicies, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode yaml data into resource policies %v", err)
|
||||
}
|
||||
|
||||
for _, vp := range resPolicies.VolumePolicies {
|
||||
if raw, ok := vp.Conditions["pvcLabels"]; ok {
|
||||
switch raw.(type) {
|
||||
case map[string]any, map[string]string:
|
||||
default:
|
||||
return nil, fmt.Errorf("pvcLabels must be a map of string to string, got %T", raw)
|
||||
}
|
||||
}
|
||||
}
|
||||
return resPolicies, nil
|
||||
}
|
||||
|
||||
|
@ -91,6 +106,9 @@ func (p *Policies) BuildPolicy(resPolicies *ResourcePolicies) error {
|
|||
volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
|
||||
volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
|
||||
volP.conditions = append(volP.conditions, &volumeTypeCondition{volumeTypes: con.VolumeTypes})
|
||||
if len(con.PVCLabels) > 0 {
|
||||
volP.conditions = append(volP.conditions, &pvcLabelsCondition{labels: con.PVCLabels})
|
||||
}
|
||||
p.volumePolicies = append(p.volumePolicies, volP)
|
||||
}
|
||||
|
||||
|
@ -117,16 +135,28 @@ func (p *Policies) match(res *structuredVolume) *Action {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Policies) GetMatchAction(res interface{}) (*Action, error) {
|
||||
func (p *Policies) GetMatchAction(res any) (*Action, error) {
|
||||
data, ok := res.(VolumeFilterData)
|
||||
if !ok {
|
||||
return nil, errors.New("failed to convert input to VolumeFilterData")
|
||||
}
|
||||
|
||||
volume := &structuredVolume{}
|
||||
switch obj := res.(type) {
|
||||
case *v1.PersistentVolume:
|
||||
volume.parsePV(obj)
|
||||
case *v1.Volume:
|
||||
volume.parsePodVolume(obj)
|
||||
switch {
|
||||
case data.PersistentVolume != nil:
|
||||
volume.parsePV(data.PersistentVolume)
|
||||
if data.PVC != nil {
|
||||
volume.parsePVC(data.PVC)
|
||||
}
|
||||
case data.PodVolume != nil:
|
||||
volume.parsePodVolume(data.PodVolume)
|
||||
if data.PVC != nil {
|
||||
volume.parsePVC(data.PVC)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("failed to convert object")
|
||||
}
|
||||
|
||||
return p.match(volume), nil
|
||||
}
|
||||
|
||||
|
@ -148,7 +178,43 @@ func (p *Policies) Validate() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
|
||||
func GetResourcePoliciesFromBackup(
|
||||
backup velerov1api.Backup,
|
||||
client crclient.Client,
|
||||
logger logrus.FieldLogger,
|
||||
) (resourcePolicies *Policies, err error) {
|
||||
if backup.Spec.ResourcePolicy != nil &&
|
||||
strings.EqualFold(backup.Spec.ResourcePolicy.Kind, ConfigmapRefType) {
|
||||
policiesConfigMap := &v1.ConfigMap{}
|
||||
err = client.Get(
|
||||
context.Background(),
|
||||
crclient.ObjectKey{Namespace: backup.Namespace, Name: backup.Spec.ResourcePolicy.Name},
|
||||
policiesConfigMap,
|
||||
)
|
||||
if err != nil {
|
||||
logger.Errorf("Fail to get ResourcePolicies %s ConfigMap with error %s.",
|
||||
backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
|
||||
return nil, fmt.Errorf("fail to get ResourcePolicies %s ConfigMap with error %s",
|
||||
backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
|
||||
}
|
||||
resourcePolicies, err = getResourcePoliciesFromConfig(policiesConfigMap)
|
||||
if err != nil {
|
||||
logger.Errorf("Fail to read ResourcePolicies from ConfigMap %s with error %s.",
|
||||
backup.Namespace+"/"+backup.Name, err.Error())
|
||||
return nil, fmt.Errorf("fail to read the ResourcePolicies from ConfigMap %s with error %s",
|
||||
backup.Namespace+"/"+backup.Name, err.Error())
|
||||
} else if err = resourcePolicies.Validate(); err != nil {
|
||||
logger.Errorf("Fail to validate ResourcePolicies in ConfigMap %s with error %s.",
|
||||
backup.Namespace+"/"+backup.Name, err.Error())
|
||||
return nil, fmt.Errorf("fail to validate ResourcePolicies in ConfigMap %s with error %s",
|
||||
backup.Namespace+"/"+backup.Name, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return resourcePolicies, nil
|
||||
}
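Combined with the VolumeFilterData type introduced later in this diff, the new GetResourcePoliciesFromBackup helper takes a caller from a Backup that references a resource-policies ConfigMap to a per-volume decision. The sketch below is illustrative only (shouldSkipVolume is not part of this change); it is written in the same resourcepolicies package and trims logging and edge cases.

package resourcepolicies

import (
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// shouldSkipVolume loads the policies referenced by the backup's spec.resourcePolicy
// ConfigMap, wraps the PV and its PVC in a VolumeFilterData, and reports whether the
// matched action is "skip".
func shouldSkipVolume(
	backup velerov1api.Backup,
	client crclient.Client,
	log logrus.FieldLogger,
	pv *corev1.PersistentVolume,
	pvc *corev1.PersistentVolumeClaim,
) (bool, error) {
	policies, err := GetResourcePoliciesFromBackup(backup, client, log)
	if err != nil {
		return false, err
	}
	if policies == nil {
		// The backup does not reference a resource-policies ConfigMap.
		return false, nil
	}

	action, err := policies.GetMatchAction(NewVolumeFilterData(pv, nil, pvc))
	if err != nil {
		return false, err
	}
	return action != nil && action.Type == Skip, nil
}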
|
||||
|
||||
func getResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
|
||||
if cm == nil {
|
||||
return nil, fmt.Errorf("could not parse config from nil configmap")
|
||||
}
|
||||
|
|
|
@ -93,21 +93,70 @@ func TestLoadResourcePolicies(t *testing.T) {
|
|||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported formart volume policies",
|
||||
name: "supported format volume policies",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
nfs: {}
|
||||
storageClass:
|
||||
- gp2
|
||||
- ebs-sc
|
||||
action:
|
||||
type: skip`,
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,100Gi'
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format csi driver with volumeAttributes for volume policies",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,100Gi'
|
||||
csi:
|
||||
driver: aws.efs.csi.driver
|
||||
volumeAttributes:
|
||||
key1: value1
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "supported format pvcLabels",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: database
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "error format of pvcLabels (not a map)",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels: "production"
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "supported format pvcLabels with extra keys",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
region: us-west
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
@ -126,36 +175,54 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
|||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]interface{}{
|
||||
Conditions: map[string]any{
|
||||
"capacity": "0,10Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "skip"},
|
||||
Conditions: map[string]any{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "files.csi.driver",
|
||||
"volumeAttributes": map[string]string{"protocol": "nfs"},
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "snapshot"},
|
||||
Conditions: map[string]interface{}{
|
||||
Conditions: map[string]any{
|
||||
"capacity": "10,100Gi",
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "fs-backup"},
|
||||
Conditions: map[string]interface{}{
|
||||
Conditions: map[string]any{
|
||||
"storageClass": []string{"gp2", "ebs-sc"},
|
||||
"csi": interface{}(
|
||||
map[string]interface{}{
|
||||
"csi": any(
|
||||
map[string]any{
|
||||
"driver": "aws.efs.csi.driver",
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Action: Action{Type: "snapshot"},
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
|
@ -172,6 +239,24 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
|||
},
|
||||
expectedAction: &Action{Type: "skip"},
|
||||
},
|
||||
{
|
||||
name: "match policy AFS NFS",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "afs-nfs",
|
||||
csi: &csiVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
},
|
||||
expectedAction: &Action{Type: "skip"},
|
||||
},
|
||||
{
|
||||
name: "match policy AFS SMB",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "afs-smb",
|
||||
csi: &csiVolumeSource{Driver: "files.csi.driver"},
|
||||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
{
|
||||
name: "both matches return the first policy",
|
||||
volume: &structuredVolume{
|
||||
|
@ -182,7 +267,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
|||
expectedAction: &Action{Type: "snapshot"},
|
||||
},
|
||||
{
|
||||
name: "dismatch all policies",
|
||||
name: "mismatch all policies",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(50<<30, resource.BinarySI),
|
||||
storageClass: "ebs-sc",
|
||||
|
@ -190,6 +275,29 @@ func TestGetResourceMatchedAction(t *testing.T) {
|
|||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
{
|
||||
name: "match pvcLabels condition",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "some-class",
|
||||
pvcLabels: map[string]string{
|
||||
"environment": "production",
|
||||
"team": "backend",
|
||||
},
|
||||
},
|
||||
expectedAction: &Action{Type: "snapshot"},
|
||||
},
|
||||
{
|
||||
name: "mismatch pvcLabels condition",
|
||||
volume: &structuredVolume{
|
||||
capacity: *resource.NewQuantity(5<<30, resource.BinarySI),
|
||||
storageClass: "some-class",
|
||||
pvcLabels: map[string]string{
|
||||
"environment": "staging",
|
||||
},
|
||||
},
|
||||
expectedAction: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
@ -226,23 +334,69 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
|
|||
Namespace: "test-namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"test-data": "version: v1\nvolumePolicies:\n- conditions:\n capacity: '0,10Gi'\n action:\n type: skip",
|
||||
"test-data": `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: '0,10Gi'
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
// Call the function and check for errors
|
||||
resPolicies, err := GetResourcePoliciesFromConfig(cm)
|
||||
assert.Nil(t, err)
|
||||
resPolicies, err := getResourcePoliciesFromConfig(cm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check that the returned resourcePolicies object contains the expected data
|
||||
assert.Equal(t, "v1", resPolicies.version)
|
||||
assert.Len(t, resPolicies.volumePolicies, 1)
|
||||
|
||||
assert.Len(t, resPolicies.volumePolicies, 3)
|
||||
|
||||
policies := ResourcePolicies{
|
||||
Version: "v1",
|
||||
VolumePolicies: []VolumePolicy{
|
||||
{
|
||||
Conditions: map[string]interface{}{
|
||||
Conditions: map[string]any{
|
||||
"capacity": "0,10Gi",
|
||||
"csi": map[string]any{
|
||||
"driver": "disks.csi.driver",
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
},
|
||||
},
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"csi": map[string]any{
|
||||
"driver": "files.csi.driver",
|
||||
"volumeAttributes": map[string]string{"protocol": "nfs"},
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
},
|
||||
},
|
||||
{
|
||||
Conditions: map[string]any{
|
||||
"pvcLabels": map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
Action: Action{
|
||||
Type: Skip,
|
||||
|
@ -250,11 +404,13 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := &Policies{}
|
||||
err = p.BuildPolicy(&policies)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build policy with error %v", err)
|
||||
t.Fatalf("failed to build policy: %v", err)
|
||||
}
|
||||
|
||||
assert.Equal(t, p, resPolicies)
|
||||
}
|
||||
|
||||
|
@ -263,6 +419,8 @@ func TestGetMatchAction(t *testing.T) {
|
|||
name string
|
||||
yamlData string
|
||||
vol *v1.PersistentVolume
|
||||
podVol *v1.Volume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
skip bool
|
||||
}{
|
||||
{
|
||||
|
@ -276,7 +434,7 @@ volumePolicies:
|
|||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "aws.ebs.csi.driver"},
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
|
@ -298,7 +456,173 @@ volumePolicies:
|
|||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "csi not configured",
|
||||
name: "Skip AFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS CSI condition with AFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver"},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with AFS SMB volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip AFS NFS CSI condition with AFS NFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip
|
||||
`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"protocol": "nfs"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with Disk volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "disks.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with AFS SMB volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1"}},
|
||||
}},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "Skip Disk and AFS NFS CSI condition with AFS NFS volumes",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
csi:
|
||||
driver: disks.csi.driver
|
||||
action:
|
||||
type: skip
|
||||
- conditions:
|
||||
csi:
|
||||
driver: files.csi.driver
|
||||
volumeAttributes:
|
||||
protocol: nfs
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "files.csi.driver", VolumeAttributes: map[string]string{"key1": "val1", "protocol": "nfs"}},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "csi not configured and testing capacity condition",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
|
@ -311,7 +635,7 @@ volumePolicies:
|
|||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "aws.ebs.csi.driver"},
|
||||
CSI: &v1.CSIPersistentVolumeSource{Driver: "ebs.csi.aws.com"},
|
||||
}},
|
||||
},
|
||||
skip: true,
|
||||
|
@ -394,7 +718,7 @@ volumePolicies:
|
|||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "dismatch volume by types",
|
||||
name: "mismatch volume by types",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
|
@ -415,6 +739,224 @@ volumePolicies:
|
|||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match, criteria label is a subset of the pvc labels",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production", "app": "backend"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PVC labels match don't match exactly",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: frontend
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PVC labels mismatch",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
capacity: "0,100Gi"
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv-2",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
Namespace: "default",
|
||||
Name: "pvc-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "staging"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels match",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-1"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-1",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels mismatch",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-2"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-2",
|
||||
Labels: map[string]string{"environment": "staging"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels match with extra keys on PVC",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-3"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-3",
|
||||
Labels: map[string]string{"environment": "production", "app": "backend"},
|
||||
},
|
||||
},
|
||||
skip: true,
|
||||
},
|
||||
{
|
||||
name: "PodVolume case with PVC labels don't match exactly",
|
||||
yamlData: `version: v1
|
||||
volumePolicies:
|
||||
- conditions:
|
||||
pvcLabels:
|
||||
environment: production
|
||||
app: frontend
|
||||
action:
|
||||
type: skip`,
|
||||
vol: nil,
|
||||
podVol: &v1.Volume{Name: "pod-vol-4"},
|
||||
pvc: &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pvc-4",
|
||||
Labels: map[string]string{"environment": "production"},
|
||||
},
|
||||
},
|
||||
skip: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
@ -422,12 +964,25 @@ volumePolicies:
|
|||
if err != nil {
|
||||
t.Fatalf("got error when get match action %v", err)
|
||||
}
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
policies := &Policies{}
|
||||
err = policies.BuildPolicy(resPolicies)
|
||||
assert.Nil(t, err)
|
||||
action, err := policies.GetMatchAction(tc.vol)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
vfd := VolumeFilterData{}
|
||||
if tc.pvc != nil {
|
||||
vfd.PVC = tc.pvc
|
||||
}
|
||||
|
||||
if tc.vol != nil {
|
||||
vfd.PersistentVolume = tc.vol
|
||||
}
|
||||
|
||||
if tc.podVol != nil {
|
||||
vfd.PodVolume = tc.podVol
|
||||
}
|
||||
|
||||
action, err := policies.GetMatchAction(vfd)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if tc.skip {
|
||||
if action.Type != Skip {
|
||||
|
@@ -439,3 +994,82 @@ volumePolicies:
})
}
}

func TestGetMatchAction_Errors(t *testing.T) {
p := &Policies{}

testCases := []struct {
name string
input any
expectedErr string
}{
{
name: "invalid input type",
input: "invalid input",
expectedErr: "failed to convert input to VolumeFilterData",
},
{
name: "no volume provided",
input: VolumeFilterData{
PersistentVolume: nil,
PodVolume: nil,
PVC: nil,
},
expectedErr: "failed to convert object",
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
action, err := p.GetMatchAction(tc.input)
assert.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedErr)
assert.Nil(t, action)
})
}
}

func TestParsePVC(t *testing.T) {
tests := []struct {
name string
pvc *v1.PersistentVolumeClaim
expectedLabels map[string]string
expectErr bool
}{
{
name: "valid PVC with labels",
pvc: &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"env": "prod"},
},
},
expectedLabels: map[string]string{"env": "prod"},
expectErr: false,
},
{
name: "valid PVC with empty labels",
pvc: &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{},
},
},
expectedLabels: nil,
expectErr: false,
},
{
name: "nil PVC pointer",
pvc: (*v1.PersistentVolumeClaim)(nil),
expectedLabels: nil,
expectErr: false,
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
s := &structuredVolume{}
s.parsePVC(tc.pvc)

assert.Equal(t, tc.expectedLabels, s.pvcLabels)
})
}
}
@@ -0,0 +1,21 @@
package resourcepolicies

import (
corev1 "k8s.io/api/core/v1"
)

// VolumeFilterData bundles the volume data needed for volume policy filtering
type VolumeFilterData struct {
PersistentVolume *corev1.PersistentVolume
PodVolume *corev1.Volume
PVC *corev1.PersistentVolumeClaim
}

// NewVolumeFilterData constructs a new VolumeFilterData instance.
func NewVolumeFilterData(pv *corev1.PersistentVolume, podVol *corev1.Volume, pvc *corev1.PersistentVolumeClaim) VolumeFilterData {
return VolumeFilterData{
PersistentVolume: pv,
PodVolume: podVol,
PVC: pvc,
}
}
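For context, a minimal usage sketch (not part of this change) of how a caller could feed the new VolumeFilterData into policy evaluation. It assumes a *Policies value has already been built (for example via BuildPolicy, as the tests above do) and uses only identifiers that appear in this diff; the helper name shouldSkipPodVolume is hypothetical.

package resourcepolicies

import (
	corev1 "k8s.io/api/core/v1"
)

// shouldSkipPodVolume is a hypothetical helper: it bundles a pod volume and its
// bound PVC into a VolumeFilterData and asks the already-built policies for a
// matching action, reporting whether that action is "skip".
func shouldSkipPodVolume(policies *Policies, podVol *corev1.Volume, pvc *corev1.PersistentVolumeClaim) (bool, error) {
	vfd := NewVolumeFilterData(nil /* no PersistentVolume in the pod-volume case */, podVol, pvc)
	action, err := policies.GetMatchAction(vfd)
	if err != nil {
		return false, err
	}
	// A nil action means no policy matched this volume.
	return action != nil && action.Type == Skip, nil
}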
@@ -0,0 +1,102 @@
package resourcepolicies

import (
"testing"

"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestNewVolumeFilterData(t *testing.T) {
testCases := []struct {
name string
pv *corev1.PersistentVolume
podVol *corev1.Volume
pvc *corev1.PersistentVolumeClaim
expectedPVName string
expectedPodName string
expectedPVCName string
}{
{
name: "all provided",
pv: &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv-test",
},
},
podVol: &corev1.Volume{
Name: "pod-vol-test",
},
pvc: &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-test",
},
},
expectedPVName: "pv-test",
expectedPodName: "pod-vol-test",
expectedPVCName: "pvc-test",
},
{
name: "only PV provided",
pv: &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv-only",
},
},
podVol: nil,
pvc: nil,
expectedPVName: "pv-only",
expectedPodName: "",
expectedPVCName: "",
},
{
name: "only PodVolume provided",
pv: nil,
podVol: &corev1.Volume{
Name: "pod-only",
},
pvc: nil,
expectedPVName: "",
expectedPodName: "pod-only",
expectedPVCName: "",
},
{
name: "only PVC provided",
pv: nil,
podVol: nil,
pvc: &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc-only",
},
},
expectedPVName: "",
expectedPodName: "",
expectedPVCName: "pvc-only",
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
vfd := NewVolumeFilterData(tc.pv, tc.podVol, tc.pvc)
if tc.expectedPVName != "" {
assert.NotNil(t, vfd.PersistentVolume)
assert.Equal(t, tc.expectedPVName, vfd.PersistentVolume.Name)
} else {
assert.Nil(t, vfd.PersistentVolume)
}
if tc.expectedPodName != "" {
assert.NotNil(t, vfd.PodVolume)
assert.Equal(t, tc.expectedPodName, vfd.PodVolume.Name)
} else {
assert.Nil(t, vfd.PodVolume)
}
if tc.expectedPVCName != "" {
assert.NotNil(t, vfd.PVC)
assert.Equal(t, tc.expectedPVCName, vfd.PVC.Name)
} else {
assert.Nil(t, vfd.PVC)
}
})
}
}
@@ -20,6 +20,8 @@ import (
"fmt"
"strings"

"k8s.io/apimachinery/pkg/labels"

"github.com/pkg/errors"
"gopkg.in/yaml.v3"
corev1api "k8s.io/api/core/v1"
@@ -48,6 +50,7 @@ type structuredVolume struct {
nfs *nFSVolumeSource
csi *csiVolumeSource
volumeType SupportedVolume
pvcLabels map[string]string
}

func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {
@@ -60,12 +63,18 @@ func (s *structuredVolume) parsePV(pv *corev1api.PersistentVolume) {

csi := pv.Spec.CSI
if csi != nil {
s.csi = &csiVolumeSource{Driver: csi.Driver}
s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes}
}

s.volumeType = getVolumeTypeFromPV(pv)
}

func (s *structuredVolume) parsePVC(pvc *corev1api.PersistentVolumeClaim) {
if pvc != nil && len(pvc.GetLabels()) > 0 {
s.pvcLabels = pvc.Labels
}
}

func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) {
nfs := vol.NFS
if nfs != nil {
@@ -74,12 +83,33 @@ func (s *structuredVolume) parsePodVolume(vol *corev1api.Volume) {

csi := vol.CSI
if csi != nil {
s.csi = &csiVolumeSource{Driver: csi.Driver}
s.csi = &csiVolumeSource{Driver: csi.Driver, VolumeAttributes: csi.VolumeAttributes}
}

s.volumeType = getVolumeTypeFromVolume(vol)
}

// pvcLabelsCondition defines a condition that matches if the PVC's labels contain all the provided key/value pairs.
type pvcLabelsCondition struct {
labels map[string]string
}

func (c *pvcLabelsCondition) match(v *structuredVolume) bool {
// No labels specified: always match.
if len(c.labels) == 0 {
return true
}
if v.pvcLabels == nil {
return false
}
selector := labels.SelectorFromSet(c.labels)
return selector.Matches(labels.Set(v.pvcLabels))
}

func (c *pvcLabelsCondition) validate() error {
return nil
}

type capacityCondition struct {
capacity capacity
}
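The new pvcLabelsCondition relies on label-selector subset matching: every key/value pair configured in the policy must be present on the PVC, while extra PVC labels are ignored (which is why the "extra keys on PVC" test above still skips the volume). Below is a small self-contained sketch of that behaviour, using only the k8s.io/apimachinery labels package already imported by this change; the label values are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Labels configured on the policy condition.
	selector := labels.SelectorFromSet(map[string]string{"environment": "production"})

	// A PVC with extra labels still matches: only the condition's keys are checked.
	fmt.Println(selector.Matches(labels.Set{"environment": "production", "app": "backend"})) // true

	// A PVC missing or mismatching a condition key does not match.
	fmt.Println(selector.Matches(labels.Set{"environment": "staging"})) // false
}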
@@ -160,7 +190,25 @@ func (c *csiCondition) match(v *structuredVolume) bool {
return false
}

return c.csi.Driver == v.csi.Driver
if c.csi.Driver != v.csi.Driver {
return false
}

if len(c.csi.VolumeAttributes) == 0 {
return true
}

if len(v.csi.VolumeAttributes) == 0 {
return false
}

for key, value := range c.csi.VolumeAttributes {
if value != v.csi.VolumeAttributes[key] {
return false
}
}

return true
}

// parseCapacity parse string into capacity format
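With this change a csi condition can also constrain volume attributes: the driver must still be equal, and when the condition lists attributes, each listed key/value pair must be present on the volume; a condition without attributes keeps the old driver-only behaviour. A rough in-package test sketch of that semantics (not part of this change), using only the unexported types and fields visible in this diff; the driver name and attribute values are illustrative.

// Hypothetical in-package test illustrating the new volumeAttributes matching.
func TestCSIConditionVolumeAttributesSketch(t *testing.T) {
	cond := &csiCondition{csi: &csiVolumeSource{
		Driver:           "ebs.csi.aws.com", // illustrative driver name
		VolumeAttributes: map[string]string{"encrypted": "true"},
	}}

	// Extra attributes on the volume are ignored; listed ones must match.
	vol := &structuredVolume{csi: &csiVolumeSource{
		Driver:           "ebs.csi.aws.com",
		VolumeAttributes: map[string]string{"encrypted": "true", "fsType": "ext4"},
	}}
	assert.True(t, cond.match(vol))

	// A differing value for a listed attribute fails the match.
	vol.csi.VolumeAttributes = map[string]string{"encrypted": "false"}
	assert.False(t, cond.match(vol))
}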
@@ -208,9 +256,9 @@ func (c *capacity) isInRange(y resource.Quantity) bool {
return false
}

// unmarshalVolConditions parse map[string]interface{} into volumeConditions format
// unmarshalVolConditions parse map[string]any into volumeConditions format
// and validate key fields of the map.
func unmarshalVolConditions(con map[string]interface{}) (*volumeConditions, error) {
func unmarshalVolConditions(con map[string]any) (*volumeConditions, error) {
volConditons := &volumeConditions{}
buffer := new(bytes.Buffer)
err := yaml.NewEncoder(buffer).Encode(con)