/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gceGCEDriver
import (
"context"
"errors"
"fmt"
"math/rand"
neturl "net/url"
"sort"
"strings"
"time"
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
csi "github.com/container-storage-interface/spec/lib/go/csi"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
"k8s.io/utils/strings/slices"
"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/common"
gce "sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/gce-cloud-provider/compute"
"sigs.k8s.io/gcp-compute-persistent-disk-csi-driver/pkg/metrics"
)
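// GCEControllerServer implements the CSI ControllerServer interface (see the
// interface assertion below) on top of GCE Persistent Disks.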
type GCEControllerServer struct {
Driver *GCEDriver
CloudProvider gce.GCECompute
Metrics metrics.MetricsManager
volumeEntries []*csi.ListVolumesResponse_Entry
volumeEntriesSeen map[string]int
snapshots []*csi.ListSnapshotsResponse_Entry
snapshotTokens map[string]int
// A map storing all volumes with ongoing operations so that additional
// operations for that same volume (as defined by Volume Key) return an
// Aborted error
volumeLocks *common.VolumeLocks
// There are several kinds of errors that are immediately retried by either
// the CSI sidecars or the k8s control plane. The retries consume GCP api
// quota, eg by doing ListVolumes, and so backoff needs to be used to
// prevent quota exhaustion.
//
// Examples of these errors are the per-instance GCE operation queue getting
// full (typically only 32 operations in flight at a time are allowed), and
// disks being deleted out from under a PV causing unpublish errors.
//
// While we need to backoff, we also need some semblance of fairness. In
// particular, volume unpublish retries happen very quickly, and with
// a single backoff per node these retries can prevent any other operation
// from making progress, even if it would succeed. Hence we track errors on
// node and disk pairs, backing off only for calls matching such a
// pair.
//
// An implication is that in the full operation queue situation, requests
// for new disks will not backoff the first time. This is acceptable as a
// single spurious call will not cause problems for quota exhaustion or make
// the operation queue problem worse. This is well compensated by giving
// disks where no problems are occurring a chance to be processed.
//
// errorBackoff keeps track of any active backoff condition on a given node,
// and the time when retry of controller publish/unpublish is permissible. A
// node and disk pair is marked with backoff when any error is encountered
// by the driver during controller publish/unpublish calls. If the
// controller eventually allows controller publish/unpublish requests for
// volumes (because the backoff time expired), and those requests fail, the
// next backoff retry time will be updated on every failure and capped at
// 'errorBackoffMaxDuration'. Also, any successful controller
// publish/unpublish call will clear the backoff condition for a node and
// disk.
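//
// A minimal sketch of the intended call pattern (illustrative; the
// backoffId construction is defined elsewhere in this file):
//
//	id := gceCS.errorBackoff.backoffId(req.NodeId, req.VolumeId)
//	if gceCS.errorBackoff.blocking(id) {
//		return nil, status.Errorf(gceCS.errorBackoff.code(id), "backoff in effect")
//	}
//	// on failure: errorBackoff.next(id, code); on success: errorBackoff.reset(id)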
errorBackoff *csiErrorBackoff
// Requisite zones to fallback to when provisioning a disk.
// If there are an insufficient number of zones available in the union
// of preferred/requisite topology, this list is used instead of
// the passed in requisite topology.
// The main use case of this field is to support Regional Persistent Disk
// provisioning in GKE Autopilot, where a GKE cluster may
// be scaled down to 1 zone.
fallbackRequisiteZones []string
// If set to true, the CSI Driver will allow volumes to be provisioned in Storage Pools.
enableStoragePools bool
multiZoneVolumeHandleConfig MultiZoneVolumeHandleConfig
listVolumesConfig ListVolumesConfig
provisionableDisksConfig ProvisionableDisksConfig
// Embed UnimplementedControllerServer to ensure the driver returns Unimplemented for any
// new RPC methods that might be introduced in future versions of the spec.
csi.UnimplementedControllerServer
}
type MultiZoneVolumeHandleConfig struct {
// A set of supported disk types that are compatible with multi-zone volumeHandles.
// The disk type is only validated on ControllerPublish.
// Other operations that interact with volumeHandles (ListVolumes/ControllerUnpublish)
// don't validate the disk type. This ensures existing published multi-zone volumes
// are listed and unpublished correctly, and allows this flag
// to be ratcheted to be more restrictive without affecting volumes that are already
// published.
DiskTypes []string
// If set to true, the CSI driver will enable the multi-zone volumeHandle feature.
// If set to false, volumeHandles that contain 'multi-zone' will not be translated
// to their respective attachment zone (based on the node), which will result in
// an "Unknown zone" error on ControllerPublish/ControllerUnpublish.
Enable bool
}
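// ListVolumesConfig controls which GCE APIs ListVolumes uses to discover
// disk/node attachment pairings.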
type ListVolumesConfig struct {
UseInstancesAPIForPublishedNodes bool
}
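// ProvisionableDisksConfig lists the disk types that accept dynamic IOPS and
// throughput changes (consulted by diskSupportsIopsChange and
// diskSupportsThroughputChange).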
type ProvisionableDisksConfig struct {
SupportsIopsChange []string
SupportsThroughputChange []string
}
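// listDisksFields returns the disks.list fields to request, omitting the
// potentially large items/users field when the instances API supplies the
// attachment pairings instead.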
func (c ListVolumesConfig) listDisksFields() []googleapi.Field {
if c.UseInstancesAPIForPublishedNodes {
// If we are using the instances.list API in ListVolumes,
// don't include the users field in the response, as an optimization.
// We rely on instances.list items.disks for attachment pairings.
return listDisksFieldsWithoutUsers
}
return listDisksFieldsWithUsers
}
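// csiErrorBackoffId identifies a (node, disk) pair whose publish/unpublish
// errors are tracked for backoff, per the fairness discussion on the
// errorBackoff field above.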
type csiErrorBackoffId string
type csiErrorBackoff struct {
backoff *flowcontrol.Backoff
errorCodes map[csiErrorBackoffId]codes.Code
}
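// workItem carries a context together with either a ControllerPublishVolume
// or a ControllerUnpublishVolume request.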
type workItem struct {
ctx context.Context
publishReq *csi.ControllerPublishVolumeRequest
unpublishReq *csi.ControllerUnpublishVolumeRequest
}
// locationRequirements are additional location topology requirements that must be respected when creating a volume.
type locationRequirements struct {
srcVolRegion string
srcVolZone string
srcReplicationType string
cloneReplicationType string
}
// PDCSIContext is the extracted VolumeContext from controller requests.
type PDCSIContext struct {
ForceAttach bool
}
var _ csi.ControllerServer = &GCEControllerServer{}
const (
// MaxVolumeSizeInBytes is the maximum standard and ssd size of 64TB
MaxVolumeSizeInBytes int64 = 64 * 1024 * 1024 * 1024 * 1024
MinimumVolumeSizeInBytes int64 = 1 * 1024 * 1024 * 1024
MinimumDiskSizeInGb = 1
attachableDiskTypePersistent = "PERSISTENT"
replicationTypeNone = "none"
replicationTypeRegionalPD = "regional-pd"
// The maximum number of entries that we can include in the
// ListVolumesResponse.
// In reality, the limit here is 4MB (based on gRPC client response limits),
// but 500 is a good proxy (gives ~8KB of data per ListVolumesResponse#Entry)
// See https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h#L503)
maxListVolumesResponseEntries = 500
// Keys in the volume context.
contextForceAttach = "force-attach"
resourceApiScheme = "https"
resourceApiService = "compute"
resourceProject = "projects"
listDisksUsersField = googleapi.Field("items/users")
readOnlyManyAccessMode = "READ_ONLY_MANY"
)
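// Volume IDs are GCE resource paths. Illustrative examples (project, zone,
// and disk names are placeholders):
//
//	zonal:      projects/my-project/zones/us-central1-a/disks/my-disk
//	regional:   projects/my-project/regions/us-central1/disks/my-disk
//	multi-zone: projects/my-project/zones/multi-zone/disks/my-disk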
var (
validResourceApiVersions = map[string]bool{"v1": true, "alpha": true, "beta": true, "staging_v1": true, "staging_beta": true, "staging_alpha": true}
// By default GCE returns a lot of data for each instance. Request only a subset of the fields.
listInstancesFields = []googleapi.Field{
"items/disks/deviceName",
"items/disks/source",
"items/selfLink",
"nextPageToken",
}
// By default GCE returns a lot of data for each disk. Request only a subset of the fields.
listDisksFieldsWithoutUsers = []googleapi.Field{
"items/labels",
"items/selfLink",
"nextPageToken",
}
listDisksFieldsWithUsers = append(listDisksFieldsWithoutUsers, "items/users")
disksWithModifiableAccessMode = []string{"hyperdisk-ml"}
)
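// isDiskReady reports whether the disk is in the READY state. FAILED yields
// an error; transitional states (CREATING, DELETING, RESTORING) and any
// unknown status yield (false, nil) so callers can retry.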
func isDiskReady(disk *gce.CloudDisk) (bool, error) {
status := disk.GetStatus()
switch status {
case "READY":
return true, nil
case "FAILED":
return false, fmt.Errorf("Disk %s status is FAILED", disk.GetName())
case "CREATING":
klog.V(4).Infof("Disk %s status is CREATING", disk.GetName())
return false, nil
case "DELETING":
klog.V(4).Infof("Disk %s status is DELETING", disk.GetName())
return false, nil
case "RESTORING":
klog.V(4).Infof("Disk %s status is RESTORING", disk.GetName())
return false, nil
default:
klog.V(4).Infof("Disk %s status is: %s", disk.GetName(), status)
}
return false, nil
}
// cloningLocationRequirements returns additional location requirements to be applied to the given create volume requests topology.
// If the CreateVolumeRequest will use volume cloning, location requirements in compliance with the volume cloning limitations
// will be returned: https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/volume-cloning#limitations.
func cloningLocationRequirements(req *csi.CreateVolumeRequest, cloneReplicationType string) (*locationRequirements, error) {
if !useVolumeCloning(req) {
return nil, nil
}
// If we are using volume cloning, this will be set.
volSrc := req.VolumeContentSource.GetVolume()
volSrcVolID := volSrc.GetVolumeId()
_, sourceVolKey, err := common.VolumeIDToKey(volSrcVolID)
if err != nil {
return nil, fmt.Errorf("volume ID is invalid: %w", err)
}
isZonalSrcVol := sourceVolKey.Type() == meta.Zonal
if isZonalSrcVol {
region, err := common.GetRegionFromZones([]string{sourceVolKey.Zone})
if err != nil {
return nil, fmt.Errorf("failed to get region from zones: %w", err)
}
sourceVolKey.Region = region
}
srcReplicationType := replicationTypeNone
if !isZonalSrcVol {
srcReplicationType = replicationTypeRegionalPD
}
return &locationRequirements{srcVolZone: sourceVolKey.Zone, srcVolRegion: sourceVolKey.Region, srcReplicationType: srcReplicationType, cloneReplicationType: cloneReplicationType}, nil
}
// useVolumeCloning returns true if the create volume request should be created with volume cloning.
func useVolumeCloning(req *csi.CreateVolumeRequest) bool {
return req.VolumeContentSource != nil && req.VolumeContentSource.GetVolume() != nil
}
func (gceCS *GCEControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
response, err := gceCS.createVolumeInternal(ctx, req)
if err == nil && req != nil {
klog.V(4).Infof("CreateVolume succeeded for volume %v", req.Name)
}
return response, err
}
func (gceCS *GCEControllerServer) createVolumeInternal(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
var err error
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
params, err := gceCS.parameterProcessor().ExtractAndDefaultParameters(req.GetParameters(), gceCS.Driver.extraVolumeLabels, gceCS.Driver.extraTags)
metrics.UpdateRequestMetadataFromParams(ctx, params)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to extract parameters: %v", err.Error())
}
// Validate arguments
volumeCapabilities := req.GetVolumeCapabilities()
capacityRange := req.GetCapacityRange()
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Name must be provided")
}
if len(volumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided")
}
// Validate request capacity early
if _, err := getRequestCapacity(capacityRange); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume Request Capacity is invalid: %v", err.Error())
}
err = validateVolumeCapabilities(volumeCapabilities)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "VolumeCapabilities is invalid: %v", err.Error())
}
// https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume
// mutable_parameters MUST take precedence over the values from parameters.
mutableParams := req.GetMutableParameters()
// If the disk type does not support dynamic provisioning, return an error
supportsIopsChange := gceCS.diskSupportsIopsChange(params.DiskType)
supportsThroughputChange := gceCS.diskSupportsThroughputChange(params.DiskType)
if len(mutableParams) > 0 {
if !supportsIopsChange && !supportsThroughputChange {
return nil, status.Errorf(codes.InvalidArgument, "Disk type %s does not support dynamic provisioning", params.DiskType)
}
p, err := common.ExtractModifyVolumeParameters(mutableParams)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid mutable parameters: %v", err)
}
if p.IOPS != nil {
if !supportsIopsChange {
return nil, status.Errorf(codes.InvalidArgument, "Cannot specify IOPS for disk type %s", params.DiskType)
}
params.ProvisionedIOPSOnCreate = *p.IOPS
}
if p.Throughput != nil {
if !supportsThroughputChange {
return nil, status.Errorf(codes.InvalidArgument, "Cannot specify throughput for disk type %s", params.DiskType)
}
params.ProvisionedThroughputOnCreate = *p.Throughput
}
}
// Validate multiwriter
if _, err := getMultiWriterFromCapabilities(volumeCapabilities); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "VolumeCapabilities is invalid: %v", err.Error())
}
err = validateStoragePools(req, params, gceCS.CloudProvider.GetDefaultProject())
if err != nil {
// Reassign error so that all errors are reported as InvalidArgument to RecordOperationErrorMetrics.
err = status.Errorf(codes.InvalidArgument, "CreateVolume failed to validate storage pools: %v", err)
return nil, err
}
// Validate VolumeContentSource is set when access mode is read only
readonly, _ := getReadOnlyFromCapabilities(volumeCapabilities)
if readonly && req.GetVolumeContentSource() == nil {
return nil, status.Error(codes.InvalidArgument, "VolumeContentSource must be provided when AccessMode is set to read only")
}
// Validate multi-zone provisioning configuration
err = gceCS.validateMultiZoneProvisioning(req, params)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to validate multi-zone provisioning request: %v", err)
}
// Verify that the regional availability class is only used on regional disks.
if params.ForceAttach && params.ReplicationType != replicationTypeRegionalPD {
return nil, status.Errorf(codes.InvalidArgument, "invalid availabilty class for zonal disk")
}
if gceCS.multiZoneVolumeHandleConfig.Enable && params.MultiZoneProvisioning {
// Create multi-zone disk, that may have up to N disks.
return gceCS.createMultiZoneDisk(ctx, req, params)
}
// Create single device zonal or regional disk
return gceCS.createSingleDeviceDisk(ctx, req, params)
}
func (gceCS *GCEControllerServer) getSupportedZonesForPDType(ctx context.Context, zones []string, diskType string) ([]string, error) {
project := gceCS.CloudProvider.GetDefaultProject()
zones, err := gceCS.CloudProvider.ListCompatibleDiskTypeZones(ctx, project, zones, diskType)
if err != nil {
return nil, err
}
return zones, nil
}
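// getMultiZoneProvisioningZones determines the candidate zones for a
// multi-zone volume: the union of preferred and requisite topology zones
// (or fallbackRequisiteZones when that union is empty, eg: no nodes running),
// filtered to zones that support the disk type and merged with zones that
// already contain a disk of this name and type.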
func (gceCS *GCEControllerServer) getMultiZoneProvisioningZones(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters) ([]string, error) {
top := req.GetAccessibilityRequirements()
if top == nil {
return nil, status.Errorf(codes.InvalidArgument, "no topology specified")
}
prefZones, err := getZonesFromTopology(top.GetPreferred())
if err != nil {
return nil, fmt.Errorf("could not get zones from preferred topology: %w", err)
}
reqZones, err := getZonesFromTopology(top.GetRequisite())
if err != nil {
return nil, fmt.Errorf("could not get zones from requisite topology: %w", err)
}
prefSet := sets.NewString(prefZones...)
reqSet := sets.NewString(reqZones...)
prefAndReqSet := prefSet.Union(reqSet)
availableZones := prefAndReqSet.List()
if prefAndReqSet.Len() == 0 {
// If there are no specified zones, this means that there were no aggregate
// zones (eg: no nodes running) in the cluster
availableZones = gceCS.fallbackRequisiteZones
}
supportedZones, err := gceCS.getSupportedZonesForPDType(ctx, availableZones, params.DiskType)
if err != nil {
return nil, fmt.Errorf("could not get supported zones for disk type %v from zone list %v: %w", params.DiskType, prefAndReqSet.List(), err)
}
// It's possible that the provided requisite zones shifted since the last time that
// CreateVolume was called (eg: due to a node being removed in a zone)
// Ensure that we combine the supportedZones with any existing zones to get the full set.
existingZones, err := gceCS.getZonesWithDiskNameAndType(ctx, req.Name, params.DiskType)
if err != nil {
return nil, common.LoggedError(fmt.Sprintf("failed to check existing list of zones for request: %v", req.Name), err)
}
supportedSet := sets.NewString(supportedZones...)
existingSet := sets.NewString(existingZones...)
combinedZones := existingSet.Union(supportedSet)
return combinedZones.List(), nil
}
func (gceCS *GCEControllerServer) createMultiZoneDisk(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters) (*csi.CreateVolumeResponse, error) {
// Determine the zones that are needed.
var err error
// For multi-zone, we either select:
// 1) The zones specified in requisite topology requirements
// 2) All zones in the region that are compatible with the selected disk type
zones, err := gceCS.getMultiZoneProvisioningZones(ctx, req, params)
if err != nil {
return nil, err
}
multiZoneVolKey := meta.ZonalKey(req.GetName(), common.MultiZoneValue)
volumeID, err := common.KeyToVolumeID(multiZoneVolKey, gceCS.CloudProvider.GetDefaultProject())
if err != nil {
return nil, err
}
if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer gceCS.volumeLocks.Release(volumeID)
createDiskErrs := []error{}
createdDisks := make([]*gce.CloudDisk, 0, len(zones))
for _, zone := range zones {
volKey := meta.ZonalKey(req.GetName(), zone)
klog.V(4).Infof("Creating single zone disk for zone %q and volume: %v", zone, volKey)
disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, []string{zone})
if err != nil {
createDiskErrs = append(createDiskErrs, err)
continue
}
createdDisks = append(createdDisks, disk)
}
if len(createDiskErrs) > 0 {
return nil, common.LoggedError("Failed to create multi-zone disk: ", errors.Join(createDiskErrs...))
}
if len(createdDisks) == 0 {
return nil, status.Errorf(codes.Internal, "could not create any disks for request: %v", req)
}
// Use the first response as a template
volumeId := fmt.Sprintf("projects/%s/zones/%s/disks/%s", gceCS.CloudProvider.GetDefaultProject(), common.MultiZoneValue, req.GetName())
klog.V(4).Infof("CreateVolume succeeded for multi-zone disks in zones %s: %v", zones, multiZoneVolKey)
return generateCreateVolumeResponseWithVolumeId(createdDisks[0], zones, params, volumeId), nil
}
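// getZonesWithDiskNameAndType returns the zones that already contain a disk
// with the given name and disk type, skipping disks with malformed zone URIs.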
func (gceCS *GCEControllerServer) getZonesWithDiskNameAndType(ctx context.Context, name string, diskType string) ([]string, error) {
zoneOnlyFields := []googleapi.Field{"items/zone", "items/type"}
nameAndRegionFilter := fmt.Sprintf("name=%s", name)
disksWithZone, _, err := gceCS.CloudProvider.ListDisksWithFilter(ctx, zoneOnlyFields, nameAndRegionFilter)
if err != nil {
return nil, fmt.Errorf("failed to check existing zones for disk name %v: %w", name, err)
}
zones := []string{}
for _, disk := range disksWithZone {
if !strings.Contains(disk.Type, diskType) || disk.Zone == "" {
continue
}
diskZone, err := common.ParseZoneFromURI(disk.Zone)
if err != nil {
klog.Warningf("Malformed zone URI %v for disk %v from ListDisks call. Skipping", disk.Zone, name)
continue
}
zones = append(zones, diskZone)
}
return zones, nil
}
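// updateAccessModeIfNecessary switches a disk to READ_ONLY_MANY when it is
// being published read-only, supports access-mode modification (currently
// only Hyperdisk ML), and is not already in that mode.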
func (gceCS *GCEControllerServer) updateAccessModeIfNecessary(ctx context.Context, volKey *meta.Key, disk *gce.CloudDisk, readonly bool) error {
if !slices.Contains(disksWithModifiableAccessMode, disk.GetPDType()) {
// If this isn't a disk type with a modifiable access mode (eg: Hyperdisk ML), return.
// So far, Hyperdisk ML is the only disk type that allows its access mode to be modified.
return nil
}
if !readonly {
// Only update the access mode if we're converting from ReadWrite to ReadOnly
return nil
}
project := gceCS.CloudProvider.GetDefaultProject()
if disk.GetAccessMode() == readOnlyManyAccessMode {
// If the access mode is already readonly, return
return nil
}
return gceCS.CloudProvider.SetDiskAccessMode(ctx, project, volKey, readOnlyManyAccessMode)
}
func (gceCS *GCEControllerServer) createSingleDeviceDisk(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters) (*csi.CreateVolumeResponse, error) {
var err error
var locationTopReq *locationRequirements
if useVolumeCloning(req) {
locationTopReq, err = cloningLocationRequirements(req, params.ReplicationType)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to get location requirements: %v", err.Error())
}
}
// Determine the zone or zones+region of the disk
var zones []string
var volKey *meta.Key
switch params.ReplicationType {
case replicationTypeNone:
zones, err = gceCS.pickZones(ctx, req.GetAccessibilityRequirements(), 1, locationTopReq)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error())
}
if len(zones) != 1 {
return nil, status.Errorf(codes.Internal, "Failed to pick exactly 1 zone for zonal disk, got %v instead", len(zones))
}
volKey = meta.ZonalKey(req.GetName(), zones[0])
case replicationTypeRegionalPD:
zones, err = gceCS.pickZones(ctx, req.GetAccessibilityRequirements(), 2, locationTopReq)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to pick zones for disk: %v", err.Error())
}
region, err := common.GetRegionFromZones(zones)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume failed to get region from zones: %v", err.Error())
}
volKey = meta.RegionalKey(req.GetName(), region)
default:
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume replication type '%s' is not supported", params.ReplicationType)
}
volumeID, err := common.KeyToVolumeID(volKey, gceCS.CloudProvider.GetDefaultProject())
if err != nil {
return nil, common.LoggedError("Failed to convert volume key to volume ID: ", err)
}
if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer gceCS.volumeLocks.Release(volumeID)
disk, err := gceCS.createSingleDisk(ctx, req, params, volKey, zones)
if err != nil {
return nil, common.LoggedError("CreateVolume failed: %v", err)
}
return generateCreateVolumeResponseWithVolumeId(disk, zones, params, volumeID), err
}
func (gceCS *GCEControllerServer) createSingleDisk(ctx context.Context, req *csi.CreateVolumeRequest, params common.DiskParameters, volKey *meta.Key, zones []string) (*gce.CloudDisk, error) {
capacityRange := req.GetCapacityRange()
capBytes, _ := getRequestCapacity(capacityRange)
multiWriter, _ := getMultiWriterFromCapabilities(req.GetVolumeCapabilities())
readonly, _ := getReadOnlyFromCapabilities(req.GetVolumeCapabilities())
accessMode := ""
if readonly && slices.Contains(disksWithModifiableAccessMode, params.DiskType) {
accessMode = readOnlyManyAccessMode
}
// Validate if disk already exists
existingDisk, err := gceCS.CloudProvider.GetDisk(ctx, gceCS.CloudProvider.GetDefaultProject(), volKey, getGCEApiVersion(multiWriter))
if err != nil {
if !gce.IsGCEError(err, "notFound") {
// Failed to GetDisk; the disk may nevertheless already exist, so return a non-final error code.
return nil, common.LoggedError("CreateVolume, failed to getDisk when validating: ", status.Error(codes.Unavailable, err.Error()))
}
}
if err == nil {
// There was no error so we want to validate the disk that we find
err = gceCS.CloudProvider.ValidateExistingDisk(ctx, existingDisk, params,
int64(capacityRange.GetRequiredBytes()),
int64(capacityRange.GetLimitBytes()),
multiWriter)
if err != nil {
return nil, status.Errorf(codes.AlreadyExists, "CreateVolume disk already exists with same name and is incompatible: %v", err.Error())
}
ready, err := isDiskReady(existingDisk)
if err != nil {
return nil, status.Errorf(codes.Aborted, "CreateVolume disk %q had error checking ready status: %v", volKey.String(), err.Error())
}
if !ready {
return nil, status.Errorf(codes.Aborted, "CreateVolume existing disk %v is not ready", volKey)
}
// If there is no validation error, immediately return success
klog.V(4).Infof("CreateVolume succeeded for disk %v, it already exists and was compatible", volKey)
return existingDisk, nil
}
snapshotID := ""
volumeContentSourceVolumeID := ""
content := req.GetVolumeContentSource()
if content != nil {
if content.GetSnapshot() != nil {
snapshotID = content.GetSnapshot().GetSnapshotId()
// Verify that snapshot exists
sl, err := gceCS.getSnapshotByID(ctx, snapshotID)
if err != nil {
return nil, common.LoggedError("CreateVolume failed to get snapshot "+snapshotID+": ", err)
} else if len(sl.Entries) == 0 {
return nil, status.Errorf(codes.NotFound, "CreateVolume source snapshot %s does not exist", snapshotID)
}
}
if content.GetVolume() != nil {
volumeContentSourceVolumeID = content.GetVolume().GetVolumeId()
// Verify that the source VolumeID is in the correct format.
project, sourceVolKey, err := common.VolumeIDToKey(volumeContentSourceVolumeID)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume source volume id is invalid: %v", err.Error())
}
// Verify that the volume in VolumeContentSource exists.
diskFromSourceVolume, err := gceCS.CloudProvider.GetDisk(ctx, project, sourceVolKey, getGCEApiVersion(multiWriter))
if err != nil {
if gce.IsGCEError(err, "notFound") {
return nil, status.Errorf(codes.NotFound, "CreateVolume source volume %s does not exist", volumeContentSourceVolumeID)
} else {
return nil, common.LoggedError("CreateVolume, getDisk error when validating: ", err)
}
}
// Verify the disk type and encryption key of the clone are the same as those of the source disk.
if diskFromSourceVolume.GetPDType() != params.DiskType || !gce.KmsKeyEqual(diskFromSourceVolume.GetKMSKeyName(), params.DiskEncryptionKMSKey) {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume Parameters %v do not match source volume Parameters", params)
}
// Verify the requested capacity is the same as or greater than that of the source disk.
if diskFromSourceVolume.GetSizeGb() > common.BytesToGbRoundDown(capBytes) {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume disk CapacityRange %d is less than source volume CapacityRange %d", common.BytesToGbRoundDown(capBytes), diskFromSourceVolume.GetSizeGb())
}
if params.ReplicationType == replicationTypeNone {
// For zonal->zonal disk clones, verify the zone is the same as that of the source disk.
if sourceVolKey.Zone != volKey.Zone {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume disk zone %s does not match source volume zone %s", volKey.Zone, sourceVolKey.Zone)
}
// regional->zonal disk clones are not allowed.
if diskFromSourceVolume.LocationType() == meta.Regional {
return nil, status.Errorf(codes.InvalidArgument, "Cannot create a zonal disk clone from a regional disk")
}
}
if params.ReplicationType == replicationTypeRegionalPD {
// For regional->regional disk clones, verify the region is the same as that of the source disk.
if diskFromSourceVolume.LocationType() == meta.Regional && sourceVolKey.Region != volKey.Region {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume disk region %s does not match source volume region %s", volKey.Region, sourceVolKey.Region)
}
// For zonal->regional disk clones, verify one of the replica zones matches the source disk zone.
if diskFromSourceVolume.LocationType() == meta.Zonal && !containsZone(zones, sourceVolKey.Zone) {
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume regional disk replica zones %v do not match source volume zone %s", zones, sourceVolKey.Zone)
}
}
// Verify the source disk is ready.
ready, err := isDiskReady(diskFromSourceVolume)
if err != nil {
return nil, status.Errorf(codes.Aborted, "CreateVolume disk from source volume %q had error checking ready status: %v", sourceVolKey.String(), err.Error())
}
if !ready {
return nil, status.Errorf(codes.Aborted, "CreateVolume disk from source volume %v is not ready", sourceVolKey)
}
}
}
// Create the disk
var disk *gce.CloudDisk
name := req.GetName()
switch params.ReplicationType {
case replicationTypeNone:
if len(zones) != 1 {
return nil, status.Errorf(codes.Internal, "CreateVolume failed to get a single zone for creating zonal disk, instead got: %v", zones)
}
disk, err = createSingleZoneDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter, accessMode)
if err != nil {
return nil, common.LoggedError("CreateVolume failed to create single zonal disk "+name+": ", err)
}
case replicationTypeRegionalPD:
if len(zones) != 2 {
return nil, status.Errorf(codes.Internal, "CreateVolume failed to get a 2 zones for creating regional disk, instead got: %v", zones)
}
disk, err = createRegionalDisk(ctx, gceCS.CloudProvider, name, zones, params, capacityRange, capBytes, snapshotID, volumeContentSourceVolumeID, multiWriter, accessMode)
if err != nil {
return nil, common.LoggedError("CreateVolume failed to create regional disk "+name+": ", err)
}
default:
return nil, status.Errorf(codes.InvalidArgument, "CreateVolume replication type '%s' is not supported", params.ReplicationType)
}
ready, err := isDiskReady(disk)
if err != nil {
return nil, status.Errorf(codes.Internal, "CreateVolume disk %v had error checking ready status: %v", volKey, err.Error())
}
if !ready {
return nil, status.Errorf(codes.Internal, "CreateVolume disk %v is not ready", volKey)
}
klog.V(4).Infof("CreateVolume succeeded for disk %v", volKey)
return disk, nil
}
func (gceCS *GCEControllerServer) diskSupportsIopsChange(diskType string) bool {
return slices.Contains(gceCS.provisionableDisksConfig.SupportsIopsChange, diskType)
}
func (gceCS *GCEControllerServer) diskSupportsThroughputChange(diskType string) bool {
return slices.Contains(gceCS.provisionableDisksConfig.SupportsThroughputChange, diskType)
}
func (gceCS *GCEControllerServer) ControllerModifyVolume(ctx context.Context, req *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) {
var err error
volumeID := req.GetVolumeId()
klog.V(4).Infof("Modifying Volume ID: %s", volumeID)
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "volume ID must be provided")
}
project, volKey, err := common.VolumeIDToKey(volumeID)
if err != nil {
// Cannot find volume associated with this ID because VolumeID is not in the correct format
err = status.Errorf(codes.NotFound, "volume ID is invalid: %v", err.Error())
return nil, err
}
volumeModifyParams, err := common.ExtractModifyVolumeParameters(req.GetMutableParameters())
if err != nil {
klog.Errorf("Failed to extract parameters for volume %s: %v", volumeID, err)
err = status.Errorf(codes.InvalidArgument, "Invalid parameters: %v", err)
return nil, err
}
klog.V(4).Infof("Modify Volume Parameters for %s: %v", volumeID, volumeModifyParams)
existingDisk, err := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionBeta)
metrics.UpdateRequestMetadataFromDisk(ctx, existingDisk)
if err != nil {
err = fmt.Errorf("Failed to get volume: %w", err)
return nil, err
}
if existingDisk == nil || existingDisk.GetSelfLink() == "" {
err = status.Errorf(codes.Internal, "failed to get volume : %s", volumeID)
return nil, err
}
// Check if the disk supports dynamic IOPS/Throughput provisioning
diskType := existingDisk.GetPDType()
supportsIopsChange := gceCS.diskSupportsIopsChange(diskType)
supportsThroughputChange := gceCS.diskSupportsThroughputChange(diskType)
if !supportsIopsChange && !supportsThroughputChange {
err = status.Errorf(codes.InvalidArgument, "Failed to modify volume: modifications not supported for disk type %s", diskType)
return nil, err
}
if !supportsIopsChange && volumeModifyParams.IOPS != nil {
err = status.Errorf(codes.InvalidArgument, "Cannot specify IOPS for disk type %s", diskType)
return nil, err
}
if !supportsThroughputChange && volumeModifyParams.Throughput != nil {
err = status.Errorf(codes.InvalidArgument, "Cannot specify throughput for disk type %s", diskType)
return nil, err
}
err = gceCS.CloudProvider.UpdateDisk(ctx, project, volKey, existingDisk, volumeModifyParams)
if err != nil {
klog.Errorf("Failed to modify volume %s: %v", volumeID, err)
err = fmt.Errorf("Failed to modify volume %s: %w", volumeID, err)
return nil, err
}
return &csi.ControllerModifyVolumeResponse{}, nil
}
func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
var err error
// Validate arguments
volumeID := req.GetVolumeId()
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "DeleteVolume Volume ID must be provided")
}
project, volKey, err := common.VolumeIDToKey(volumeID)
if err != nil {
// Cannot find volume associated with this ID because VolumeID is not in
// correct format, this is a success according to the Spec
klog.Warningf("DeleteVolume treating volume as deleted because volume id %s is invalid: %v", volumeID, err.Error())
return &csi.DeleteVolumeResponse{}, nil
}
volumeIsMultiZone := isMultiZoneVolKey(volKey)
if gceCS.multiZoneVolumeHandleConfig.Enable && volumeIsMultiZone {
// Delete multi-zone disk, that may have up to N disks.
return gceCS.deleteMultiZoneDisk(ctx, req, project, volKey)
}
// Delete zonal or regional disk
return gceCS.deleteSingleDeviceDisk(ctx, req, project, volKey)
}
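// getGCEApiVersion picks the beta compute API when multi-writer support is
// required and the GA (v1) API otherwise.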
func getGCEApiVersion(multiWriter bool) gce.GCEAPIVersion {
if multiWriter {
return gce.GCEAPIVersionBeta
}
return gce.GCEAPIVersionV1
}
func (gceCS *GCEControllerServer) deleteMultiZoneDisk(ctx context.Context, req *csi.DeleteVolumeRequest, project string, volKey *meta.Key) (*csi.DeleteVolumeResponse, error) {
// Delete the disks with this name in each default zone of the region.
var err error
existingZones := []string{gceCS.CloudProvider.GetDefaultZone()}
zones, err := getDefaultZonesInRegion(ctx, gceCS, existingZones)
if err != nil {
return nil, fmt.Errorf("failed to list default zones: %w", err)
}
volumeID := req.GetVolumeId()
if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer gceCS.volumeLocks.Release(volumeID)
deleteDiskErrs := []error{}
for _, zone := range zones {
zonalVolKey := &meta.Key{
Name: volKey.Name,
Region: volKey.Region,
Zone: zone,
}
disk, _ := gceCS.CloudProvider.GetDisk(ctx, project, zonalVolKey, gce.GCEAPIVersionV1)
// TODO: Consolidate the parameters here, rather than taking the last.
metrics.UpdateRequestMetadataFromDisk(ctx, disk)
err := gceCS.CloudProvider.DeleteDisk(ctx, project, zonalVolKey)
if err != nil {
deleteDiskErrs = append(deleteDiskErrs, err)
}
}
if len(deleteDiskErrs) > 0 {
return nil, common.LoggedError("Failed to delete multi-zone disk: ", errors.Join(deleteDiskErrs...))
}
klog.V(4).Infof("DeleteVolume succeeded for disk %v", volKey)
return &csi.DeleteVolumeResponse{}, nil
}
func (gceCS *GCEControllerServer) deleteSingleDeviceDisk(ctx context.Context, req *csi.DeleteVolumeRequest, project string, volKey *meta.Key) (*csi.DeleteVolumeResponse, error) {
var err error
volumeID := req.GetVolumeId()
project, volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, project, volKey)
if err != nil {
if gce.IsGCENotFoundError(err) {
klog.Warningf("DeleteVolume treating volume as deleted because cannot find volume %v: %v", volumeID, err.Error())
return &csi.DeleteVolumeResponse{}, nil
}
return nil, common.LoggedError("DeleteVolume error repairing underspecified volume key: ", err)
}
if acquired := gceCS.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, common.VolumeOperationAlreadyExistsFmt, volumeID)
}
defer gceCS.volumeLocks.Release(volumeID)
disk, _ := gceCS.CloudProvider.GetDisk(ctx, project, volKey, gce.GCEAPIVersionV1)
metrics.UpdateRequestMetadataFromDisk(ctx, disk)
err = gceCS.CloudProvider.DeleteDisk(ctx, project, volKey)
if err != nil {
return nil, common.LoggedError("Failed to delete disk: ", err)
}
klog.V(4).Infof("DeleteVolume succeeded for disk %v", volKey)
return &csi.DeleteVolumeResponse{}, nil
}
func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
var err error
// Only valid requests will be accepted
_, _, _, err = gceCS.validateControllerPublishVolumeRequest(ctx, req)
if err != nil {
return nil, err
}
backoffId := gceCS.errorBackoff.backoffId(req.NodeId, req.VolumeId)
if gceCS.errorBackoff.blocking(backoffId) {
return nil, status.Errorf(gceCS.errorBackoff.code(backoffId), "ControllerPublish not permitted on node %q due to backoff condition", req.NodeId)
}
resp, err, disk := gceCS.executeControllerPublishVolume(ctx, req)
metrics.UpdateRequestMetadataFromDisk(ctx, disk)
if err != nil {
klog.Infof("For node %s adding backoff due to error for volume %s: %v", req.NodeId, req.VolumeId, err)
gceCS.errorBackoff.next(backoffId, common.CodeForError(err))
} else {
klog.Infof("For node %s clear backoff due to successful publish of volume %v", req.NodeId, req.VolumeId)
gceCS.errorBackoff.reset(backoffId)
}
return resp, err
}
func (gceCS *GCEControllerServer) validateControllerPublishVolumeRequest(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (string, *meta.Key, *PDCSIContext, error) {
// Validate arguments
volumeID := req.GetVolumeId()
nodeID := req.GetNodeId()
volumeCapability := req.GetVolumeCapability()
if len(volumeID) == 0 {
return "", nil, nil, status.Error(codes.InvalidArgument, "ControllerPublishVolume Volume ID must be provided")
}
if len(nodeID) == 0 {
return "", nil, nil, status.Error(codes.InvalidArgument, "ControllerPublishVolume Node ID must be provided")
}
if volumeCapability == nil {
return "", nil, nil, status.Error(codes.InvalidArgument, "ControllerPublishVolume Volume capability must be provided")
}
project, volKey, err := common.VolumeIDToKey(volumeID)
if err != nil {
return "", nil, nil, status.Errorf(codes.InvalidArgument, "ControllerPublishVolume volume ID is invalid: %v", err.Error())
}
// TODO(#253): Check volume capability matches for ALREADY_EXISTS
if err = validateVolumeCapability(volumeCapability); err != nil {
return "", nil, nil, status.Errorf(codes.InvalidArgument, "VolumeCapabilities is invalid: %v", err.Error())
}
var pdcsiContext *PDCSIContext
if pdcsiContext, err = extractVolumeContext(req.VolumeContext); err != nil {
return "", nil, nil, status.Errorf(codes.InvalidArgument, "Invalid volume context: %v", err.Error())
}
return project, volKey, pdcsiContext, nil
}
func parseMachineType(machineTypeUrl string) string {
machineType, parseErr := common.ParseMachineType(machineTypeUrl)
if parseErr != nil {
// Parse errors represent an unexpected API change with instance.MachineType; log a warning.
klog.Warningf("ParseMachineType(%v): %v", machineTypeUrl, parseErr)
}
return machineType
}
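// convertMultiZoneVolKeyToZoned rewrites a 'multi-zone' volume key in place
// so that it points at the concrete zone of the instance being operated on.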
func convertMultiZoneVolKeyToZoned(volumeKey *meta.Key, instanceZone string) *meta.Key {
volumeKey.Zone = instanceZone
return volumeKey
}