// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure

import (
	"context"
	"fmt"
	"net/http"
	"path"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"

	"k8s.io/apimachinery/pkg/types"
	kwait "k8s.io/apimachinery/pkg/util/wait"
	cloudprovider "k8s.io/cloud-provider"
	volerr "k8s.io/cloud-provider/volume/errors"
	"k8s.io/klog/v2"

	azcache "k8s.io/legacy-cloud-providers/azure/cache"
	"k8s.io/legacy-cloud-providers/azure/retry"
)
const (
	// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
	maxStorageAccounts                     = 100 // max # is 200 (250 with special request); this leaves 100 for everything else, including standalone disks
	maxDisksPerStorageAccounts             = 60
	storageAccountUtilizationBeforeGrowing = 0.5

	maxLUN = 64 // max number of LUNs per VM

	errLeaseFailed       = "AcquireDiskLeaseFailed"
	errLeaseIDMissing    = "LeaseIdMissing"
	errContainerNotFound = "ContainerNotFound"
	errStatusCode400     = "statuscode=400"
	errInvalidParameter  = `code="invalidparameter"`
	errTargetInstanceIds = `target="instanceids"`

	sourceSnapshot = "snapshot"
	sourceVolume   = "volume"

	// WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks
	// https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator
	WriteAcceleratorEnabled = "writeacceleratorenabled"

	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot.
	diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s"

	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-from-an-existing-managed-disk-in-the-same-or-different-subscription.
	managedDiskPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s"
)
var defaultBackOff = kwait.Backoff{
	Steps:    20,
	Duration: 2 * time.Second,
	Factor:   1.5,
	Jitter:   0.0,
}
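// Note (illustrative, not from the original source): with Steps=20,
// Duration=2s, Factor=1.5 and no jitter, kwait.ExponentialBackoff waits
// roughly 2s, 3s, 4.5s, 6.75s, ... between attempts, so exhausting all 20
// steps can take on the order of a few hours in the worst case.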
var (
	managedDiskPathRE  = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)`)
	diskSnapshotPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)`)
)
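// Illustrative example (assumed URI, not from the original source):
// managedDiskPathRE matches a full ARM disk URI such as
//	/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Compute/disks/disk-name
// and captures the trailing disk name ("disk-name") in group 1;
// diskSnapshotPathRE does the same for ".../snapshots/snap-name".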
type controllerCommon struct {
	subscriptionID        string
	location              string
	storageEndpointSuffix string
	resourceGroup         string
	// store disk URI when disk is in attaching or detaching process
	diskAttachDetachMap sync.Map
	// vm disk map used to lock per vm update calls
	vmLockMap *lockMap
	cloud     *Cloud
}
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
	// 1. vmType is standard, return cloud.VMSet directly.
	if c.cloud.VMType == vmTypeStandard {
		return c.cloud.VMSet, nil
	}

	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
	ss, ok := c.cloud.VMSet.(*scaleSet)
	if !ok {
		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
	}

	// 3. If the node is managed by availability set, then return ss.availabilitySet.
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
	if err != nil {
		return nil, err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet, nil
	}

	// 4. Node is managed by vmss
	return ss, nil
}
// AttachDisk attaches a vhd to vm. The vhd must exist and can be identified
// by diskName or diskURI. It returns the LUN the disk was attached at.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) {
	diskEncryptionSetID := ""
	writeAcceleratorEnabled := false

	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		return -1, err
	}

	if isManagedDisk {
		diskName := path.Base(diskURI)
		resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
		if err != nil {
			return -1, err
		}

		ctx, cancel := getContextWithCancel()
		defer cancel()

		disk, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
		if rerr != nil {
			return -1, rerr.Error()
		}

		if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
			attachErr := fmt.Sprintf(
				"disk(%s) already attached to node(%s), could not be attached to node(%s)",
				diskURI, *disk.ManagedBy, nodeName)
			attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy)
			if err != nil {
				return -1, err
			}
			klog.V(2).Infof("found dangling volume %s attached to node %s", diskURI, attachedNode)
			danglingErr := volerr.NewDanglingError(attachErr, attachedNode, "")
			return -1, danglingErr
		}

		if disk.DiskProperties != nil && disk.DiskProperties.Encryption != nil &&
			disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
			diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
		}

		if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok {
			if v != nil && strings.EqualFold(*v, "true") {
				writeAcceleratorEnabled = true
			}
		}
	}

	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName)
		return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
	defer c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

	lun, err := c.GetNextDiskLun(nodeName)
	if err != nil {
		klog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
		return -1, fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err)
	}

	klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName)
	c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "attaching")
	defer c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
	return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode, diskEncryptionSetID, writeAcceleratorEnabled)
}
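// Illustrative usage sketch (hypothetical caller, not part of this file):
//
//	lun, err := common.AttachDisk(true, "disk-name", diskURI, nodeName, compute.CachingTypesReadOnly)
//	if err != nil {
//		return err
//	}
//	// the returned lun is what the node side later uses to locate the device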
// DetachDisk detaches a disk from the host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
	_, err := c.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// if host doesn't exist, no need to detach
			klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached",
				nodeName, diskURI)
			return nil
		}
		klog.Warningf("failed to get azure instance id (%v)", err)
		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		return err
	}

	klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)

	// make the lock here as small as possible
	c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
	c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
	err = vmset.DetachDisk(diskName, diskURI, nodeName)
	c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
	c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

	if err != nil {
		if isInstanceNotFoundError(err) {
			// if host doesn't exist, no need to detach
			klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
				err, diskURI)
			return nil
		}
		if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
			klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
			retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
				c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
				c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
				err := vmset.DetachDisk(diskName, diskURI, nodeName)
				c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
				c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

				retriable := false
				if err != nil && retry.IsErrorRetriable(err) {
					retriable = true
				}
				return !retriable, err
			})
			if retryErr != nil {
				err = retryErr
				klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
			}
		}
	}
	if err != nil {
		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
		return err
	}

	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
	return nil
}
// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error) {
	vmset, err := c.getNodeVMSet(nodeName, crt)
	if err != nil {
		return nil, err
	}

	return vmset.GetDataDisks(nodeName, crt)
}
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	// getNodeDataDisks needs to fetch cached data (or fresh data if the cache
	// has expired) here, to ensure the LUN is determined from the latest entry.
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
	if err != nil {
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
		return -1, err
	}

	for _, disk := range disks {
		if disk.Lun != nil && ((disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
			(disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI))) {
			if disk.ToBeDetached != nil && *disk.ToBeDetached {
				klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			} else {
				// found the disk
				klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
				return *disk.Lun, nil
			}
		}
	}
	return -1, fmt.Errorf("cannot find Lun for disk %s", diskName)
}
// GetNextDiskLun searches all vhd attachments on the host and finds an unused lun. Returns -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
	if err != nil {
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
		return -1, err
	}

	used := make([]bool, maxLUN)
	for _, disk := range disks {
		if disk.Lun != nil {
			used[*disk.Lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("all luns are used")
}
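// Illustrative note (assumed state): if a node currently has disks at LUNs
// 0 and 2, GetNextDiskLun returns 1, since it picks the first free slot in
// the [0, maxLUN) range rather than the highest used LUN plus one.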
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}

	// do a stale read for getNodeDataDisks to ensure we don't call ARM on
	// every reconcile call. The cache is invalidated after an Attach/Detach
	// disk operation, so the new entry will be fetched and cached the first
	// time the reconcile loop runs after the op, reflecting the latest model.
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// if host doesn't exist, no need to detach
			klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
				nodeName, diskNames)
			return attached, nil
		}
		return attached, err
	}

	for _, disk := range disks {
		for _, diskName := range diskNames {
			if disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName) {
				attached[diskName] = true
			}
		}
	}

	return attached, nil
}
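// Illustrative example (assumed state): DisksAreAttached([]string{"disk-a",
// "disk-b"}, "node-1") returns map[string]bool{"disk-a": true, "disk-b": false}
// when only disk-a appears in the node's data disk list.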
func filterDetachingDisks(unfilteredDisks []compute.DataDisk) []compute.DataDisk {
	filteredDisks := []compute.DataDisk{}
	for _, disk := range unfilteredDisks {
		if disk.ToBeDetached != nil && *disk.ToBeDetached {
			if disk.Name != nil {
				klog.V(2).Infof("Filtering disk: %s with ToBeDetached flag set.", *disk.Name)
			}
		} else {
			filteredDisks = append(filteredDisks, disk)
		}
	}
	return filteredDisks
}
func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk {
	filteredDisks := []compute.DataDisk{}
	for _, disk := range unfilteredDisks {
		filter := false
		if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil {
			diskURI := *disk.ManagedDisk.ID
			exist, err := c.cloud.checkDiskExists(ctx, diskURI)
			if err != nil {
				klog.Errorf("checkDiskExists(%s) failed with error: %v", diskURI, err)
			} else {
				// only filter disk when checkDiskExists returns <false, nil>
				filter = !exist
				if filter {
					klog.Errorf("disk(%s) does not exist, removed from data disk list", diskURI)
				}
			}
		}

		if !filter {
			filteredDisks = append(filteredDisks, disk)
		}
	}
	return filteredDisks
}
func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) {
	diskName := path.Base(diskURI)
	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
	if err != nil {
		return false, err
	}

	if _, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName); rerr != nil {
		if rerr.HTTPStatusCode == http.StatusNotFound {
			return false, nil
		}
		return false, rerr.Error()
	}

	return true, nil
}
func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) {
	if sourceResourceID == "" {
		return compute.CreationData{
			CreateOption: compute.Empty,
		}, nil
	}

	switch sourceType {
	case sourceSnapshot:
		if match := diskSnapshotPathRE.FindString(sourceResourceID); match == "" {
			sourceResourceID = fmt.Sprintf(diskSnapshotPath, subscriptionID, resourceGroup, sourceResourceID)
		}
	case sourceVolume:
		if match := managedDiskPathRE.FindString(sourceResourceID); match == "" {
			sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID)
		}
	default:
		return compute.CreationData{
			CreateOption: compute.Empty,
		}, nil
	}

	splits := strings.Split(sourceResourceID, "/")
	if len(splits) > 9 {
		if sourceType == sourceSnapshot {
			return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE)
		}
		return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
	}
	return compute.CreationData{
		CreateOption:     compute.Copy,
		SourceResourceID: &sourceResourceID,
	}, nil
}
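// Illustrative example (assumed values): for sourceType "snapshot" and a bare
// name "snap-1", getValidCreationData expands the ID to
//	/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/snapshots/snap-1
// and returns CreationData{CreateOption: Copy, SourceResourceID: &expandedID};
// for an empty sourceResourceID it returns CreateOption Empty instead.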
func isInstanceNotFoundError(err error) bool {
	errMsg := strings.ToLower(err.Error())
	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
}
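// Illustrative note (assumed error shape): isInstanceNotFoundError only
// reports true when the lowercased message contains all three markers, e.g.
// a 400 response whose body carries code="InvalidParameter" and
// target="instanceIds" for a VMSS instance that no longer exists; a plain
// 400 without those markers does not match.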