Merge pull request kubernetes#57482 from vmware/automated-cherry-pick-of-#56648-kubernetes#56726-kubernetes#57053-upstream-release-1.9

Automatic merge from submit-queue.

Automated cherry pick of kubernetes#56648 kubernetes#56726 kubernetes#57053 upstream release 1.9

Cherry pick of kubernetes#56648 kubernetes#56726 kubernetes#57053 on release 1.9.

kubernetes#56648: Fix session timeout issue in vSphere Cloud Provider 
kubernetes#56726: Fix dynamic volume provisioning issue when VM is removed from inventory
kubernetes#57053: Compare correct file names for volume detach operation
Kubernetes Submit Queue authored Jan 3, 2018
2 parents a0af579 + 40cdcd6 commit 3a1c944
Showing 4 changed files with 68 additions and 15 deletions.
32 changes: 28 additions & 4 deletions pkg/cloudprovider/providers/vsphere/nodemanager.go
@@ -18,13 +18,14 @@ package vsphere

import (
"fmt"
"strings"
"sync"

"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"strings"
"sync"
)

// Stores info about the kubernetes node
@@ -241,6 +242,10 @@ func (nm *NodeManager) removeNode(node *v1.Node) {
nm.registeredNodesLock.Lock()
delete(nm.registeredNodes, node.ObjectMeta.Name)
nm.registeredNodesLock.Unlock()

nm.nodeInfoLock.Lock()
delete(nm.nodeInfoMap, node.ObjectMeta.Name)
nm.nodeInfoLock.Unlock()
}

// GetNodeInfo returns a NodeInfo containing the datacenter, VM object, and vCenter server IP address for the given node.
@@ -265,14 +270,33 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error)
return *nodeInfo, nil
}

func (nm *NodeManager) GetNodeDetails() []NodeDetails {
func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
nm.nodeInfoLock.RLock()
defer nm.nodeInfoLock.RUnlock()
var nodeDetails []NodeDetails
vsphereSessionRefreshMap := make(map[string]bool)

// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

for nodeName, nodeInfo := range nm.nodeInfoMap {
nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm})
if vsphereSessionRefreshMap[nodeInfo.vcServer] {
continue
}
vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
if vsphereInstance == nil {
err := fmt.Errorf("vSphereInstance for vc server %q not found while looking for vm %q", nodeInfo.vcServer, nodeInfo.vm)
return nil, err
}
err := vsphereInstance.conn.Connect(ctx)
if err != nil {
return nil, err
}
vsphereSessionRefreshMap[nodeInfo.vcServer] = true
}
return nodeDetails
return nodeDetails, nil
}

func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) {
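
The nodemanager.go changes above do two things: removeNode now also deletes the node's entry from nodeInfoMap, and GetNodeDetails now returns an error and re-connects to each vCenter server at most once (tracked in vsphereSessionRefreshMap) so an expired session is refreshed before its VMs are used. Below is a minimal sketch of that refresh-once-per-server pattern, not the provider's real types: Conn and its Connect method are hypothetical stand-ins for vclib.VSphereConnection, and the map literal stands in for NodeManager's internal bookkeeping.

    package main

    import (
        "context"
        "fmt"
    )

    // Conn is a hypothetical stand-in for vclib.VSphereConnection.
    type Conn struct{ server string }

    // Connect re-authenticates if the session has expired; a no-op otherwise.
    func (c *Conn) Connect(ctx context.Context) error {
        fmt.Printf("refreshing session for %s\n", c.server)
        return nil
    }

    // refreshSessions connects to each distinct vCenter server exactly once,
    // mirroring the vsphereSessionRefreshMap bookkeeping in GetNodeDetails.
    func refreshSessions(ctx context.Context, nodeConns map[string]*Conn) error {
        refreshed := make(map[string]bool)
        for nodeName, conn := range nodeConns {
            if refreshed[conn.server] {
                continue
            }
            if err := conn.Connect(ctx); err != nil {
                return fmt.Errorf("refreshing session for node %s: %v", nodeName, err)
            }
            refreshed[conn.server] = true
        }
        return nil
    }

    func main() {
        nodes := map[string]*Conn{
            "node-a": {server: "vc-1"},
            "node-b": {server: "vc-1"}, // same vCenter: refreshed only once
            "node-c": {server: "vc-2"},
        }
        _ = refreshSessions(context.Background(), nodes)
    }
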
12 changes: 11 additions & 1 deletion pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go
@@ -19,6 +19,7 @@ package vclib
import (
"context"
"fmt"
"strings"
"time"

"github.com/golang/glog"
@@ -362,12 +363,14 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s
glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}

// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if backing.FileName == diskPath {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
}
}
@@ -376,6 +379,13 @@
return nil, nil
}

func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
fileExt := ".vmdk"
diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}

// deleteController removes latest added SCSI controller from VM.
func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error {
controllerDeviceList := vmDevices.SelectByType(controllerDevice)
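
The detach fix in virtualmachine.go replaces the strict equality check between backing.FileName and diskPath with matchVirtualDiskAndVolPath, which normalizes both sides by stripping a trailing .vmdk before comparing. A quick standalone check of that helper follows; the function body is copied from the diff, and the example paths are made up for illustration.

    package main

    import (
        "fmt"
        "strings"
    )

    // matchVirtualDiskAndVolPath compares two datastore paths while ignoring
    // an optional trailing .vmdk extension (copied from the diff above).
    func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
        fileExt := ".vmdk"
        diskPath = strings.TrimSuffix(diskPath, fileExt)
        volPath = strings.TrimSuffix(volPath, fileExt)
        return diskPath == volPath
    }

    func main() {
        backingFileName := "[datastore1] kubevols/kubernetes-dynamic-pvc-1234.vmdk"
        volPath := "[datastore1] kubevols/kubernetes-dynamic-pvc-1234"

        // The old equality check fails because only one side carries .vmdk,
        // so the detach path never found the device; the helper matches them.
        fmt.Println(backingFileName == volPath)                           // false
        fmt.Println(matchVirtualDiskAndVolPath(backingFileName, volPath)) // true
    }
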
16 changes: 12 additions & 4 deletions pkg/cloudprovider/providers/vsphere/vsphere.go
@@ -639,7 +639,8 @@ func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) {

instanceID, err := instanceIDInternal()
if err != nil {
isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if err == nil {
glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
@@ -729,14 +730,17 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
requestTime := time.Now()
diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
if err != nil {
isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if err == nil {
glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
}
}
}
glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
return diskUUID, err
}
@@ -792,7 +796,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
requestTime := time.Now()
err := detachDiskInternal(volPath, nodeName)
if err != nil {
isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if err == nil {
err = detachDiskInternal(volPath, nodeName)
@@ -835,19 +840,22 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
return false, err
}

volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
attached, err := vm.IsDiskAttached(ctx, volPath)
if err != nil {
glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
vSphereInstance)
}
glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath)
return attached, err
}
requestTime := time.Now()
isAttached, err := diskIsAttachedInternal(volPath, nodeName)
if err != nil {
isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if err == vclib.ErrNoVMFound {
isAttached, err = false, nil
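
The vsphere.go hunks all make the same repair: isManagedObjectNotFoundError, err := vs.retry(...) declared a new err inside the if block that shadowed the function's err, so the outer error was never updated by the retry. Declaring the bool with var and assigning with = writes to the outer err instead. The snippet below is a standalone illustration of that shadowing pitfall, with retry as a hypothetical stand-in for vs.retry.

    package main

    import (
        "errors"
        "fmt"
    )

    // retry is a hypothetical stand-in for vs.retry: it reports that the error
    // was a ManagedObjectNotFound and that re-discovering the VM succeeded.
    func retry() (bool, error) { return true, nil }

    func main() {
        err := errors.New("ManagedObjectNotFound")

        if err != nil {
            found, err := retry() // := declares a new err that shadows the outer one
            _, _ = found, err
        }
        fmt.Println(err) // still "ManagedObjectNotFound": the outer err was never updated

        if err != nil {
            var found bool
            found, err = retry() // = assigns to the outer err, as the patch does
            _ = found
        }
        fmt.Println(err) // <nil>
    }
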
23 changes: 17 additions & 6 deletions pkg/cloudprovider/providers/vsphere/vsphere_util.go
@@ -32,12 +32,13 @@ import (

"fmt"

"path/filepath"

"github.com/vmware/govmomi/vim25/mo"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"path/filepath"
)

const (
@@ -187,20 +188,30 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod

// Get all datastores accessible for the virtual machine object.
func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
nodeVmDetails := nodeManager.GetNodeDetails()
if nodeVmDetails == nil || len(nodeVmDetails) == 0 {
nodeVmDetails, err := nodeManager.GetNodeDetails()
if err != nil {
glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
return nil, err
}

if len(nodeVmDetails) == 0 {
msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
glog.Error(msg)
return nil, fmt.Errorf(msg)
}
var sharedDatastores []*vclib.DatastoreInfo
for index, nodeVmDetail := range nodeVmDetails {
for _, nodeVmDetail := range nodeVmDetails {
glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager)
if err != nil {
if err == vclib.ErrNoVMFound {
glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
continue
}
return nil, err
}
if index == 0 {

if len(sharedDatastores) == 0 {
sharedDatastores = accessibleDatastores
} else {
sharedDatastores = intersect(sharedDatastores, accessibleDatastores)
@@ -210,7 +221,7 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter,
}
}
glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
sharedDatastores, err := getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
if err != nil {
glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
return nil, err
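
In vsphere_util.go, getSharedDatastoresInK8SCluster now skips nodes whose VMs return vclib.ErrNoVMFound (removed from inventory), so the first node that actually reports datastores need not be at index 0; the seeding condition for the intersection therefore changes from index == 0 to len(sharedDatastores) == 0. The sketch below reproduces that accumulator logic with a simplified intersect over strings; the data and helper are illustrative, not the provider's real DatastoreInfo handling.

    package main

    import "fmt"

    // intersect keeps only the entries present in both slices (a simplified
    // stand-in for the DatastoreInfo intersection in vsphere_util.go).
    func intersect(a, b []string) []string {
        seen := make(map[string]bool, len(a))
        for _, s := range a {
            seen[s] = true
        }
        var out []string
        for _, s := range b {
            if seen[s] {
                out = append(out, s)
            }
        }
        return out
    }

    func main() {
        // Per-node accessible datastores; nil simulates a node skipped because
        // its VM was removed from inventory (vclib.ErrNoVMFound).
        perNode := [][]string{nil, {"ds1", "ds2"}, {"ds2", "ds3"}}

        var shared []string
        for _, ds := range perNode {
            if ds == nil {
                continue // skipped node: with "index == 0" this would break the seeding
            }
            if len(shared) == 0 { // seed from the first node that reported datastores
                shared = ds
            } else {
                shared = intersect(shared, ds)
                if len(shared) == 0 {
                    fmt.Println("no datastore is shared by all nodes")
                    return
                }
            }
        }
        fmt.Println(shared) // [ds2]
    }
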
