k3s/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"syscall"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog/v2"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/util/removeall"
	"k8s.io/kubernetes/pkg/volume"
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

// ListVolumesForPod returns a map of the mounted volumes for the given pod.
// The key in the map is the OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name)
func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
	volumesToReturn := make(map[string]volume.Volume)
	podVolumes := kl.volumeManager.GetMountedVolumesForPod(
		volumetypes.UniquePodName(podUID))
	for outerVolumeSpecName, volume := range podVolumes {
		// TODO: volume.Mounter could be nil if volume object is recovered
		// from reconciler's sync state process. PR 33616 will fix this problem
		// to create Mounter object when recovering volume state.
		if volume.Mounter == nil {
			continue
		}
		volumesToReturn[outerVolumeSpecName] = volume.Mounter
	}
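
	// The boolean result reports whether any mounted volumes were found for this pod.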
	return volumesToReturn, len(volumesToReturn) > 0
}

// podVolumesExist checks with the volume manager and returns true if any of
// the volumes for the specified pod are mounted.
func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
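	// First ask the volume manager whether it is still tracking any mounted volumes for this pod.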
	if mountedVolumes :=
		kl.volumeManager.GetMountedVolumesForPod(
			volumetypes.UniquePodName(podUID)); len(mountedVolumes) > 0 {
		return true
	}
	// TODO: This checks pod volume paths and whether they are mounted. If checking returns an error,
	// podVolumesExist will return true, which means we consider that volumes might exist and further
	// checking is required. There are some volume plugins, such as flexvolume, that might not have
	// mounts. See issue #61229.
	volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
	if err != nil {
		klog.ErrorS(err, "Pod found, but error occurred during checking mounted volumes from disk", "podUID", podUID)
		return true
	}
	if len(volumePaths) > 0 {
		klog.V(4).InfoS("Pod found, but volumes are still mounted on disk", "podUID", podUID, "paths", volumePaths)
		return true
	}

	return false
}

// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
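	// Look up a registered volume plugin that can handle this volume spec.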
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
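	// Ask the selected plugin to construct a Mounter bound to this pod and volume spec.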
	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
	}
	klog.V(10).InfoS("Using volume plugin for mount", "volumePluginName", plugin.GetPluginName(), "volumeName", spec.Name())
	return physicalMounter, nil
}

// cleanupOrphanedPodDirs removes the volumes of pods that should not be
// running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
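	// Build the set of pod UIDs that should still exist, drawn from both the admitted pods
	// and the pods the container runtime reports as running.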
	allPods := sets.NewString()
	for _, pod := range pods {
		allPods.Insert(string(pod.UID))
	}
	for _, pod := range runningPods {
		allPods.Insert(string(pod.ID))
	}
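
	// Any pod directory found on disk whose UID is not in allPods is treated as orphaned.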
	found, err := kl.listPodsFromDisk()
	if err != nil {
		return err
	}

	orphanRemovalErrors := []error{}
	orphanVolumeErrors := []error{}

	for _, uid := range found {
		if allPods.Has(string(uid)) {
			continue
		}
		// If volumes have not been unmounted/detached, do not delete directory.
		// Doing so may result in corruption of data.
		// TODO: getMountedVolumePathListFromDisk() call may be redundant with
		// kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
		if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
			klog.V(3).InfoS("Orphaned pod found, but volumes are not cleaned up", "podUID", uid)
			continue
		}

		allVolumesCleanedUp := true

		// If there are still volume directories, attempt to rmdir them
		volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
		if err != nil {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
			continue
		}
		if len(volumePaths) > 0 {
			for _, volumePath := range volumePaths {
				if err := syscall.Rmdir(volumePath); err != nil {
					orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err))
					allVolumesCleanedUp = false
				} else {
					klog.InfoS("Cleaned up orphaned volume from pod", "podUID", uid, "path", volumePath)
				}
			}
		}

		// If there are any volume-subpaths, attempt to rmdir them
		subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid)
		if err != nil {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
			continue
		}
		if len(subpathVolumePaths) > 0 {
			for _, subpathVolumePath := range subpathVolumePaths {
				if err := syscall.Rmdir(subpathVolumePath); err != nil {
					orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err))
					allVolumesCleanedUp = false
				} else {
					klog.InfoS("Cleaned up orphaned volume subpath from pod", "podUID", uid, "path", subpathVolumePath)
				}
			}
		}

		if !allVolumesCleanedUp {
			// Not all volumes were removed, so don't clean up the pod directory yet. It is likely
			// that there are still mountpoints left which could stall RemoveAllOneFilesystem which
			// would otherwise be called below.
			// Errors for all removal operations have already been recorded, so don't add another
			// one here.
			continue
		}
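
		// Every volume and subpath directory for this orphaned pod has been removed, so it is
		// now safe to remove the pod directory itself.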
		klog.V(3).InfoS("Orphaned pod found, removing", "podUID", uid)
		if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
			klog.ErrorS(err, "Failed to remove orphaned pod dir", "podUID", uid)
			orphanRemovalErrors = append(orphanRemovalErrors, err)
		}
	}
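
	// logSpew reports the first error in a batch at the default verbosity and the rest at V(5),
	// so repeated similar errors do not flood the log from the sync loop.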
	logSpew := func(errs []error) {
		if len(errs) > 0 {
			klog.ErrorS(errs[0], "There were many similar errors. Turn up verbosity to see them.", "numErrs", len(errs))
			for _, err := range errs {
				klog.V(5).InfoS("Orphan pod", "err", err)
			}
		}
	}
	logSpew(orphanVolumeErrors)
	logSpew(orphanRemovalErrors)
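	// Only pod directory removal errors are returned to the caller; volume cleanup errors
	// were accumulated separately and have already been logged above.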
	return utilerrors.NewAggregate(orphanRemovalErrors)
}