
Merge pull request kubernetes#51107 from jsafrane/exec-gluster
Automatic merge from submit-queue (batch tested with PRs 51105, 51097, 51110, 50843, 51107)

gluster: Use VolumeHost.GetExec() to execute stuff in volume plugins

**What this PR does / why we need it**:
This PR updates the GlusterFS volume plugin to use `VolumeHost.GetExec()` to execute utilities like mkfs and lsblk instead of calling `os/exec` directly. This prepares the volume plugin to run these utilities in containers instead of on the host, and makes the volume plugin more independent and less hardcoded.

See proposal in kubernetes/community#589.

Note that this PR does **not** change where the utilities are executed: `VolumeHost.GetExec()` still leads directly to `os/exec`. That will change once the aforementioned proposal is merged and implemented.
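
To make the pattern concrete, here is a minimal before/after sketch. The function names, the mkfs example, and the exact signature of the returned exec's `Run` are assumptions inferred from how the call sites in the diff below use it; this is not the literal upstream code.

```go
package example

import (
	"k8s.io/kubernetes/pkg/volume"
	utilexec "k8s.io/utils/exec"
)

// Before (simplified): the plugin builds its own exec.Interface and always
// shells out on the host where kubelet runs.
func formatOnHost(device string) ([]byte, error) {
	exe := utilexec.New()
	return exe.Command("mkfs.ext4", device).CombinedOutput()
}

// After (simplified): the exec implementation is supplied by the VolumeHost,
// so the same call can later be redirected into a container without changing
// the plugin itself. Run is assumed to return combined output plus an error.
func formatViaVolumeHost(host volume.VolumeHost, pluginName, device string) ([]byte, error) {
	exe := host.GetExec(pluginName)
	return exe.Run("mkfs.ext4", device)
}
```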

**Special notes for your reviewer**:

There are two commits:
* The first one removes the unused `plugin.execCommand` (so we don't need to update it to `VolumeHost.GetExec`).
* The second one switches to `VolumeHost.GetExec()` as described above.

@kubernetes/sig-storage-pr-reviews 

**Release note**:
```release-note
NONE
```
Kubernetes Submit Queue authored Aug 23, 2017
2 parents: 4fb6c28 + 3636c0a, commit 6a4203e
Showing 3 changed files with 6 additions and 34 deletions.
pkg/volume/glusterfs/BUILD (3 changes: 0 additions & 3 deletions)
@@ -32,7 +32,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
-"//vendor/k8s.io/utils/exec:go_default_library",
],
)

@@ -55,8 +54,6 @@ go_test(
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
-"//vendor/k8s.io/utils/exec:go_default_library",
-"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)

pkg/volume/glusterfs/glusterfs.go (19 changes: 5 additions & 14 deletions)
@@ -43,17 +43,15 @@ import (
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
-return []volume.VolumePlugin{&glusterfsPlugin{host: nil, exe: exec.New(), gidTable: make(map[string]*MinMaxAllocator)}}
+return []volume.VolumePlugin{&glusterfsPlugin{host: nil, gidTable: make(map[string]*MinMaxAllocator)}}
}

type glusterfsPlugin struct {
host volume.VolumeHost
-exe exec.Interface
gidTable map[string]*MinMaxAllocator
gidTableLock sync.Mutex
}
@@ -152,7 +150,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
return nil, err
}
glog.V(1).Infof("glusterfs: endpoints %v", ep)
-return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()), exec.New())
+return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool) {
@@ -164,7 +162,7 @@ func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.Gl
return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
}

-func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
+func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly := plugin.getGlusterVolumeSource(spec)
return &glusterfsMounter{
glusterfs: &glusterfs{
@@ -176,7 +174,6 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp
hosts: ep,
path: source.Path,
readOnly: readOnly,
-exe: exe,
mountOptions: volume.MountOptionFromSpec(spec),
}, nil
}
@@ -194,11 +191,6 @@ func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types
}}, nil
}

-func (plugin *glusterfsPlugin) execCommand(command string, args []string) ([]byte, error) {
-cmd := plugin.exe.Command(command, args...)
-return cmd.CombinedOutput()
-}
-
func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glusterfsVolume := &v1.Volume{
Name: volumeName,
@@ -226,7 +218,6 @@ type glusterfsMounter struct {
hosts *v1.Endpoints
path string
readOnly bool
-exe exec.Interface
mountOptions []string
}

@@ -244,10 +235,10 @@ func (b *glusterfsMounter) GetAttributes() volume.Attributes {
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *glusterfsMounter) CanMount() error {
-exe := exec.New()
+exe := b.plugin.host.GetExec(b.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
-if _, err := exe.Command("/bin/ls", gciLinuxGlusterMountBinaryPath).CombinedOutput(); err != nil {
+if _, err := exe.Run("/bin/ls", gciLinuxGlusterMountBinaryPath); err != nil {
return fmt.Errorf("Required binary %s is missing", gciLinuxGlusterMountBinaryPath)
}
}
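
For reviewers wondering what `GetExec()` hands back here: judging only by how it is called in the `CanMount` hunk above, the returned value needs to satisfy an interface shaped roughly like the sketch below. The interface name is invented for illustration; the real interface is defined elsewhere in the tree.

```go
// Assumed shape, inferred from exe.Run("/bin/ls", ...) above; not the actual
// upstream interface definition.
type execRunner interface {
	// Run executes cmd with args and returns its combined stdout/stderr.
	Run(cmd string, args ...string) ([]byte, error)
}
```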
pkg/volume/glusterfs/glusterfs_test.go (18 changes: 1 addition & 17 deletions)
@@ -33,8 +33,6 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)

func TestCanSupport(t *testing.T) {
@@ -104,22 +102,8 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
-var fcmd fakeexec.FakeCmd
-fcmd = fakeexec.FakeCmd{
-CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
-// mount
-func() ([]byte, error) {
-return []byte{}, nil
-},
-},
-}
-fake := fakeexec.FakeExec{
-CommandScript: []fakeexec.FakeCommandAction{
-func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
-},
-}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
-mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
+mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{})
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
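
Since the mounter no longer takes an exec argument, the test above no longer needs `fakeexec`. If a future test wants to script command output through `VolumeHost.GetExec()`, it would presumably plug a fake into whatever fake VolumeHost it builds, along the lines of this purely hypothetical sketch (all names invented):

```go
// Hypothetical fake: returns canned output for every command, which is enough
// for tests that only check error handling around the exec call.
type scriptedExec struct {
	out []byte
	err error
}

func (s *scriptedExec) Run(cmd string, args ...string) ([]byte, error) {
	return s.out, s.err // ignore cmd/args, return the scripted result
}
```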

