Commit 1dcb25e
Merge pull request kubernetes#1754 from brendandburns/valid
Add a predicate for persistent disk scheduling.
lavalamp committed Oct 15, 2014
2 parents 180e90f + 47c4b8f commit 1dcb25e
Showing 3 changed files with 88 additions and 0 deletions.
33 changes: 33 additions & 0 deletions pkg/scheduler/predicates.go
@@ -51,6 +51,39 @@ func (nodes ClientNodeInfo) GetNodeInfo(nodeID string) (*api.Minion, error) {
	return nodes.GetMinion(nodeID)
}

func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
	if volume.Source.GCEPersistentDisk == nil {
		return false
	}
	pdName := volume.Source.GCEPersistentDisk.PDName

	manifest := &(pod.DesiredState.Manifest)
	for ix := range manifest.Volumes {
		if manifest.Volumes[ix].Source.GCEPersistentDisk != nil &&
			manifest.Volumes[ix].Source.GCEPersistentDisk.PDName == pdName {
			return true
		}
	}
	return false
}

// NoDiskConflict evaluates whether a pod can fit based on the volumes it requests and those that
// are already mounted. Some types of volumes are mounted directly onto node machines. For now,
// these mounts are exclusive, so if a volume is already mounted on a node, another pod that uses
// the same volume can't be scheduled there. This is GCE-specific for now.
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	manifest := &(pod.DesiredState.Manifest)
	for ix := range manifest.Volumes {
		for podIx := range existingPods {
			if isVolumeConflict(manifest.Volumes[ix], &existingPods[podIx]) {
				return false, nil
			}
		}
	}
	return true, nil
}

type ResourceFit struct {
	info NodeInfo
}
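As a quick illustration of how this predicate behaves, here is a minimal usage sketch. It is not part of the commit: the two pods, the "machine" node name, and the import paths (assumed to be the repository's github.com/GoogleCloudPlatform/kubernetes layout of this era) are illustrative only.

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
)

func main() {
	// Two hypothetical pods whose manifests both reference the GCE
	// persistent disk "foo".
	podA := api.Pod{DesiredState: api.PodState{
		Manifest: api.ContainerManifest{
			Volumes: []api.Volume{{Source: &api.VolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDisk{PDName: "foo"},
			}}},
		},
	}}
	podB := podA // a second pod requesting the same disk

	// "foo" is already mounted by podA on this node, so podB does not fit.
	fits, err := scheduler.NoDiskConflict(podB, []api.Pod{podA}, "machine")
	fmt.Println(fits, err) // false <nil>
}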
53 changes: 53 additions & 0 deletions pkg/scheduler/predicates_test.go
@@ -181,3 +181,56 @@ func TestPodFitsPorts(t *testing.T) {
		}
	}
}

func TestDiskConflicts(t *testing.T) {
	volState := api.PodState{
		Manifest: api.ContainerManifest{
			Volumes: []api.Volume{
				{
					Source: &api.VolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDisk{
							PDName: "foo",
						},
					},
				},
			},
		},
	}
	volState2 := api.PodState{
		Manifest: api.ContainerManifest{
			Volumes: []api.Volume{
				{
					Source: &api.VolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDisk{
							PDName: "bar",
						},
					},
				},
			},
		},
	}
	tests := []struct {
		pod          api.Pod
		existingPods []api.Pod
		isOk         bool
		test         string
	}{
		{api.Pod{}, []api.Pod{}, true, "nothing"},
		{api.Pod{}, []api.Pod{{DesiredState: volState}}, true, "one state"},
		{api.Pod{DesiredState: volState}, []api.Pod{{DesiredState: volState}}, false, "same state"},
		{api.Pod{DesiredState: volState2}, []api.Pod{{DesiredState: volState}}, true, "different state"},
	}

	for _, test := range tests {
		ok, err := NoDiskConflict(test.pod, test.existingPods, "machine")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if test.isOk && !ok {
			t.Errorf("expected ok, got none. %v %v %s", test.pod, test.existingPods, test.test)
		}
		if !test.isOk && ok {
			t.Errorf("expected no ok, got one. %v %v %s", test.pod, test.existingPods, test.test)
		}
	}
}
2 changes: 2 additions & 0 deletions plugin/pkg/scheduler/factory/factory.go
@@ -72,6 +72,8 @@ func (factory *ConfigFactory) Create() *scheduler.Config {
			algorithm.PodFitsPorts,
			// Fit is determined by resource availability
			algorithm.NewResourceFitPredicate(minionLister),
			// Fit is determined by non-conflicting disk volumes
			algorithm.NoDiskConflict,
		},
		// Prioritize nodes by least requested utilization.
		algorithm.LeastRequestedPriority,
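For context on how these entries are consumed: each item in the list is a fit predicate, and a node is only feasible for a pod if every predicate returns true. That evaluation loop is not part of this diff; the sketch below is a self-contained illustration of the AND-composition idea, with the FitPredicate shape and the podFitsOnNode helper defined locally for illustration rather than taken from pkg/scheduler.

package main

import "fmt"

// Pod is a stand-in for api.Pod, defined locally to keep the sketch self-contained.
type Pod struct{ Name string }

// FitPredicate mirrors the shape used above: it reports whether a pod
// can be placed on a node, given the pods already running there.
type FitPredicate func(pod Pod, existingPods []Pod, node string) (bool, error)

// podFitsOnNode ANDs all predicates: the first false (or error) rejects the node.
func podFitsOnNode(pod Pod, existing []Pod, node string, predicates []FitPredicate) (bool, error) {
	for _, predicate := range predicates {
		fits, err := predicate(pod, existing, node)
		if err != nil {
			return false, err
		}
		if !fits {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	alwaysFits := func(Pod, []Pod, string) (bool, error) { return true, nil }
	neverFits := func(Pod, []Pod, string) (bool, error) { return false, nil }

	fits, _ := podFitsOnNode(Pod{Name: "web"}, nil, "machine", []FitPredicate{alwaysFits, neverFits})
	fmt.Println(fits) // false: one failing predicate rejects the node
}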
