From 17c7e86c6d10839f071ff93dcc89b2df4cfaf8b5 Mon Sep 17 00:00:00 2001 From: Kevin Klues Date: Fri, 14 May 2021 13:16:58 +0000 Subject: [PATCH 1/6] Add NUMA support to the CPU assignment algorithm in the CPUManager Signed-off-by: Kevin Klues --- pkg/kubelet/cm/cpumanager/cpu_assignment.go | 138 +++++++++++++++--- .../cm/cpumanager/topology/topology.go | 18 ++- .../cm/cpumanager/topology/topology_test.go | 21 +-- 3 files changed, 141 insertions(+), 36 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index b599485151c0a..21d5530481a27 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -42,6 +42,11 @@ func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, } } +// Returns true if the supplied NUMANode is fully available in `topoDetails`. +func (a *cpuAccumulator) isNUMANodeFree(numaID int) bool { + return a.details.CPUsInNUMANodes(numaID).Size() == a.topo.CPUDetails.CPUsInNUMANodes(numaID).Size() +} + // Returns true if the supplied socket is fully available in `topoDetails`. func (a *cpuAccumulator) isSocketFree(socketID int) bool { return a.details.CPUsInSockets(socketID).Size() == a.topo.CPUsPerSocket() @@ -52,6 +57,17 @@ func (a *cpuAccumulator) isCoreFree(coreID int) bool { return a.details.CPUsInCores(coreID).Size() == a.topo.CPUsPerCore() } +// Returns free NUMA Node IDs as a slice sorted by sortAvailableNUMANodes(). +func (a *cpuAccumulator) freeNUMANodes() []int { + free := []int{} + for _, numa := range a.sortAvailableNUMANodes() { + if a.isNUMANodeFree(numa) { + free = append(free, numa) + } + } + return free +} + // Returns free socket IDs as a slice sorted by sortAvailableSockets(). func (a *cpuAccumulator) freeSockets() []int { free := []int{} @@ -79,12 +95,12 @@ func (a *cpuAccumulator) freeCPUs() []int { return a.sortAvailableCPUs() } -// Sorts the provided list of sockets/cores/cpus referenced in 'ids' by the -// number of available CPUs contained within them (smallest to largest). The -// 'getCPU()' paramater defines the function that should be called to retrieve -// the list of available CPUs for the type of socket/core/cpu being referenced. -// If two sockets/cores/cpus have the same number of available CPUs, they are -// sorted in ascending order by their id. +// Sorts the provided list of NUMA nodes/sockets/cores/cpus referenced in 'ids' +// by the number of available CPUs contained within them (smallest to largest). +// The 'getCPU()' paramater defines the function that should be called to +// retrieve the list of available CPUs for the type being referenced. If two +// NUMA nodes/sockets/cores/cpus have the same number of available CPUs, they +// are sorted in ascending order by their id. func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet) { sort.Slice(ids, func(i, j int) bool { @@ -100,20 +116,74 @@ func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet) }) } -// Sort all sockets with free CPUs using the sort() algorithm defined above. +// Sort all NUMA nodes with free CPUs: +// - If NUMA nodes are higher than sockets in the memory hierarchy then sort +// them directly using the sort() algorithm defined above. +// - Otherwise sort them: +// - First by socket using sortAvailableSockets(). +// - Then within each socket, using the sort() algorithm defined above. 
+func (a *cpuAccumulator) sortAvailableNUMANodes() []int { + // If NUMA nodes are equal or higher in the memory hierarchy than sockets + if a.topo.NumSockets >= a.topo.NumNUMANodes { + numas := a.details.NUMANodes().ToSliceNoSort() + a.sort(numas, a.details.CPUsInNUMANodes) + return numas + } + + // Otherwise each socket has multiple NUMA nodes + var result []int + for _, socket := range a.sortAvailableSockets() { + numas := a.details.NUMANodesInSockets(socket).ToSliceNoSort() + a.sort(numas, a.details.CPUsInNUMANodes) + result = append(result, numas...) + } + return result +} + +// Sort all sockets with free CPUs: +// - If sockets are higher than NUMA nodes in the memory hierarchy then sort +// them directly using the sort() algorithm defined above. +// - Otherwise sort them: +// - First by NUMA node using sortAvailableNUMANodes(). +// - Then within each NUMA node, using the sort() algorithm defined above. func (a *cpuAccumulator) sortAvailableSockets() []int { - sockets := a.details.Sockets().ToSliceNoSort() - a.sort(sockets, a.details.CPUsInSockets) - return sockets + // If sockets are higher than NUMA nodes in the memory hierarchy + if a.topo.NumNUMANodes >= a.topo.NumSockets { + sockets := a.details.Sockets().ToSliceNoSort() + a.sort(sockets, a.details.CPUsInSockets) + return sockets + } + + // Otherwise each NUMA Node has multiple sockets + var result []int + for _, numa := range a.sortAvailableNUMANodes() { + sockets := a.details.SocketsInNUMANodes(numa).ToSliceNoSort() + a.sort(sockets, a.details.CPUsInSockets) + result = append(result, sockets...) + } + return result } // Sort all cores with free CPUs: -// - First by socket using sortAvailableSockets(). -// - Then within each socket, using the sort() algorithm defined above. +// - First by socket (or NUMA node) using sortAvailableSockets() (or sortAvailableNUMANodes()). +// - Then within each socket or NUMA node, using the sort() algorithm defined above. func (a *cpuAccumulator) sortAvailableCores() []int { + // If NUMA nodes are higher in the memory hierarchy than sockets, then + // cores sit directly below sockets in the memory hierarchy. + if a.topo.NumSockets >= a.topo.NumNUMANodes { + var result []int + for _, socket := range a.sortAvailableSockets() { + cores := a.details.CoresInSockets(socket).ToSliceNoSort() + a.sort(cores, a.details.CPUsInCores) + result = append(result, cores...) + } + return result + } + + // Otherwise they sit directly below NUMA nodes. var result []int - for _, socket := range a.sortAvailableSockets() { - cores := a.details.CoresInSockets(socket).ToSliceNoSort() + for _, numa := range a.sortAvailableNUMANodes() { + cores := a.details.CoresInNUMANodes(numa).ToSliceNoSort() a.sort(cores, a.details.CPUsInCores) result = append(result, cores...) } @@ -139,6 +209,17 @@ func (a *cpuAccumulator) take(cpus cpuset.CPUSet) { a.numCPUsNeeded -= cpus.Size() } +func (a *cpuAccumulator) takeFullNUMANodes() { + for _, numa := range a.freeNUMANodes() { + cpusInNUMANode := a.topo.CPUDetails.CPUsInNUMANodes(numa) + if !a.needs(cpusInNUMANode.Size()) { + continue + } + klog.V(4).InfoS("takeFullNUMANodes: claiming NUMA node", "numa", numa) + a.take(cpusInNUMANode) + } +} + func (a *cpuAccumulator) takeFullSockets() { for _, socket := range a.freeSockets() { cpusInSocket := a.topo.CPUDetails.CPUsInSockets(socket) @@ -193,11 +274,30 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num } // Algorithm: topology-aware best-fit - // 1. 
Acquire whole sockets, if available and the container requires at - // least a socket's-worth of CPUs. - acc.takeFullSockets() - if acc.isSatisfied() { - return acc.result, nil + // 1. Acquire whole NUMA nodes and sockets, if available and the container + // requires at least a NUMA node or socket's-worth of CPUs. If NUMA + // Nodes map to 1 or more sockets, pull from NUMA nodes first. + // Otherwise pull from sockets first. + if acc.topo.NumSockets >= acc.topo.NumNUMANodes { + acc.takeFullNUMANodes() + if acc.isSatisfied() { + return acc.result, nil + } + + acc.takeFullSockets() + if acc.isSatisfied() { + return acc.result, nil + } + } else { + acc.takeFullSockets() + if acc.isSatisfied() { + return acc.result, nil + } + + acc.takeFullNUMANodes() + if acc.isSatisfied() { + return acc.result, nil + } } // 2. Acquire whole cores, if available and the container requires at least diff --git a/pkg/kubelet/cm/cpumanager/topology/topology.go b/pkg/kubelet/cm/cpumanager/topology/topology.go index b397165d97e64..37c2f105b7e65 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -36,10 +36,11 @@ type CPUDetails map[int]CPUInfo // Core - physical CPU, cadvisor - Core // Socket - socket, cadvisor - Node type CPUTopology struct { - NumCPUs int - NumCores int - NumSockets int - CPUDetails CPUDetails + NumCPUs int + NumCores int + NumSockets int + NumNUMANodes int + CPUDetails CPUDetails } // CPUsPerCore returns the number of logical CPUs are associated with @@ -243,10 +244,11 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { } return &CPUTopology{ - NumCPUs: machineInfo.NumCores, - NumSockets: machineInfo.NumSockets, - NumCores: numPhysicalCores, - CPUDetails: CPUDetails, + NumCPUs: machineInfo.NumCores, + NumSockets: machineInfo.NumSockets, + NumCores: numPhysicalCores, + NumNUMANodes: CPUDetails.NUMANodes().Size(), + CPUDetails: CPUDetails, }, nil } diff --git a/pkg/kubelet/cm/cpumanager/topology/topology_test.go b/pkg/kubelet/cm/cpumanager/topology/topology_test.go index 48478b8d8a125..36f4ce3d67f38 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology_test.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology_test.go @@ -57,9 +57,10 @@ func Test_Discover(t *testing.T) { }, }, want: &CPUTopology{ - NumCPUs: 8, - NumSockets: 1, - NumCores: 4, + NumCPUs: 8, + NumSockets: 1, + NumCores: 4, + NumNUMANodes: 1, CPUDetails: map[int]CPUInfo{ 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, @@ -94,9 +95,10 @@ func Test_Discover(t *testing.T) { }, }, want: &CPUTopology{ - NumCPUs: 4, - NumSockets: 2, - NumCores: 4, + NumCPUs: 4, + NumSockets: 2, + NumCores: 4, + NumNUMANodes: 2, CPUDetails: map[int]CPUInfo{ 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, 1: {CoreID: 1, SocketID: 1, NUMANodeID: 1}, @@ -129,9 +131,10 @@ func Test_Discover(t *testing.T) { }, }, want: &CPUTopology{ - NumCPUs: 12, - NumSockets: 2, - NumCores: 6, + NumCPUs: 12, + NumSockets: 2, + NumCores: 6, + NumNUMANodes: 2, CPUDetails: map[int]CPUInfo{ 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, From aff54a09148cf8f83313ccc7a8e97af41792ae98 Mon Sep 17 00:00:00 2001 From: Kevin Klues Date: Fri, 15 Oct 2021 10:26:50 +0000 Subject: [PATCH 2/6] Abstract out whether NUMA or Sockets come first in the memory hierarchy This allows us to get rid of the check for determining which one is higher all throughout the code. 
Now we just check once and instantiate an interface of the appropriate type that makes sure the ordering in the hierarchy is preserved through the appropriate calls. Signed-off-by: Kevin Klues --- pkg/kubelet/cm/cpumanager/cpu_assignment.go | 219 ++++++++++++-------- 1 file changed, 130 insertions(+), 89 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index 21d5530481a27..50011eae8875b 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -26,20 +26,133 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) +type numaOrSocketsFirstFuncs interface { + takeFullFirstLevel() + takeFullSecondLevel() + sortAvailableNUMANodes() []int + sortAvailableSockets() []int + sortAvailableCores() []int +} + +type numaFirst struct{ acc *cpuAccumulator } +type socketsFirst struct{ acc *cpuAccumulator } + +var _ numaOrSocketsFirstFuncs = (*numaFirst)(nil) +var _ numaOrSocketsFirstFuncs = (*socketsFirst)(nil) + +// If NUMA nodes are higher in the memory hierarchy than sockets, then we take +// from the set of NUMA Nodes as the first level. +func (n *numaFirst) takeFullFirstLevel() { + n.acc.takeFullNUMANodes() +} + +// If NUMA nodes are higher in the memory hierarchy than sockets, then we take +// from the set of sockets as the second level. +func (n *numaFirst) takeFullSecondLevel() { + n.acc.takeFullSockets() +} + +// If NUMA nodes are higher in the memory hierarchy than sockets, then just +// sort the NUMA nodes directly, and return them. +func (n *numaFirst) sortAvailableNUMANodes() []int { + numas := n.acc.details.NUMANodes().ToSliceNoSort() + n.acc.sort(numas, n.acc.details.CPUsInNUMANodes) + return numas +} + +// If NUMA nodes are higher in the memory hierarchy than sockets, then we need +// to pull the set of sockets out of each sorted NUMA node, and accumulate the +// partial order across them. +func (n *numaFirst) sortAvailableSockets() []int { + var result []int + for _, numa := range n.sortAvailableNUMANodes() { + sockets := n.acc.details.SocketsInNUMANodes(numa).ToSliceNoSort() + n.acc.sort(sockets, n.acc.details.CPUsInSockets) + result = append(result, sockets...) + } + return result +} + +// If NUMA nodes are higher in the memory hierarchy than sockets, then +// cores sit directly below sockets in the memory hierarchy. +func (n *numaFirst) sortAvailableCores() []int { + var result []int + for _, socket := range n.acc.sortAvailableSockets() { + cores := n.acc.details.CoresInSockets(socket).ToSliceNoSort() + n.acc.sort(cores, n.acc.details.CPUsInCores) + result = append(result, cores...) + } + return result +} + +// If sockets are higher in the memory hierarchy than NUMA nodes, then we take +// from the set of sockets as the first level. +func (s *socketsFirst) takeFullFirstLevel() { + s.acc.takeFullSockets() +} + +// If sockets are higher in the memory hierarchy than NUMA nodes, then we take +// from the set of NUMA Nodes as the second level. +func (s *socketsFirst) takeFullSecondLevel() { + s.acc.takeFullNUMANodes() +} + +// If sockets are higher in the memory hierarchy than NUMA nodes, then we need +// to pull the set of NUMA nodes out of each sorted Socket, and accumulate the +// partial order across them. 
+func (s *socketsFirst) sortAvailableNUMANodes() []int { + var result []int + for _, socket := range s.sortAvailableSockets() { + numas := s.acc.details.NUMANodesInSockets(socket).ToSliceNoSort() + s.acc.sort(numas, s.acc.details.CPUsInNUMANodes) + result = append(result, numas...) + } + return result +} + +// If sockets are higher in the memory hierarchy than NUMA nodes, then just +// sort the sockets directly, and return them. +func (s *socketsFirst) sortAvailableSockets() []int { + sockets := s.acc.details.Sockets().ToSliceNoSort() + s.acc.sort(sockets, s.acc.details.CPUsInSockets) + return sockets +} + +// If sockets are higher in the memory hierarchy than NUMA nodes, then cores +// sit directly below NUMA Nodes in the memory hierarchy. +func (s *socketsFirst) sortAvailableCores() []int { + var result []int + for _, numa := range s.acc.sortAvailableNUMANodes() { + cores := s.acc.details.CoresInNUMANodes(numa).ToSliceNoSort() + s.acc.sort(cores, s.acc.details.CPUsInCores) + result = append(result, cores...) + } + return result +} + type cpuAccumulator struct { - topo *topology.CPUTopology - details topology.CPUDetails - numCPUsNeeded int - result cpuset.CPUSet + topo *topology.CPUTopology + details topology.CPUDetails + numCPUsNeeded int + result cpuset.CPUSet + numaOrSocketsFirst numaOrSocketsFirstFuncs } func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int) *cpuAccumulator { - return &cpuAccumulator{ + acc := &cpuAccumulator{ topo: topo, details: topo.CPUDetails.KeepOnly(availableCPUs), numCPUsNeeded: numCPUs, result: cpuset.NewCPUSet(), } + + if topo.NumSockets >= topo.NumNUMANodes { + acc.numaOrSocketsFirst = &numaFirst{acc} + } else { + acc.numaOrSocketsFirst = &socketsFirst{acc} + } + + return acc } // Returns true if the supplied NUMANode is fully available in `topoDetails`. @@ -116,78 +229,19 @@ func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet) }) } -// Sort all NUMA nodes with free CPUs: -// - If NUMA nodes are higher than sockets in the memory hierarchy then sort -// them directly using the sort() algorithm defined above. -// - Otherwise sort them: -// - First by socket using sortAvailableSockets(). -// - Then within each socket, using the sort() algorithm defined above. +// Sort all NUMA nodes with free CPUs. func (a *cpuAccumulator) sortAvailableNUMANodes() []int { - // If NUMA nodes are equal or higher in the memory hierarchy than sockets - if a.topo.NumSockets >= a.topo.NumNUMANodes { - numas := a.details.NUMANodes().ToSliceNoSort() - a.sort(numas, a.details.CPUsInNUMANodes) - return numas - } - - // Otherwise each socket has multiple NUMA nodes - var result []int - for _, socket := range a.sortAvailableSockets() { - numas := a.details.NUMANodesInSockets(socket).ToSliceNoSort() - a.sort(numas, a.details.CPUsInNUMANodes) - result = append(result, numas...) - } - return result + return a.numaOrSocketsFirst.sortAvailableNUMANodes() } -// Sort all sockets with free CPUs: -// - If sockets are higher than NUMA nodes in the memory hierarchy then sort -// them directly using the sort() algorithm defined above. -// - Otherwise sort them: -// - First by NUMA node using sortAvailableNUMANodes(). -// - Then within each NUMA node, using the sort() algorithm defined above. +// Sort all sockets with free CPUs. 
func (a *cpuAccumulator) sortAvailableSockets() []int { - // If sockets are higher than NUMA nodes in the memory hierarchy - if a.topo.NumNUMANodes >= a.topo.NumSockets { - sockets := a.details.Sockets().ToSliceNoSort() - a.sort(sockets, a.details.CPUsInSockets) - return sockets - } - - // Otherwise each NUMA Node has multiple sockets - var result []int - for _, numa := range a.sortAvailableNUMANodes() { - sockets := a.details.SocketsInNUMANodes(numa).ToSliceNoSort() - a.sort(sockets, a.details.CPUsInSockets) - result = append(result, sockets...) - } - return result + return a.numaOrSocketsFirst.sortAvailableSockets() } // Sort all cores with free CPUs: -// - First by socket (or NUMA node) using sortAvailableSockets() (or sortAvailableNUMANodes()). -// - Then within each socket or NUMA node, using the sort() algorithm defined above. func (a *cpuAccumulator) sortAvailableCores() []int { - // If NUMA nodes are higher in the memory hierarchy than sockets, then - // cores sit directly below sockets in the memory hierarchy. - if a.topo.NumSockets >= a.topo.NumNUMANodes { - var result []int - for _, socket := range a.sortAvailableSockets() { - cores := a.details.CoresInSockets(socket).ToSliceNoSort() - a.sort(cores, a.details.CPUsInCores) - result = append(result, cores...) - } - return result - } - - // Otherwise they sit directly below NUMA nodes. - var result []int - for _, numa := range a.sortAvailableNUMANodes() { - cores := a.details.CoresInNUMANodes(numa).ToSliceNoSort() - a.sort(cores, a.details.CPUsInCores) - result = append(result, cores...) - } - return result + return a.numaOrSocketsFirst.sortAvailableCores() } // Sort all available CPUs: @@ -278,26 +332,13 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // requires at least a NUMA node or socket's-worth of CPUs. If NUMA // Nodes map to 1 or more sockets, pull from NUMA nodes first. // Otherwise pull from sockets first. - if acc.topo.NumSockets >= acc.topo.NumNUMANodes { - acc.takeFullNUMANodes() - if acc.isSatisfied() { - return acc.result, nil - } - - acc.takeFullSockets() - if acc.isSatisfied() { - return acc.result, nil - } - } else { - acc.takeFullSockets() - if acc.isSatisfied() { - return acc.result, nil - } - - acc.takeFullNUMANodes() - if acc.isSatisfied() { - return acc.result, nil - } + acc.numaOrSocketsFirst.takeFullFirstLevel() + if acc.isSatisfied() { + return acc.result, nil + } + acc.numaOrSocketsFirst.takeFullSecondLevel() + if acc.isSatisfied() { + return acc.result, nil } // 2. Acquire whole cores, if available and the container requires at least From 15caa134b2860aba409c8ef6b1288e618ced5bc2 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Thu, 14 Oct 2021 16:46:16 +0200 Subject: [PATCH 3/6] cpumanager: topology: use rich cmp package User the `cmp.Diff` package in the unit tests, moving away from `reflect.DeepEqual`. This gives us a clearer picture of the differences when the tests fail. 
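As a minimal, illustrative sketch of the comparison pattern (the helper name and the got/want values here are placeholders, not part of this patch):

    import (
        "testing"

        "github.com/google/go-cmp/cmp"
    )

    // assertDeepEqual fails the test and prints a field-by-field diff when
    // got and want differ, instead of only reporting that they differ.
    func assertDeepEqual(t *testing.T, got, want interface{}) {
        t.Helper()
        if diff := cmp.Diff(got, want); diff != "" {
            t.Errorf("unexpected result (-got +want):\n%s", diff)
        }
    }

Compared to reflect.DeepEqual, the diff output pinpoints exactly which CPUTopology fields disagree when Discover() returns an unexpected result.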
Signed-off-by: Francesco Romani --- pkg/kubelet/cm/cpumanager/topology/topology_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/topology/topology_test.go b/pkg/kubelet/cm/cpumanager/topology/topology_test.go index 36f4ce3d67f38..f28bc800c2056 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology_test.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology_test.go @@ -21,6 +21,7 @@ import ( "testing" cadvisorapi "github.com/google/cadvisor/info/v1" + "github.com/google/go-cmp/cmp" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -202,8 +203,8 @@ func Test_Discover(t *testing.T) { } return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Discover() = %v, want %v", got, tt.want) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("Discover() = %v, want %v diff=%s", got, tt.want, diff) } }) } From f6ccc4426ad24317a537d05d0757f73a931a1c96 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Thu, 14 Oct 2021 16:49:58 +0200 Subject: [PATCH 4/6] cpumanager: test: use proper subtests The exisiting unit tests where performing subtests without actually using the full features of the testing package (https://pkg.go.dev/testing#hdr-Subtests_and_Sub_benchmarks) Update them with fairly minimal changes. The patch is deceptively large because we need to move the code inside a new block. Signed-off-by: Francesco Romani --- .../cm/cpumanager/cpu_assignment_test.go | 101 ++++++++++-------- 1 file changed, 56 insertions(+), 45 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go index 7a25e3887c19d..998fab53796ff 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go @@ -64,11 +64,14 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) { } for _, tc := range testCases { - acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) - result := acc.freeSockets() - if !reflect.DeepEqual(result, tc.expect) { - t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect) - } + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) + result := acc.freeSockets() + if !reflect.DeepEqual(result, tc.expect) { + t.Errorf("expected %v to equal %v", result, tc.expect) + + } + }) } } @@ -130,11 +133,13 @@ func TestCPUAccumulatorFreeCores(t *testing.T) { } for _, tc := range testCases { - acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) - result := acc.freeCores() - if !reflect.DeepEqual(result, tc.expect) { - t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect) - } + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) + result := acc.freeCores() + if !reflect.DeepEqual(result, tc.expect) { + t.Errorf("expected %v to equal %v", result, tc.expect) + } + }) } } @@ -184,11 +189,13 @@ func TestCPUAccumulatorFreeCPUs(t *testing.T) { } for _, tc := range testCases { - acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) - result := acc.freeCPUs() - if !reflect.DeepEqual(result, tc.expect) { - t.Errorf("[%s] expected %v to equal %v", tc.description, result, tc.expect) - } + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) + result := acc.freeCPUs() + if !reflect.DeepEqual(result, tc.expect) { + t.Errorf("expected %v to equal %v", result, tc.expect) + } + }) } } @@ -268,31 +275,33 @@ func TestCPUAccumulatorTake(t *testing.T) { } for _, tc := range 
testCases { - acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs) - totalTaken := 0 - for _, cpus := range tc.takeCPUs { - acc.take(cpus) - totalTaken += cpus.Size() - } - if tc.expectSatisfied != acc.isSatisfied() { - t.Errorf("[%s] expected acc.isSatisfied() to be %t", tc.description, tc.expectSatisfied) - } - if tc.expectFailed != acc.isFailed() { - t.Errorf("[%s] expected acc.isFailed() to be %t", tc.description, tc.expectFailed) - } - for _, cpus := range tc.takeCPUs { - availableCPUs := acc.details.CPUs() - if cpus.Intersection(availableCPUs).Size() > 0 { - t.Errorf("[%s] expected intersection of taken cpus [%s] and acc.details.CPUs() [%s] to be empty", tc.description, cpus, availableCPUs) + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs) + totalTaken := 0 + for _, cpus := range tc.takeCPUs { + acc.take(cpus) + totalTaken += cpus.Size() + } + if tc.expectSatisfied != acc.isSatisfied() { + t.Errorf("expected acc.isSatisfied() to be %t", tc.expectSatisfied) } - if !cpus.IsSubsetOf(acc.result) { - t.Errorf("[%s] expected [%s] to be a subset of acc.result [%s]", tc.description, cpus, acc.result) + if tc.expectFailed != acc.isFailed() { + t.Errorf("expected acc.isFailed() to be %t", tc.expectFailed) } - } - expNumCPUsNeeded := tc.numCPUs - totalTaken - if acc.numCPUsNeeded != expNumCPUsNeeded { - t.Errorf("[%s] expected acc.numCPUsNeeded to be %d (got %d)", tc.description, expNumCPUsNeeded, acc.numCPUsNeeded) - } + for _, cpus := range tc.takeCPUs { + availableCPUs := acc.details.CPUs() + if cpus.Intersection(availableCPUs).Size() > 0 { + t.Errorf("expected intersection of taken cpus [%s] and acc.details.CPUs() [%s] to be empty", cpus, availableCPUs) + } + if !cpus.IsSubsetOf(acc.result) { + t.Errorf("expected [%s] to be a subset of acc.result [%s]", cpus, acc.result) + } + } + expNumCPUsNeeded := tc.numCPUs - totalTaken + if acc.numCPUsNeeded != expNumCPUsNeeded { + t.Errorf("expected acc.numCPUsNeeded to be %d (got %d)", expNumCPUsNeeded, acc.numCPUsNeeded) + } + }) } } @@ -380,12 +389,14 @@ func TestTakeByTopology(t *testing.T) { } for _, tc := range testCases { - result, err := takeByTopology(tc.topo, tc.availableCPUs, tc.numCPUs) - if tc.expErr != "" && err.Error() != tc.expErr { - t.Errorf("expected error to be [%v] but it was [%v] in test \"%s\"", tc.expErr, err, tc.description) - } - if !result.Equals(tc.expResult) { - t.Errorf("expected result [%s] to equal [%s] in test \"%s\"", result, tc.expResult, tc.description) - } + t.Run(tc.description, func(t *testing.T) { + result, err := takeByTopology(tc.topo, tc.availableCPUs, tc.numCPUs) + if tc.expErr != "" && err.Error() != tc.expErr { + t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err) + } + if !result.Equals(tc.expResult) { + t.Errorf("expected result [%s] to equal [%s]", result, tc.expResult) + } + }) } } From 547996f3f67b33a19df2d5e58e1b8da68b810cde Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Thu, 14 Oct 2021 16:59:28 +0200 Subject: [PATCH 5/6] cpumanager: test NUMA node support for CPU assign (1) This batch of tests adds a real topology on which each physical socket has multiple NUMA zones. Taken by a real dual xeon 6320 gold. 
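As a rough sketch of the behavior the new cases exercise (it relies on the topoDualSocketMultiNumaPerSocketHT fixture and the mustParseCPUSet helper added by this patch, so it is illustrative only and the test name is made up):

    // With the whole 80-CPU machine free, a request for 20 CPUs (one NUMA
    // node's worth) is expected to be satisfied entirely from NUMA node 0:
    // CPUs 0-9 plus their hyperthread siblings 40-49.
    func TestTakeByTopologySingleNUMANodeSketch(t *testing.T) {
        available := mustParseCPUSet(t, "0-79")
        result, err := takeByTopology(topoDualSocketMultiNumaPerSocketHT, available, 20)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        want := mustParseCPUSet(t, "0-9,40-49")
        if !result.Equals(want) {
            t.Errorf("expected result [%s] to equal [%s]", result, want)
        }
    }

The corresponding table-driven case added to cpu_assignment_test.go below asserts exactly this expectation.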
Signed-off-by: Francesco Romani --- .../cm/cpumanager/cpu_assignment_test.go | 199 ++++++++++++++++++ pkg/kubelet/cm/cpumanager/policy_test.go | 101 +++++++++ .../cm/cpumanager/topology/topology.go | 5 +- .../cm/cpumanager/topology/topology_test.go | 156 ++++++++++++++ 4 files changed, 459 insertions(+), 2 deletions(-) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go index 998fab53796ff..bdfffedb676ac 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go @@ -61,6 +61,24 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) { cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11), []int{}, }, + { + "dual socket, multi numa per socket, HT, 2 sockets free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "0-79"), + []int{0, 1}, + }, + { + "dual socket, multi numa per socket, HT, 1 sockets free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-79"), + []int{1}, + }, + { + "dual socket, multi numa per socket, HT, 0 sockets free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-78"), + []int{}, + }, } for _, tc := range testCases { @@ -75,6 +93,139 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) { } } +func TestCPUAccumulatorFreeNUMANodes(t *testing.T) { + testCases := []struct { + description string + topo *topology.CPUTopology + availableCPUs cpuset.CPUSet + expect []int + }{ + { + "single socket HT, 1 NUMA node free", + topoSingleSocketHT, + cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + []int{0}, + }, + { + "single socket HT, 0 NUMA Node free", + topoSingleSocketHT, + cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), + []int{}, + }, + { + "dual socket HT, 2 NUMA Node free", + topoDualSocketHT, + cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + []int{0, 1}, + }, + { + "dual socket HT, 1 NUMA Node free", + topoDualSocketHT, + cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), + []int{1}, + }, + { + "dual socket HT, 0 NUMA node free", + topoDualSocketHT, + cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11), + []int{}, + }, + { + "dual socket, multi numa per socket, HT, 4 NUMA Node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "0-79"), + []int{0, 1, 2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 3 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-79"), + []int{1, 2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 2 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-9,11-79"), + []int{2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 1 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-9,11-59,61-79"), + []int{3}, + }, + { + "dual socket, multi numa per socket, HT, 0 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-9,11-59,61-78"), + []int{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) + result := acc.freeNUMANodes() + if !reflect.DeepEqual(result, tc.expect) { + t.Errorf("expected %v to equal %v", result, tc.expect) + } + }) + } +} + +func TestCPUAccumulatorFreeSocketsAndNUMANodes(t *testing.T) { + testCases := []struct { + description string + topo *topology.CPUTopology + availableCPUs cpuset.CPUSet + expectSockets []int + expectNUMANodes []int + }{ + { + "dual socket, multi numa per socket, HT, 2 Socket/4 NUMA Node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, 
"0-79"), + []int{0, 1}, + []int{0, 1, 2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 1 Socket/3 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-79"), + []int{1}, + []int{1, 2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 1 Socket/ 2 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-9,11-79"), + []int{1}, + []int{2, 3}, + }, + { + "dual socket, multi numa per socket, HT, 0 Socket/ 2 NUMA node free", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-59,61-79"), + []int{}, + []int{1, 3}, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) + resultNUMANodes := acc.freeNUMANodes() + if !reflect.DeepEqual(resultNUMANodes, tc.expectNUMANodes) { + t.Errorf("expected NUMA Nodes %v to equal %v", resultNUMANodes, tc.expectNUMANodes) + } + resultSockets := acc.freeSockets() + if !reflect.DeepEqual(resultSockets, tc.expectSockets) { + t.Errorf("expected Sockets %v to equal %v", resultSockets, tc.expectSockets) + } + }) + } +} + func TestCPUAccumulatorFreeCores(t *testing.T) { testCases := []struct { description string @@ -386,6 +537,46 @@ func TestTakeByTopology(t *testing.T) { "", cpuset.NewCPUSet(0, 2, 4, 6, 8, 10), }, + { + "take a socket of cpus from dual socket with multi-numa-per-socket with HT", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "0-79"), + 40, + "", + mustParseCPUSet(t, "0-19,40-59"), + }, + { + "take a NUMA node of cpus from dual socket with multi-numa-per-socket with HT", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "0-79"), + 20, + "", + mustParseCPUSet(t, "0-9,40-49"), + }, + { + "take a NUMA node of cpus from dual socket with multi-numa-per-socket with HT, with 1 NUMA node already taken", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "10-39,50-79"), + 20, + "", + mustParseCPUSet(t, "10-19,50-59"), + }, + { + "take a socket and a NUMA node of cpus from dual socket with multi-numa-per-socket with HT", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "0-79"), + 60, + "", + mustParseCPUSet(t, "0-29,40-69"), + }, + { + "take a socket and a NUMA node of cpus from dual socket with multi-numa-per-socket with HT, a core taken", + topoDualSocketMultiNumaPerSocketHT, + mustParseCPUSet(t, "1-39,41-79"), // reserve the first (phys) core (0,40) + 60, + "", + mustParseCPUSet(t, "10-39,50-79"), + }, } for _, tc := range testCases { @@ -400,3 +591,11 @@ func TestTakeByTopology(t *testing.T) { }) } } + +func mustParseCPUSet(t *testing.T, s string) cpuset.CPUSet { + cpus, err := cpuset.Parse(s) + if err != nil { + t.Errorf("parsing %q: %v", s, err) + } + return cpus +} diff --git a/pkg/kubelet/cm/cpumanager/policy_test.go b/pkg/kubelet/cm/cpumanager/policy_test.go index 7ce051b2b8faf..a106ec6b78258 100644 --- a/pkg/kubelet/cm/cpumanager/policy_test.go +++ b/pkg/kubelet/cm/cpumanager/policy_test.go @@ -414,4 +414,105 @@ var ( 282: {CoreID: 55, SocketID: 3, NUMANodeID: 3}, }, } + /* + Topology from dual xeon gold 6230; lscpu excerpt + CPU(s): 80 + On-line CPU(s) list: 0-79 + Thread(s) per core: 2 + Core(s) per socket: 20 + Socket(s): 2 + NUMA node(s): 4 + NUMA node0 CPU(s): 0-9,40-49 + NUMA node1 CPU(s): 10-19,50-59 + NUMA node2 CPU(s): 20-29,60-69 + NUMA node3 CPU(s): 30-39,70-79 + */ + topoDualSocketMultiNumaPerSocketHT = &topology.CPUTopology{ + NumCPUs: 80, + NumSockets: 2, + NumCores: 40, + NumNUMANodes: 4, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 
0, SocketID: 0, NUMANodeID: 0}, + 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 2: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 3: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 4: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 5: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 6: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 7: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 8: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 9: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 10: {CoreID: 10, SocketID: 0, NUMANodeID: 1}, + 11: {CoreID: 11, SocketID: 0, NUMANodeID: 1}, + 12: {CoreID: 12, SocketID: 0, NUMANodeID: 1}, + 13: {CoreID: 13, SocketID: 0, NUMANodeID: 1}, + 14: {CoreID: 14, SocketID: 0, NUMANodeID: 1}, + 15: {CoreID: 15, SocketID: 0, NUMANodeID: 1}, + 16: {CoreID: 16, SocketID: 0, NUMANodeID: 1}, + 17: {CoreID: 17, SocketID: 0, NUMANodeID: 1}, + 18: {CoreID: 18, SocketID: 0, NUMANodeID: 1}, + 19: {CoreID: 19, SocketID: 0, NUMANodeID: 1}, + 20: {CoreID: 20, SocketID: 1, NUMANodeID: 2}, + 21: {CoreID: 21, SocketID: 1, NUMANodeID: 2}, + 22: {CoreID: 22, SocketID: 1, NUMANodeID: 2}, + 23: {CoreID: 23, SocketID: 1, NUMANodeID: 2}, + 24: {CoreID: 24, SocketID: 1, NUMANodeID: 2}, + 25: {CoreID: 25, SocketID: 1, NUMANodeID: 2}, + 26: {CoreID: 26, SocketID: 1, NUMANodeID: 2}, + 27: {CoreID: 27, SocketID: 1, NUMANodeID: 2}, + 28: {CoreID: 28, SocketID: 1, NUMANodeID: 2}, + 29: {CoreID: 29, SocketID: 1, NUMANodeID: 2}, + 30: {CoreID: 30, SocketID: 1, NUMANodeID: 3}, + 31: {CoreID: 31, SocketID: 1, NUMANodeID: 3}, + 32: {CoreID: 32, SocketID: 1, NUMANodeID: 3}, + 33: {CoreID: 33, SocketID: 1, NUMANodeID: 3}, + 34: {CoreID: 34, SocketID: 1, NUMANodeID: 3}, + 35: {CoreID: 35, SocketID: 1, NUMANodeID: 3}, + 36: {CoreID: 36, SocketID: 1, NUMANodeID: 3}, + 37: {CoreID: 37, SocketID: 1, NUMANodeID: 3}, + 38: {CoreID: 38, SocketID: 1, NUMANodeID: 3}, + 39: {CoreID: 39, SocketID: 1, NUMANodeID: 3}, + 40: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 41: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 42: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 43: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 44: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 45: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 46: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 47: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 48: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 49: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 50: {CoreID: 10, SocketID: 0, NUMANodeID: 1}, + 51: {CoreID: 11, SocketID: 0, NUMANodeID: 1}, + 52: {CoreID: 12, SocketID: 0, NUMANodeID: 1}, + 53: {CoreID: 13, SocketID: 0, NUMANodeID: 1}, + 54: {CoreID: 14, SocketID: 0, NUMANodeID: 1}, + 55: {CoreID: 15, SocketID: 0, NUMANodeID: 1}, + 56: {CoreID: 16, SocketID: 0, NUMANodeID: 1}, + 57: {CoreID: 17, SocketID: 0, NUMANodeID: 1}, + 58: {CoreID: 18, SocketID: 0, NUMANodeID: 1}, + 59: {CoreID: 19, SocketID: 0, NUMANodeID: 1}, + 60: {CoreID: 20, SocketID: 1, NUMANodeID: 2}, + 61: {CoreID: 21, SocketID: 1, NUMANodeID: 2}, + 62: {CoreID: 22, SocketID: 1, NUMANodeID: 2}, + 63: {CoreID: 23, SocketID: 1, NUMANodeID: 2}, + 64: {CoreID: 24, SocketID: 1, NUMANodeID: 2}, + 65: {CoreID: 25, SocketID: 1, NUMANodeID: 2}, + 66: {CoreID: 26, SocketID: 1, NUMANodeID: 2}, + 67: {CoreID: 27, SocketID: 1, NUMANodeID: 2}, + 68: {CoreID: 28, SocketID: 1, NUMANodeID: 2}, + 69: {CoreID: 29, SocketID: 1, NUMANodeID: 2}, + 70: {CoreID: 30, SocketID: 1, NUMANodeID: 3}, + 71: {CoreID: 31, SocketID: 1, NUMANodeID: 3}, + 72: {CoreID: 32, SocketID: 1, NUMANodeID: 3}, + 73: {CoreID: 33, SocketID: 1, NUMANodeID: 3}, + 74: {CoreID: 34, SocketID: 1, NUMANodeID: 
3}, + 75: {CoreID: 35, SocketID: 1, NUMANodeID: 3}, + 76: {CoreID: 36, SocketID: 1, NUMANodeID: 3}, + 77: {CoreID: 37, SocketID: 1, NUMANodeID: 3}, + 78: {CoreID: 38, SocketID: 1, NUMANodeID: 3}, + 79: {CoreID: 39, SocketID: 1, NUMANodeID: 3}, + }, + } ) diff --git a/pkg/kubelet/cm/cpumanager/topology/topology.go b/pkg/kubelet/cm/cpumanager/topology/topology.go index 37c2f105b7e65..c9b0849261ff0 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -34,7 +34,8 @@ type CPUDetails map[int]CPUInfo // CPUTopology contains details of node cpu, where : // CPU - logical CPU, cadvisor - thread // Core - physical CPU, cadvisor - Core -// Socket - socket, cadvisor - Node +// Socket - socket, cadvisor - Socket +// NUMA Node - NUMA cell, cadvisor - Node type CPUTopology struct { NumCPUs int NumCores int @@ -254,7 +255,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { // getUniqueCoreID computes coreId as the lowest cpuID // for a given Threads []int slice. This will assure that coreID's are -// platform unique (opposite to what cAdvisor reports - socket unique) +// platform unique (opposite to what cAdvisor reports) func getUniqueCoreID(threads []int) (coreID int, err error) { if len(threads) == 0 { return 0, fmt.Errorf("no cpus provided") diff --git a/pkg/kubelet/cm/cpumanager/topology/topology_test.go b/pkg/kubelet/cm/cpumanager/topology/topology_test.go index f28bc800c2056..9cc938226e1ed 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology_test.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology_test.go @@ -75,6 +75,162 @@ func Test_Discover(t *testing.T) { }, wantErr: false, }, + { + // dual xeon gold 6230 + name: "DualSocketMultiNumaPerSocketHT", + machineInfo: cadvisorapi.MachineInfo{ + NumCores: 80, + NumSockets: 2, + Topology: []cadvisorapi.Node{ + {Id: 0, + Cores: []cadvisorapi.Core{ + {SocketID: 0, Id: 0, Threads: []int{0, 40}}, + {SocketID: 0, Id: 1, Threads: []int{1, 41}}, + {SocketID: 0, Id: 2, Threads: []int{2, 42}}, + {SocketID: 0, Id: 8, Threads: []int{3, 43}}, + {SocketID: 0, Id: 9, Threads: []int{4, 44}}, + {SocketID: 0, Id: 16, Threads: []int{5, 45}}, + {SocketID: 0, Id: 17, Threads: []int{6, 46}}, + {SocketID: 0, Id: 18, Threads: []int{7, 47}}, + {SocketID: 0, Id: 24, Threads: []int{8, 48}}, + {SocketID: 0, Id: 25, Threads: []int{9, 49}}, + }, + }, + {Id: 1, + Cores: []cadvisorapi.Core{ + {SocketID: 0, Id: 3, Threads: []int{10, 50}}, + {SocketID: 0, Id: 4, Threads: []int{11, 51}}, + {SocketID: 0, Id: 10, Threads: []int{12, 52}}, + {SocketID: 0, Id: 11, Threads: []int{13, 53}}, + {SocketID: 0, Id: 12, Threads: []int{14, 54}}, + {SocketID: 0, Id: 19, Threads: []int{15, 55}}, + {SocketID: 0, Id: 20, Threads: []int{16, 56}}, + {SocketID: 0, Id: 26, Threads: []int{17, 57}}, + {SocketID: 0, Id: 27, Threads: []int{18, 58}}, + {SocketID: 0, Id: 28, Threads: []int{19, 59}}, + }, + }, + {Id: 2, + Cores: []cadvisorapi.Core{ + {SocketID: 1, Id: 0, Threads: []int{20, 60}}, + {SocketID: 1, Id: 1, Threads: []int{21, 61}}, + {SocketID: 1, Id: 2, Threads: []int{22, 62}}, + {SocketID: 1, Id: 8, Threads: []int{23, 63}}, + {SocketID: 1, Id: 9, Threads: []int{24, 64}}, + {SocketID: 1, Id: 16, Threads: []int{25, 65}}, + {SocketID: 1, Id: 17, Threads: []int{26, 66}}, + {SocketID: 1, Id: 18, Threads: []int{27, 67}}, + {SocketID: 1, Id: 24, Threads: []int{28, 68}}, + {SocketID: 1, Id: 25, Threads: []int{29, 69}}, + }, + }, + {Id: 3, + Cores: []cadvisorapi.Core{ + {SocketID: 1, Id: 3, Threads: []int{30, 70}}, + 
{SocketID: 1, Id: 4, Threads: []int{31, 71}}, + {SocketID: 1, Id: 10, Threads: []int{32, 72}}, + {SocketID: 1, Id: 11, Threads: []int{33, 73}}, + {SocketID: 1, Id: 12, Threads: []int{34, 74}}, + {SocketID: 1, Id: 19, Threads: []int{35, 75}}, + {SocketID: 1, Id: 20, Threads: []int{36, 76}}, + {SocketID: 1, Id: 26, Threads: []int{37, 77}}, + {SocketID: 1, Id: 27, Threads: []int{38, 78}}, + {SocketID: 1, Id: 28, Threads: []int{39, 79}}, + }, + }, + }, + }, + want: &CPUTopology{ + NumCPUs: 80, + NumSockets: 2, + NumCores: 40, + NumNUMANodes: 4, + CPUDetails: map[int]CPUInfo{ + 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 2: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 3: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 4: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 5: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 6: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 7: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 8: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 9: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 10: {CoreID: 10, SocketID: 0, NUMANodeID: 1}, + 11: {CoreID: 11, SocketID: 0, NUMANodeID: 1}, + 12: {CoreID: 12, SocketID: 0, NUMANodeID: 1}, + 13: {CoreID: 13, SocketID: 0, NUMANodeID: 1}, + 14: {CoreID: 14, SocketID: 0, NUMANodeID: 1}, + 15: {CoreID: 15, SocketID: 0, NUMANodeID: 1}, + 16: {CoreID: 16, SocketID: 0, NUMANodeID: 1}, + 17: {CoreID: 17, SocketID: 0, NUMANodeID: 1}, + 18: {CoreID: 18, SocketID: 0, NUMANodeID: 1}, + 19: {CoreID: 19, SocketID: 0, NUMANodeID: 1}, + 20: {CoreID: 20, SocketID: 1, NUMANodeID: 2}, + 21: {CoreID: 21, SocketID: 1, NUMANodeID: 2}, + 22: {CoreID: 22, SocketID: 1, NUMANodeID: 2}, + 23: {CoreID: 23, SocketID: 1, NUMANodeID: 2}, + 24: {CoreID: 24, SocketID: 1, NUMANodeID: 2}, + 25: {CoreID: 25, SocketID: 1, NUMANodeID: 2}, + 26: {CoreID: 26, SocketID: 1, NUMANodeID: 2}, + 27: {CoreID: 27, SocketID: 1, NUMANodeID: 2}, + 28: {CoreID: 28, SocketID: 1, NUMANodeID: 2}, + 29: {CoreID: 29, SocketID: 1, NUMANodeID: 2}, + 30: {CoreID: 30, SocketID: 1, NUMANodeID: 3}, + 31: {CoreID: 31, SocketID: 1, NUMANodeID: 3}, + 32: {CoreID: 32, SocketID: 1, NUMANodeID: 3}, + 33: {CoreID: 33, SocketID: 1, NUMANodeID: 3}, + 34: {CoreID: 34, SocketID: 1, NUMANodeID: 3}, + 35: {CoreID: 35, SocketID: 1, NUMANodeID: 3}, + 36: {CoreID: 36, SocketID: 1, NUMANodeID: 3}, + 37: {CoreID: 37, SocketID: 1, NUMANodeID: 3}, + 38: {CoreID: 38, SocketID: 1, NUMANodeID: 3}, + 39: {CoreID: 39, SocketID: 1, NUMANodeID: 3}, + 40: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 41: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 42: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 43: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 44: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 45: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 46: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 47: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 48: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 49: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 50: {CoreID: 10, SocketID: 0, NUMANodeID: 1}, + 51: {CoreID: 11, SocketID: 0, NUMANodeID: 1}, + 52: {CoreID: 12, SocketID: 0, NUMANodeID: 1}, + 53: {CoreID: 13, SocketID: 0, NUMANodeID: 1}, + 54: {CoreID: 14, SocketID: 0, NUMANodeID: 1}, + 55: {CoreID: 15, SocketID: 0, NUMANodeID: 1}, + 56: {CoreID: 16, SocketID: 0, NUMANodeID: 1}, + 57: {CoreID: 17, SocketID: 0, NUMANodeID: 1}, + 58: {CoreID: 18, SocketID: 0, NUMANodeID: 1}, + 59: {CoreID: 19, SocketID: 0, NUMANodeID: 1}, + 60: {CoreID: 20, SocketID: 1, NUMANodeID: 2}, + 61: {CoreID: 21, SocketID: 1, NUMANodeID: 2}, + 62: {CoreID: 22, SocketID: 
1, NUMANodeID: 2}, + 63: {CoreID: 23, SocketID: 1, NUMANodeID: 2}, + 64: {CoreID: 24, SocketID: 1, NUMANodeID: 2}, + 65: {CoreID: 25, SocketID: 1, NUMANodeID: 2}, + 66: {CoreID: 26, SocketID: 1, NUMANodeID: 2}, + 67: {CoreID: 27, SocketID: 1, NUMANodeID: 2}, + 68: {CoreID: 28, SocketID: 1, NUMANodeID: 2}, + 69: {CoreID: 29, SocketID: 1, NUMANodeID: 2}, + 70: {CoreID: 30, SocketID: 1, NUMANodeID: 3}, + 71: {CoreID: 31, SocketID: 1, NUMANodeID: 3}, + 72: {CoreID: 32, SocketID: 1, NUMANodeID: 3}, + 73: {CoreID: 33, SocketID: 1, NUMANodeID: 3}, + 74: {CoreID: 34, SocketID: 1, NUMANodeID: 3}, + 75: {CoreID: 35, SocketID: 1, NUMANodeID: 3}, + 76: {CoreID: 36, SocketID: 1, NUMANodeID: 3}, + 77: {CoreID: 37, SocketID: 1, NUMANodeID: 3}, + 78: {CoreID: 38, SocketID: 1, NUMANodeID: 3}, + 79: {CoreID: 39, SocketID: 1, NUMANodeID: 3}, + }, + }, + wantErr: false, + }, + { name: "DualSocketNoHT", machineInfo: cadvisorapi.MachineInfo{ From 4bae656835769b98fbbad3e1915453a9699503b7 Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Fri, 15 Oct 2021 08:37:42 +0200 Subject: [PATCH 6/6] cpumanager: test NUMA node support for CPU assign (2) This batch of tests adds a fake topology on which each numa node has multiple sockets. We didn't find yet a real HW topology in the wild like this, but we need one to fully exercise the code. So, until we find a HW topology, we add a fake one flipping the NUMA/socket config of the existing xeon dual gold 6320. Signed-off-by: Francesco Romani --- .../cm/cpumanager/cpu_assignment_test.go | 50 ++++++ pkg/kubelet/cm/cpumanager/policy_test.go | 95 +++++++++++ .../cm/cpumanager/topology/topology_test.go | 156 ++++++++++++++++++ 3 files changed, 301 insertions(+) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go index bdfffedb676ac..5dfc80b434002 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go @@ -18,6 +18,7 @@ package cpumanager import ( "reflect" + "sort" "testing" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" @@ -79,12 +80,43 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) { mustParseCPUSet(t, "1-78"), []int{}, }, + { + "dual numa, multi socket per per socket, HT, 4 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-79"), + []int{0, 1, 2, 3}, + }, + { + "dual numa, multi socket per per socket, HT, 3 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-19,21-79"), + []int{0, 1, 3}, + }, + { + "dual numa, multi socket per per socket, HT, 2 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-59,61-78"), + []int{0, 1}, + }, + { + "dual numa, multi socket per per socket, HT, 1 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "1-19,21-38,41-60,61-78"), + []int{1}, + }, + { + "dual numa, multi socket per per socket, HT, 0 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-40,42-49,51-68,71-79"), + []int{}, + }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0) result := acc.freeSockets() + sort.Ints(result) if !reflect.DeepEqual(result, tc.expect) { t.Errorf("expected %v to equal %v", result, tc.expect) @@ -160,6 +192,24 @@ func TestCPUAccumulatorFreeNUMANodes(t *testing.T) { mustParseCPUSet(t, "1-9,11-59,61-78"), []int{}, }, + { + "dual numa, multi socket per per socket, HT, 2 NUMA node free", + 
fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-79"), + []int{0, 1}, + }, + { + "dual numa, multi socket per per socket, HT, 1 NUMA node free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-9,11-79"), + []int{1}, + }, + { + "dual numa, multi socket per per socket, HT, 0 sockets free", + fakeTopoMultiSocketDualSocketPerNumaHT, + mustParseCPUSet(t, "0-9,11-59,61-79"), + []int{}, + }, } for _, tc := range testCases { diff --git a/pkg/kubelet/cm/cpumanager/policy_test.go b/pkg/kubelet/cm/cpumanager/policy_test.go index a106ec6b78258..2cd681c395f2b 100644 --- a/pkg/kubelet/cm/cpumanager/policy_test.go +++ b/pkg/kubelet/cm/cpumanager/policy_test.go @@ -515,4 +515,99 @@ var ( 79: {CoreID: 39, SocketID: 1, NUMANodeID: 3}, }, } + /* + FAKE Topology from dual xeon gold 6230 + (see: topoDualSocketMultiNumaPerSocketHT). + We flip NUMA cells and Sockets to exercise the code. + TODO(fromanirh): replace with a real-world topology + once we find a suitable one. + */ + fakeTopoMultiSocketDualSocketPerNumaHT = &topology.CPUTopology{ + NumCPUs: 80, + NumSockets: 4, + NumCores: 40, + NumNUMANodes: 2, + CPUDetails: map[int]topology.CPUInfo{ + 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 2: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 3: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 4: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 5: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 6: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 7: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 8: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 9: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 10: {CoreID: 10, SocketID: 1, NUMANodeID: 0}, + 11: {CoreID: 11, SocketID: 1, NUMANodeID: 0}, + 12: {CoreID: 12, SocketID: 1, NUMANodeID: 0}, + 13: {CoreID: 13, SocketID: 1, NUMANodeID: 0}, + 14: {CoreID: 14, SocketID: 1, NUMANodeID: 0}, + 15: {CoreID: 15, SocketID: 1, NUMANodeID: 0}, + 16: {CoreID: 16, SocketID: 1, NUMANodeID: 0}, + 17: {CoreID: 17, SocketID: 1, NUMANodeID: 0}, + 18: {CoreID: 18, SocketID: 1, NUMANodeID: 0}, + 19: {CoreID: 19, SocketID: 1, NUMANodeID: 0}, + 20: {CoreID: 20, SocketID: 2, NUMANodeID: 1}, + 21: {CoreID: 21, SocketID: 2, NUMANodeID: 1}, + 22: {CoreID: 22, SocketID: 2, NUMANodeID: 1}, + 23: {CoreID: 23, SocketID: 2, NUMANodeID: 1}, + 24: {CoreID: 24, SocketID: 2, NUMANodeID: 1}, + 25: {CoreID: 25, SocketID: 2, NUMANodeID: 1}, + 26: {CoreID: 26, SocketID: 2, NUMANodeID: 1}, + 27: {CoreID: 27, SocketID: 2, NUMANodeID: 1}, + 28: {CoreID: 28, SocketID: 2, NUMANodeID: 1}, + 29: {CoreID: 29, SocketID: 2, NUMANodeID: 1}, + 30: {CoreID: 30, SocketID: 3, NUMANodeID: 1}, + 31: {CoreID: 31, SocketID: 3, NUMANodeID: 1}, + 32: {CoreID: 32, SocketID: 3, NUMANodeID: 1}, + 33: {CoreID: 33, SocketID: 3, NUMANodeID: 1}, + 34: {CoreID: 34, SocketID: 3, NUMANodeID: 1}, + 35: {CoreID: 35, SocketID: 3, NUMANodeID: 1}, + 36: {CoreID: 36, SocketID: 3, NUMANodeID: 1}, + 37: {CoreID: 37, SocketID: 3, NUMANodeID: 1}, + 38: {CoreID: 38, SocketID: 3, NUMANodeID: 1}, + 39: {CoreID: 39, SocketID: 3, NUMANodeID: 1}, + 40: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 41: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 42: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 43: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 44: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 45: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 46: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 47: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 48: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 49: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 50: 
{CoreID: 10, SocketID: 1, NUMANodeID: 0}, + 51: {CoreID: 11, SocketID: 1, NUMANodeID: 0}, + 52: {CoreID: 12, SocketID: 1, NUMANodeID: 0}, + 53: {CoreID: 13, SocketID: 1, NUMANodeID: 0}, + 54: {CoreID: 14, SocketID: 1, NUMANodeID: 0}, + 55: {CoreID: 15, SocketID: 1, NUMANodeID: 0}, + 56: {CoreID: 16, SocketID: 1, NUMANodeID: 0}, + 57: {CoreID: 17, SocketID: 1, NUMANodeID: 0}, + 58: {CoreID: 18, SocketID: 1, NUMANodeID: 0}, + 59: {CoreID: 19, SocketID: 1, NUMANodeID: 0}, + 60: {CoreID: 20, SocketID: 2, NUMANodeID: 1}, + 61: {CoreID: 21, SocketID: 2, NUMANodeID: 1}, + 62: {CoreID: 22, SocketID: 2, NUMANodeID: 1}, + 63: {CoreID: 23, SocketID: 2, NUMANodeID: 1}, + 64: {CoreID: 24, SocketID: 2, NUMANodeID: 1}, + 65: {CoreID: 25, SocketID: 2, NUMANodeID: 1}, + 66: {CoreID: 26, SocketID: 2, NUMANodeID: 1}, + 67: {CoreID: 27, SocketID: 2, NUMANodeID: 1}, + 68: {CoreID: 28, SocketID: 2, NUMANodeID: 1}, + 69: {CoreID: 29, SocketID: 2, NUMANodeID: 1}, + 70: {CoreID: 30, SocketID: 3, NUMANodeID: 1}, + 71: {CoreID: 31, SocketID: 3, NUMANodeID: 1}, + 72: {CoreID: 32, SocketID: 3, NUMANodeID: 1}, + 73: {CoreID: 33, SocketID: 3, NUMANodeID: 1}, + 74: {CoreID: 34, SocketID: 3, NUMANodeID: 1}, + 75: {CoreID: 35, SocketID: 3, NUMANodeID: 1}, + 76: {CoreID: 36, SocketID: 3, NUMANodeID: 1}, + 77: {CoreID: 37, SocketID: 3, NUMANodeID: 1}, + 78: {CoreID: 38, SocketID: 3, NUMANodeID: 1}, + 79: {CoreID: 39, SocketID: 3, NUMANodeID: 1}, + }, + } ) diff --git a/pkg/kubelet/cm/cpumanager/topology/topology_test.go b/pkg/kubelet/cm/cpumanager/topology/topology_test.go index 9cc938226e1ed..0c53839ff0a3c 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology_test.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology_test.go @@ -230,7 +230,163 @@ func Test_Discover(t *testing.T) { }, wantErr: false, }, + { + // FAKE Topology from dual xeon gold 6230 + // (see: dual xeon gold 6230). + // We flip NUMA cells and Sockets to exercise the code. + // TODO(fromanirh): replace with a real-world topology + // once we find a suitable one. + // Note: this is a fake topology. Thus, there is not a "correct" + // representation. This one was created following the these concepts: + // 1. be internally consistent (most important rule) + // 2. be as close as possible as existing HW topologies + // 3. if possible, minimize chances wrt existing HW topologies. 
+ name: "DualNumaMultiSocketPerNumaHT", + machineInfo: cadvisorapi.MachineInfo{ + NumCores: 80, + NumSockets: 4, + Topology: []cadvisorapi.Node{ + {Id: 0, + Cores: []cadvisorapi.Core{ + {SocketID: 0, Id: 0, Threads: []int{0, 40}}, + {SocketID: 0, Id: 1, Threads: []int{1, 41}}, + {SocketID: 0, Id: 2, Threads: []int{2, 42}}, + {SocketID: 0, Id: 8, Threads: []int{3, 43}}, + {SocketID: 0, Id: 9, Threads: []int{4, 44}}, + {SocketID: 0, Id: 16, Threads: []int{5, 45}}, + {SocketID: 0, Id: 17, Threads: []int{6, 46}}, + {SocketID: 0, Id: 18, Threads: []int{7, 47}}, + {SocketID: 0, Id: 24, Threads: []int{8, 48}}, + {SocketID: 0, Id: 25, Threads: []int{9, 49}}, + {SocketID: 1, Id: 3, Threads: []int{10, 50}}, + {SocketID: 1, Id: 4, Threads: []int{11, 51}}, + {SocketID: 1, Id: 10, Threads: []int{12, 52}}, + {SocketID: 1, Id: 11, Threads: []int{13, 53}}, + {SocketID: 1, Id: 12, Threads: []int{14, 54}}, + {SocketID: 1, Id: 19, Threads: []int{15, 55}}, + {SocketID: 1, Id: 20, Threads: []int{16, 56}}, + {SocketID: 1, Id: 26, Threads: []int{17, 57}}, + {SocketID: 1, Id: 27, Threads: []int{18, 58}}, + {SocketID: 1, Id: 28, Threads: []int{19, 59}}, + }, + }, + {Id: 1, + Cores: []cadvisorapi.Core{ + {SocketID: 2, Id: 0, Threads: []int{20, 60}}, + {SocketID: 2, Id: 1, Threads: []int{21, 61}}, + {SocketID: 2, Id: 2, Threads: []int{22, 62}}, + {SocketID: 2, Id: 8, Threads: []int{23, 63}}, + {SocketID: 2, Id: 9, Threads: []int{24, 64}}, + {SocketID: 2, Id: 16, Threads: []int{25, 65}}, + {SocketID: 2, Id: 17, Threads: []int{26, 66}}, + {SocketID: 2, Id: 18, Threads: []int{27, 67}}, + {SocketID: 2, Id: 24, Threads: []int{28, 68}}, + {SocketID: 2, Id: 25, Threads: []int{29, 69}}, + {SocketID: 3, Id: 3, Threads: []int{30, 70}}, + {SocketID: 3, Id: 4, Threads: []int{31, 71}}, + {SocketID: 3, Id: 10, Threads: []int{32, 72}}, + {SocketID: 3, Id: 11, Threads: []int{33, 73}}, + {SocketID: 3, Id: 12, Threads: []int{34, 74}}, + {SocketID: 3, Id: 19, Threads: []int{35, 75}}, + {SocketID: 3, Id: 20, Threads: []int{36, 76}}, + {SocketID: 3, Id: 26, Threads: []int{37, 77}}, + {SocketID: 3, Id: 27, Threads: []int{38, 78}}, + {SocketID: 3, Id: 28, Threads: []int{39, 79}}, + }, + }, + }, + }, + want: &CPUTopology{ + NumCPUs: 80, + NumSockets: 4, + NumCores: 40, + NumNUMANodes: 2, + CPUDetails: map[int]CPUInfo{ + 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 1: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 2: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 3: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 4: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 5: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 6: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 7: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 8: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 9: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 10: {CoreID: 10, SocketID: 1, NUMANodeID: 0}, + 11: {CoreID: 11, SocketID: 1, NUMANodeID: 0}, + 12: {CoreID: 12, SocketID: 1, NUMANodeID: 0}, + 13: {CoreID: 13, SocketID: 1, NUMANodeID: 0}, + 14: {CoreID: 14, SocketID: 1, NUMANodeID: 0}, + 15: {CoreID: 15, SocketID: 1, NUMANodeID: 0}, + 16: {CoreID: 16, SocketID: 1, NUMANodeID: 0}, + 17: {CoreID: 17, SocketID: 1, NUMANodeID: 0}, + 18: {CoreID: 18, SocketID: 1, NUMANodeID: 0}, + 19: {CoreID: 19, SocketID: 1, NUMANodeID: 0}, + 20: {CoreID: 20, SocketID: 2, NUMANodeID: 1}, + 21: {CoreID: 21, SocketID: 2, NUMANodeID: 1}, + 22: {CoreID: 22, SocketID: 2, NUMANodeID: 1}, + 23: {CoreID: 23, SocketID: 2, NUMANodeID: 1}, + 24: {CoreID: 24, SocketID: 2, NUMANodeID: 1}, + 25: {CoreID: 25, SocketID: 2, NUMANodeID: 1}, + 26: {CoreID: 
26, SocketID: 2, NUMANodeID: 1}, + 27: {CoreID: 27, SocketID: 2, NUMANodeID: 1}, + 28: {CoreID: 28, SocketID: 2, NUMANodeID: 1}, + 29: {CoreID: 29, SocketID: 2, NUMANodeID: 1}, + 30: {CoreID: 30, SocketID: 3, NUMANodeID: 1}, + 31: {CoreID: 31, SocketID: 3, NUMANodeID: 1}, + 32: {CoreID: 32, SocketID: 3, NUMANodeID: 1}, + 33: {CoreID: 33, SocketID: 3, NUMANodeID: 1}, + 34: {CoreID: 34, SocketID: 3, NUMANodeID: 1}, + 35: {CoreID: 35, SocketID: 3, NUMANodeID: 1}, + 36: {CoreID: 36, SocketID: 3, NUMANodeID: 1}, + 37: {CoreID: 37, SocketID: 3, NUMANodeID: 1}, + 38: {CoreID: 38, SocketID: 3, NUMANodeID: 1}, + 39: {CoreID: 39, SocketID: 3, NUMANodeID: 1}, + 40: {CoreID: 0, SocketID: 0, NUMANodeID: 0}, + 41: {CoreID: 1, SocketID: 0, NUMANodeID: 0}, + 42: {CoreID: 2, SocketID: 0, NUMANodeID: 0}, + 43: {CoreID: 3, SocketID: 0, NUMANodeID: 0}, + 44: {CoreID: 4, SocketID: 0, NUMANodeID: 0}, + 45: {CoreID: 5, SocketID: 0, NUMANodeID: 0}, + 46: {CoreID: 6, SocketID: 0, NUMANodeID: 0}, + 47: {CoreID: 7, SocketID: 0, NUMANodeID: 0}, + 48: {CoreID: 8, SocketID: 0, NUMANodeID: 0}, + 49: {CoreID: 9, SocketID: 0, NUMANodeID: 0}, + 50: {CoreID: 10, SocketID: 1, NUMANodeID: 0}, + 51: {CoreID: 11, SocketID: 1, NUMANodeID: 0}, + 52: {CoreID: 12, SocketID: 1, NUMANodeID: 0}, + 53: {CoreID: 13, SocketID: 1, NUMANodeID: 0}, + 54: {CoreID: 14, SocketID: 1, NUMANodeID: 0}, + 55: {CoreID: 15, SocketID: 1, NUMANodeID: 0}, + 56: {CoreID: 16, SocketID: 1, NUMANodeID: 0}, + 57: {CoreID: 17, SocketID: 1, NUMANodeID: 0}, + 58: {CoreID: 18, SocketID: 1, NUMANodeID: 0}, + 59: {CoreID: 19, SocketID: 1, NUMANodeID: 0}, + 60: {CoreID: 20, SocketID: 2, NUMANodeID: 1}, + 61: {CoreID: 21, SocketID: 2, NUMANodeID: 1}, + 62: {CoreID: 22, SocketID: 2, NUMANodeID: 1}, + 63: {CoreID: 23, SocketID: 2, NUMANodeID: 1}, + 64: {CoreID: 24, SocketID: 2, NUMANodeID: 1}, + 65: {CoreID: 25, SocketID: 2, NUMANodeID: 1}, + 66: {CoreID: 26, SocketID: 2, NUMANodeID: 1}, + 67: {CoreID: 27, SocketID: 2, NUMANodeID: 1}, + 68: {CoreID: 28, SocketID: 2, NUMANodeID: 1}, + 69: {CoreID: 29, SocketID: 2, NUMANodeID: 1}, + 70: {CoreID: 30, SocketID: 3, NUMANodeID: 1}, + 71: {CoreID: 31, SocketID: 3, NUMANodeID: 1}, + 72: {CoreID: 32, SocketID: 3, NUMANodeID: 1}, + 73: {CoreID: 33, SocketID: 3, NUMANodeID: 1}, + 74: {CoreID: 34, SocketID: 3, NUMANodeID: 1}, + 75: {CoreID: 35, SocketID: 3, NUMANodeID: 1}, + 76: {CoreID: 36, SocketID: 3, NUMANodeID: 1}, + 77: {CoreID: 37, SocketID: 3, NUMANodeID: 1}, + 78: {CoreID: 38, SocketID: 3, NUMANodeID: 1}, + 79: {CoreID: 39, SocketID: 3, NUMANodeID: 1}, + }, + }, + wantErr: false, + }, { name: "DualSocketNoHT", machineInfo: cadvisorapi.MachineInfo{