hs-test: Add CPU pinning test suite
Type: test
Add a suite that verifies VPP launches with the provided
CPU pinning configuration. The CPU configuration is
specified per test.
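For example (see SkipCoresTest in cpu_pinning_test.go),
a test overrides the defaults through VppCpuConfig before
starting VPP:

  vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
  vpp.CpuConfig = VppCpuConfig{
      PinMainCpu:         true,
      PinWorkersCorelist: true,
      SkipCores:          1,
  }
  s.AssertNil(vpp.Start())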
Change-Id: Ic283339676d3b24636fc21156a09a192c1a8d8da
Signed-off-by: Hadi Rayan Al-Sandid <halsandi@cisco.com>
diff --git a/extras/hs-test/cpu_pinning_test.go b/extras/hs-test/cpu_pinning_test.go
new file mode 100644
index 0000000..b8dec65
--- /dev/null
+++ b/extras/hs-test/cpu_pinning_test.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ . "fd.io/hs-test/infra"
+)
+
+func init() {
+ RegisterCpuPinningSoloTests(DefaultCpuConfigurationTest, SkipCoresTest)
+}
+
+// TODO: Add more CPU configuration tests
+
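+// DefaultCpuConfigurationTest starts VPP with the default CPU configuration
+// (pinned main core and workers corelist).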
+func DefaultCpuConfigurationTest(s *CpuPinningSuite) {
+ vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
+ s.AssertNil(vpp.Start())
+}
+
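+// SkipCoresTest starts VPP with skip-cores set, shifting the main/worker
+// CPU assignment by one core.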
+func SkipCoresTest(s *CpuPinningSuite) {
+ skipCoresConfiguration := VppCpuConfig{
+ PinMainCpu: true,
+ PinWorkersCorelist: true,
+ SkipCores: 1,
+ }
+
+ vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
+ vpp.CpuConfig = skipCoresConfiguration
+
+ s.AssertNil(vpp.Start())
+}
diff --git a/extras/hs-test/infra/container.go b/extras/hs-test/infra/container.go
index 1dd8280..3e8ccb4 100644
--- a/extras/hs-test/infra/container.go
+++ b/extras/hs-test/infra/container.go
@@ -249,6 +249,7 @@
vpp := new(VppInstance)
vpp.Container = c
vpp.Cpus = cpus
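+ // apply the default pinning configuration; tests may override vpp.CpuConfig before Start()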
+ vpp.setDefaultCpuConfig()
vpp.AdditionalConfig = append(vpp.AdditionalConfig, additionalConfigs...)
c.VppInstance = vpp
return vpp, nil
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index b2e0693..41c8d29 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -247,6 +247,16 @@
}
}
+// SkipIfNotEnoughAvailableCpus skips the test when the CPU allocator cannot
+// satisfy containerCount containers with nCpus each across parallel Ginkgo processes.
+func (s *HstSuite) SkipIfNotEnoughAvailableCpus(containerCount int, nCpus int) {
+ maxRequestedCpu := GinkgoParallelProcess() * containerCount * nCpus
+
+ if len(s.CpuAllocator.cpus)-1 < maxRequestedCpu {
+ s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", nCpus, containerCount))
+ }
+}
+
func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
imageName := "hs-test/nginx-http3"
diff --git a/extras/hs-test/infra/suite_cpu_pinning.go b/extras/hs-test/infra/suite_cpu_pinning.go
new file mode 100644
index 0000000..629d2da
--- /dev/null
+++ b/extras/hs-test/infra/suite_cpu_pinning.go
@@ -0,0 +1,101 @@
+package hst
+
+import (
+ "fmt"
+ . "github.com/onsi/ginkgo/v2"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+var cpuPinningTests = map[string][]func(s *CpuPinningSuite){}
+var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}
+
+type CpuPinningSuite struct {
+ HstSuite
+}
+
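+// Registered test functions are turned into Ginkgo specs by the Describe
+// containers below.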
+func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
+ cpuPinningTests[getTestFilename()] = tests
+}
+
+func RegisterCpuPinningSoloTests(tests ...func(s *CpuPinningSuite)) {
+ cpuPinningSoloTests[getTestFilename()] = tests
+}
+
+func (s *CpuPinningSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("tap")
+ s.LoadContainerTopology("singleCpuPinning")
+}
+
+func (s *CpuPinningSuite) SetupTest() {
+ // Skip if we cannot allocate 3 CPUs for the test container
+ s.CpuPerVpp = 3
+ s.SkipIfNotEnoughAvailableCpus(1, s.CpuPerVpp)
+ s.HstSuite.SetupTest()
+ container := s.GetContainerByName(SingleTopoContainerVpp)
+ vpp, err := container.newVppInstance(container.AllocatedCpus)
+ s.AssertNotNil(vpp, fmt.Sprint(err))
+}
+
+var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
+ var s CpuPinningSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range cpuPinningTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("CpuPinningSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s CpuPinningSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range cpuPinningSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/vppinstance.go b/extras/hs-test/infra/vppinstance.go
index 48d2b78..d4f5700 100644
--- a/extras/hs-test/infra/vppinstance.go
+++ b/extras/hs-test/infra/vppinstance.go
@@ -88,6 +88,13 @@
Connection *core.Connection
ApiStream api.Stream
Cpus []int
+ CpuConfig VppCpuConfig
+}
+
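+// VppCpuConfig controls the 'cpu' stanza generated into VPP's startup
+// configuration.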
+type VppCpuConfig struct {
+ PinMainCpu bool // emit 'main-core <cpu>'
+ PinWorkersCorelist bool // emit 'corelist-workers <list>'; otherwise emit 'workers <count>'
+ SkipCores int // emit 'skip-cores <n>' and shift the main/worker CPU assignment
}
func (vpp *VppInstance) getSuite() *HstSuite {
@@ -131,7 +138,7 @@
defaultApiSocketFilePath,
defaultLogFilePath,
)
- configContent += vpp.generateCpuConfig()
+ configContent += vpp.generateVPPCpuConfig()
for _, c := range vpp.AdditionalConfig {
configContent += c.ToString()
}
@@ -476,26 +483,55 @@
vpp.ApiStream.Close()
}
-func (vpp *VppInstance) generateCpuConfig() string {
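+// setDefaultCpuConfig pins the main core and the workers corelist and skips
+// no cores.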
+func (vpp *VppInstance) setDefaultCpuConfig() {
+ vpp.CpuConfig.PinMainCpu = true
+ vpp.CpuConfig.PinWorkersCorelist = true
+ vpp.CpuConfig.SkipCores = 0
+}
+
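+// generateVPPCpuConfig renders the 'cpu' stanza; e.g. with Cpus=[2 3 4] and
+// the default config it emits 'main-core 2' and 'corelist-workers 3, 4'.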
+func (vpp *VppInstance) generateVPPCpuConfig() string {
var c Stanza
var s string
+ startCpu := 0
if len(vpp.Cpus) < 1 {
return ""
}
- c.NewStanza("cpu").
- Append(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
- vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
- workers := vpp.Cpus[1:]
+
+ c.NewStanza("cpu")
+
+ // If skip-cores is set, emit it and start assigning the main/worker CPUs after the skipped cores
+ if vpp.CpuConfig.SkipCores != 0 {
+ c.Append(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+ vpp.getSuite().Log(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+ }
+
+ if len(vpp.Cpus) > vpp.CpuConfig.SkipCores {
+ startCpu = vpp.CpuConfig.SkipCores
+ }
+
+ if vpp.CpuConfig.PinMainCpu {
+ c.Append(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+ vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+ }
+
+ workers := vpp.Cpus[startCpu+1:]
if len(workers) > 0 {
- for i := 0; i < len(workers); i++ {
- if i != 0 {
- s = s + ", "
+ if vpp.CpuConfig.PinWorkersCorelist {
+ for i := 0; i < len(workers); i++ {
+ if i != 0 {
+ s = s + ", "
+ }
+ s = s + fmt.Sprintf("%d", workers[i])
}
- s = s + fmt.Sprintf("%d", workers[i])
+ c.Append(fmt.Sprintf("corelist-workers %s", s))
+ vpp.getSuite().Log("corelist-workers " + s)
+ } else {
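+ // no corelist pinning: emit only a worker count and let VPP choose the cores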
+ s = fmt.Sprintf("%d", len(workers))
+ c.Append(fmt.Sprintf("workers %s", s))
+ vpp.getSuite().Log("workers " + s)
}
- c.Append(fmt.Sprintf("corelist-workers %s", s))
- vpp.getSuite().Log("corelist-workers " + s)
}
+
return c.Close().ToString()
}
diff --git a/extras/hs-test/topo-containers/singleCpuPinning.yaml b/extras/hs-test/topo-containers/singleCpuPinning.yaml
new file mode 100644
index 0000000..6e673aa
--- /dev/null
+++ b/extras/hs-test/topo-containers/singleCpuPinning.yaml
@@ -0,0 +1,11 @@
+---
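+# Single-container topology for the CPU pinning suite: one VPP container
+# with a shared default work dir.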
+volumes:
+ - volume: &shared-vol
+ host-dir: "$HST_VOLUME_DIR/shared-vol"
+
+containers:
+ - name: "vpp"
+ volumes:
+ - <<: *shared-vol
+ container-dir: "/tmp/vpp"
+ is-default-work-dir: true