hs-test: support for multiple workers
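Add a CPUS make variable (default 1) that is passed through the test
script to the framework as --cpus. A cgroup-backed allocator (cpu.go)
reads the available cores from cpuset.cpus.effective and assigns a set
of them to every VPP instance; the generated startup.conf then carries
a matching cpu stanza, e.g. (roughly) with two cores per instance:

  cpu {
    main-core 0
    corelist-workers 1
  }

Tests that cannot run with multiple VPP workers can call
SkipIfMultiWorker().

Example:

  make test CPUS=2 TEST=<test-name>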
Type: test
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
Change-Id: Ie90e4b02c268bc3ca40171b03829f5686fb83162
diff --git a/extras/hs-test/Makefile b/extras/hs-test/Makefile
index 14d95fc..3d7673a 100644
--- a/extras/hs-test/Makefile
+++ b/extras/hs-test/Makefile
@@ -19,6 +19,10 @@
DEBUG=false
endif
+ifeq ($(CPUS),)
+CPUS=1
+endif
+
ifeq ($(UBUNTU_CODENAME),)
UBUNTU_CODENAME=$(shell grep '^UBUNTU_CODENAME=' /etc/os-release | cut -f2- -d=)
endif
@@ -47,6 +51,7 @@
@echo " UNCONFIGURE=[true|false] - unconfigure selected test"
@echo " DEBUG=[true|false] - attach VPP to GDB"
@echo " TEST=[test-name] - specific test to run"
+ @echo " CPUS=[n-cpus] - number of cpus to run with vpp"
@echo
@echo "List of all tests:"
$(call list_tests)
@@ -64,7 +69,7 @@
.PHONY: test
test: .deps.ok .build.vpp
@bash ./test --persist=$(PERSIST) --verbose=$(VERBOSE) \
- --unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST)
+ --unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS)
build-go:
go build ./tools/http_server
diff --git a/extras/hs-test/container.go b/extras/hs-test/container.go
index 1dc49b7..fd3aa47 100644
--- a/extras/hs-test/container.go
+++ b/extras/hs-test/container.go
@@ -216,16 +216,12 @@
return cliOption
}
-func (c *Container) newVppInstance(additionalConfig ...Stanza) (*VppInstance, error) {
+func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*VppInstance, error) {
vpp := new(VppInstance)
vpp.container = c
-
- if len(additionalConfig) > 0 {
- vpp.additionalConfig = additionalConfig[0]
- }
-
+ vpp.cpus = cpus
+ vpp.additionalConfig = append(vpp.additionalConfig, additionalConfigs...)
c.vppInstance = vpp
-
return vpp, nil
}
diff --git a/extras/hs-test/cpu.go b/extras/hs-test/cpu.go
new file mode 100644
index 0000000..e17bc11
--- /dev/null
+++ b/extras/hs-test/cpu.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+var CPU_PATH = "/sys/fs/cgroup/cpuset.cpus.effective"
+
+type CpuContext struct {
+ cpuAllocator *CpuAllocatorT
+ cpus []int
+}
+
+func (c *CpuContext) Release() {
+ c.cpuAllocator.cpus = append(c.cpuAllocator.cpus, c.cpus...)
+ c.cpus = c.cpus[:0] // empty the list
+}
+
+type CpuAllocatorT struct {
+ cpus []int
+}
+
+var cpuAllocator *CpuAllocatorT = nil
+
+func (c *CpuAllocatorT) Allocate(nCpus int) (*CpuContext, error) {
+ var cpuCtx CpuContext
+
+ if len(c.cpus) < nCpus {
+ return nil, fmt.Errorf("could not allocate %d CPUs; available: %d", nCpus, len(c.cpus))
+ }
+ cpuCtx.cpus = c.cpus[0:nCpus]
+ cpuCtx.cpuAllocator = c
+ c.cpus = c.cpus[nCpus:]
+ return &cpuCtx, nil
+}
+
+func (c *CpuAllocatorT) readCpus(fname string) error {
+ var first, last int
+ file, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
+ _, err = fmt.Sscanf(line, "%d-%d", &first, &last)
+ if err != nil {
+ return err
+ }
+ for i := first; i <= last; i++ {
+ c.cpus = append(c.cpus, i)
+ }
+ return nil
+}
+
+func CpuAllocator() (*CpuAllocatorT, error) {
+ if cpuAllocator == nil {
+ cpuAllocator = new(CpuAllocatorT)
+ err := cpuAllocator.readCpus(CPU_PATH)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return cpuAllocator, nil
+}
diff --git a/extras/hs-test/hst_suite.go b/extras/hs-test/hst_suite.go
index 042b4fe..1fcffa4 100644
--- a/extras/hs-test/hst_suite.go
+++ b/extras/hs-test/hst_suite.go
@@ -14,13 +14,14 @@
)
const (
- defaultNetworkNumber int = 1
+ DEFAULT_NETWORK_NUM int = 1
)
var isPersistent = flag.Bool("persist", false, "persists topology config")
var isVerbose = flag.Bool("verbose", false, "verbose test output")
var isUnconfiguring = flag.Bool("unconfigure", false, "remove topology")
var isVppDebug = flag.Bool("debug", false, "attach gdb to vpp")
+var nConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
type HstSuite struct {
suite.Suite
@@ -30,6 +31,29 @@
netInterfaces map[string]*NetInterface
addresser *Addresser
testIds map[string]string
+ cpuAllocator *CpuAllocatorT
+ cpuContexts []*CpuContext
+ cpuPerVpp int
+}
+
+func (s *HstSuite) SetupSuite() {
+ var err error
+ s.cpuAllocator, err = CpuAllocator()
+ if err != nil {
+ s.FailNow("failed to init cpu allocator: %v", err)
+ }
+ s.cpuPerVpp = *nConfiguredCpus
+}
+
+func (s *HstSuite) AllocateCpus() []int {
+ cpuCtx, err := s.cpuAllocator.Allocate(s.cpuPerVpp)
+ s.assertNil(err)
+ s.AddCpuContext(cpuCtx)
+ return cpuCtx.cpus
+}
+
+func (s *HstSuite) AddCpuContext(cpuCtx *CpuContext) {
+ s.cpuContexts = append(s.cpuContexts, cpuCtx)
}
func (s *HstSuite) TearDownSuite() {
@@ -40,6 +64,9 @@
if *isPersistent {
return
}
+ for _, c := range s.cpuContexts {
+ c.Release()
+ }
s.resetContainers()
s.removeVolumes()
}
@@ -66,7 +93,7 @@
func (s *HstSuite) setupContainers() {
for _, container := range s.containers {
- if container.isOptional == false {
+ if !container.isOptional {
container.run()
}
}
@@ -130,6 +157,12 @@
s.T().SkipNow()
}
+func (s *HstSuite) SkipIfMultiWorker(args ...any) {
+ if *nConfiguredCpus > 1 {
+ s.skip("test case not supported with multiple vpp workers")
+ }
+}
+
func (s *HstSuite) resetContainers() {
for _, container := range s.containers {
container.stop()
diff --git a/extras/hs-test/netconfig.go b/extras/hs-test/netconfig.go
index 9e259ab..6059b7b 100644
--- a/extras/hs-test/netconfig.go
+++ b/extras/hs-test/netconfig.go
@@ -77,7 +77,7 @@
var err error
newInterface.addresser = a
newInterface.name = cfg["name"].(string)
- newInterface.networkNumber = defaultNetworkNumber
+ newInterface.networkNumber = DEFAULT_NETWORK_NUM
if interfaceType, ok := cfg["type"]; ok {
newInterface.category = interfaceType.(string)
diff --git a/extras/hs-test/suite_nginx_test.go b/extras/hs-test/suite_nginx_test.go
index b61ea6c..c250ed7 100644
--- a/extras/hs-test/suite_nginx_test.go
+++ b/extras/hs-test/suite_nginx_test.go
@@ -14,27 +14,25 @@
}
func (s *NginxSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
s.loadNetworkTopology("2taps")
-
s.loadContainerTopology("nginxProxyAndServer")
}
func (s *NginxSuite) SetupTest() {
- s.skipIfUnconfiguring()
-
- s.setupVolumes()
- s.setupContainers()
+ s.HstSuite.SetupTest()
// Setup test conditions
- var startupConfig Stanza
- startupConfig.
+ var sessionConfig Stanza
+ sessionConfig.
newStanza("session").
append("enable").
append("use-app-socket-api").close()
+ cpus := s.AllocateCpus()
// ... for proxy
vppProxyContainer := s.getContainerByName(vppProxyContainerName)
- proxyVpp, _ := vppProxyContainer.newVppInstance(startupConfig)
+ proxyVpp, _ := vppProxyContainer.newVppInstance(cpus, sessionConfig)
proxyVpp.start()
clientInterface := s.netInterfaces[mirroringClientInterfaceName]
diff --git a/extras/hs-test/suite_no_topo_test.go b/extras/hs-test/suite_no_topo_test.go
index 8ef56b2..8f7c876 100644
--- a/extras/hs-test/suite_no_topo_test.go
+++ b/extras/hs-test/suite_no_topo_test.go
@@ -12,25 +12,24 @@
}
func (s *NoTopoSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
s.loadNetworkTopology("tap")
-
s.loadContainerTopology("single")
}
func (s *NoTopoSuite) SetupTest() {
- s.skipIfUnconfiguring()
- s.setupVolumes()
- s.setupContainers()
+ s.HstSuite.SetupTest()
// Setup test conditions
- var startupConfig Stanza
- startupConfig.
+ var sessionConfig Stanza
+ sessionConfig.
newStanza("session").
append("enable").
append("use-app-socket-api").close()
+ cpus := s.AllocateCpus()
container := s.getContainerByName(singleTopoContainerVpp)
- vpp, _ := container.newVppInstance(startupConfig)
+ vpp, _ := container.newVppInstance(cpus, sessionConfig)
vpp.start()
tapInterface := s.netInterfaces[tapInterfaceName]
diff --git a/extras/hs-test/suite_ns_test.go b/extras/hs-test/suite_ns_test.go
index 34fc9ec..3bf3cc7 100644
--- a/extras/hs-test/suite_ns_test.go
+++ b/extras/hs-test/suite_ns_test.go
@@ -11,27 +11,26 @@
}
func (s *NsSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
s.configureNetworkTopology("ns")
-
s.loadContainerTopology("ns")
}
func (s *NsSuite) SetupTest() {
- s.skipIfUnconfiguring()
- s.setupVolumes()
- s.setupContainers()
+ s.HstSuite.SetupTest()
// Setup test conditions
- var startupConfig Stanza
- startupConfig.
+ var sessionConfig Stanza
+ sessionConfig.
newStanza("session").
append("enable").
append("use-app-socket-api").
append("evt_qs_memfd_seg").
append("event-queue-length 100000").close()
+ cpus := s.AllocateCpus()
container := s.getContainerByName("vpp")
- vpp, _ := container.newVppInstance(startupConfig)
+ vpp, _ := container.newVppInstance(cpus, sessionConfig)
vpp.start()
idx, err := vpp.createAfPacket(s.netInterfaces[serverInterface])
diff --git a/extras/hs-test/suite_tap_test.go b/extras/hs-test/suite_tap_test.go
index 96f475c..8b0950a 100644
--- a/extras/hs-test/suite_tap_test.go
+++ b/extras/hs-test/suite_tap_test.go
@@ -10,6 +10,6 @@
func (s *TapSuite) SetupSuite() {
time.Sleep(1 * time.Second)
-
+ s.HstSuite.SetupSuite()
s.configureNetworkTopology("tap")
}
diff --git a/extras/hs-test/suite_veth_test.go b/extras/hs-test/suite_veth_test.go
index be79ce2..bb703df 100644
--- a/extras/hs-test/suite_veth_test.go
+++ b/extras/hs-test/suite_veth_test.go
@@ -16,22 +16,18 @@
func (s *VethsSuite) SetupSuite() {
time.Sleep(1 * time.Second)
-
+ s.HstSuite.SetupSuite()
s.configureNetworkTopology("2peerVeth")
-
s.loadContainerTopology("2peerVeth")
}
func (s *VethsSuite) SetupTest() {
- s.skipIfUnconfiguring()
-
- s.setupVolumes()
- s.setupContainers()
+ s.HstSuite.SetupTest()
// Setup test conditions
- var startupConfig Stanza
- startupConfig.
+ var sessionConfig Stanza
+ sessionConfig.
newStanza("session").
append("enable").
append("use-app-socket-api").close()
@@ -39,7 +35,8 @@
// ... For server
serverContainer := s.getContainerByName("server-vpp")
- serverVpp, _ := serverContainer.newVppInstance(startupConfig)
+ cpus := s.AllocateCpus()
+ serverVpp, _ := serverContainer.newVppInstance(cpus, sessionConfig)
s.assertNotNil(serverVpp)
s.setupServerVpp()
@@ -47,7 +44,8 @@
// ... For client
clientContainer := s.getContainerByName("client-vpp")
- clientVpp, _ := clientContainer.newVppInstance(startupConfig)
+ cpus = s.AllocateCpus()
+ clientVpp, _ := clientContainer.newVppInstance(cpus, sessionConfig)
s.assertNotNil(clientVpp)
s.setupClientVpp()
@@ -67,7 +65,6 @@
namespaceSecret := "1"
err = serverVpp.addAppNamespace(1, idx, namespaceSecret)
s.assertNil(err)
-
}
func (s *VethsSuite) setupClientVpp() {
diff --git a/extras/hs-test/test b/extras/hs-test/test
index a886652..db53d5a 100755
--- a/extras/hs-test/test
+++ b/extras/hs-test/test
@@ -38,6 +38,9 @@
unconfigure_set=1
fi
;;
+ --cpus=*)
+ args="$args -cpus ${i#*=}"
+ ;;
--test=*)
tc_name="${i#*=}"
if [ $tc_name != "all" ]; then
diff --git a/extras/hs-test/utils.go b/extras/hs-test/utils.go
index 151567c..4261f4d 100644
--- a/extras/hs-test/utils.go
+++ b/extras/hs-test/utils.go
@@ -126,7 +126,7 @@
if err != nil {
finished <- fmt.Errorf("wget error: '%v\n\n%s'", err, o)
return
- } else if strings.Contains(string(o), "200 OK") == false {
+ } else if !strings.Contains(string(o), "200 OK") {
finished <- fmt.Errorf("wget error: response not 200 OK")
return
}
diff --git a/extras/hs-test/vppinstance.go b/extras/hs-test/vppinstance.go
index c08514e..a9b97bc 100644
--- a/extras/hs-test/vppinstance.go
+++ b/extras/hs-test/vppinstance.go
@@ -72,9 +72,10 @@
type VppInstance struct {
container *Container
- additionalConfig Stanza
+ additionalConfig []Stanza
connection *core.Connection
apiChannel api.Channel
+ cpus []int
}
func (vpp *VppInstance) getSuite() *HstSuite {
@@ -113,7 +114,10 @@
defaultApiSocketFilePath,
defaultLogFilePath,
)
- configContent += vpp.additionalConfig.toString()
+ configContent += vpp.generateCpuConfig()
+ for _, c := range vpp.additionalConfig {
+ configContent += c.toString()
+ }
startupFileName := vpp.getEtcDir() + "/startup.conf"
vpp.container.createFile(startupFileName, configContent)
@@ -341,3 +345,25 @@
vpp.connection.Disconnect()
vpp.apiChannel.Close()
}
+
+func (vpp *VppInstance) generateCpuConfig() string {
+ var c Stanza
+ var s string
+ if len(vpp.cpus) < 1 {
+ return ""
+ }
+ c.newStanza("cpu").
+ append(fmt.Sprintf("main-core %d", vpp.cpus[0]))
+ workers := vpp.cpus[1:]
+
+ if len(workers) > 0 {
+ for i := 0; i < len(workers); i++ {
+ if i != 0 {
+ s = s + ", "
+ }
+ s = s + fmt.Sprintf("%d", workers[i])
+ }
+ c.append(fmt.Sprintf("corelist-workers %s", s))
+ }
+ return c.close().toString()
+}