hs-test: fix CPU alloc when running in parallel

Each Ginkgo parallel process used to take CPUs from the front of the shared
list, so tests running concurrently could be pinned to overlapping cores.
Derive a dedicated core range per process from GinkgoParallelProcess() and
split it between the (at most two) VPP containers of a test. Skip tests that
are unstable or broken with multiple workers via SkipIfMultiWorker().

Type: test

Change-Id: I6062eddffb938880d9ec004c8418a9a731891989
Signed-off-by: Adrian Villin <avillin@cisco.com>
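
For reference, the new scheme carves a dedicated 2*nCpus-wide core range out
of the available CPU list for every Ginkgo process and splits it between the
two VPP containers of a test. Below is a minimal standalone sketch of that
arithmetic; coreRange and parallelProcess are illustrative names standing in
for the logic in cpu.go and GinkgoParallelProcess(), not part of the patch.

    package main

    import "fmt"

    // coreRange mirrors the per-process partitioning introduced in cpu.go.
    // Assumes at most two VPP containers per test, nCpus cores each.
    func coreRange(parallelProcess, vppContainerCount, nCpus int) (int, int) {
        lo := (parallelProcess - 1) * 2 * nCpus
        hi := parallelProcess * 2 * nCpus
        if vppContainerCount == 0 {
            return lo, hi - nCpus // first VPP container: lower half of the range
        }
        return lo + nCpus, hi // second VPP container: upper half of the range
    }

    func main() {
        // e.g. Ginkgo process 2, second container, 1 CPU per VPP -> index range [3, 4)
        fmt.Println(coreRange(2, 1, 1))
    }
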
diff --git a/extras/hs-test/container.go b/extras/hs-test/container.go
index 3ac5eee..0bdc3a2 100644
--- a/extras/hs-test/container.go
+++ b/extras/hs-test/container.go
@@ -239,6 +239,7 @@
 	vpp := new(VppInstance)
 	vpp.container = c
 	vpp.cpus = cpus
+	c.suite.vppContainerCount += 1
 	vpp.additionalConfig = append(vpp.additionalConfig, additionalConfigs...)
 	c.vppInstance = vpp
 	return vpp, nil
diff --git a/extras/hs-test/cpu.go b/extras/hs-test/cpu.go
index a976f47..69b4cab 100644
--- a/extras/hs-test/cpu.go
+++ b/extras/hs-test/cpu.go
@@ -4,6 +4,7 @@
 	"bufio"
 	"errors"
 	"fmt"
+	. "github.com/onsi/ginkgo/v2"
 	"os"
 	"os/exec"
 	"strings"
@@ -16,26 +17,31 @@
 	cpus         []int
 }
 
-func (c *CpuContext) Release() {
-	c.cpuAllocator.cpus = append(c.cpuAllocator.cpus, c.cpus...)
-	c.cpus = c.cpus[:0] // empty the list
-}
-
 type CpuAllocatorT struct {
 	cpus []int
 }
 
 var cpuAllocator *CpuAllocatorT = nil
 
-func (c *CpuAllocatorT) Allocate(nCpus int) (*CpuContext, error) {
+func (c *CpuAllocatorT) Allocate(vppContainerCount int, nCpus int) (*CpuContext, error) {
 	var cpuCtx CpuContext
-
-	if len(c.cpus) < nCpus {
-		return nil, fmt.Errorf("could not allocate %d CPUs; available: %d", nCpus, len(c.cpus))
+	maxCpu := GinkgoParallelProcess() * 2 * nCpus
+	minCpu := (GinkgoParallelProcess() - 1) * 2 * nCpus
+	if len(c.cpus) < maxCpu {
+		vppContainerCount += 1
+		err := fmt.Errorf("could not allocate %d CPUs; available: %d; attempted to allocate cores %d-%d",
+			nCpus*vppContainerCount, len(c.cpus), minCpu, minCpu+nCpus*vppContainerCount)
+		return nil, err
 	}
-	cpuCtx.cpus = c.cpus[0:nCpus]
+	if vppContainerCount == 0 {
+		cpuCtx.cpus = c.cpus[minCpu : maxCpu-nCpus]
+	} else if vppContainerCount == 1 {
+		cpuCtx.cpus = c.cpus[minCpu+nCpus : maxCpu]
+	} else {
+		return nil, fmt.Errorf("too many VPP containers; CPU allocation for >2 VPP containers is not implemented yet")
+	}
+
 	cpuCtx.cpuAllocator = c
-	c.cpus = c.cpus[nCpus:]
 	return &cpuCtx, nil
 }
 
diff --git a/extras/hs-test/echo_test.go b/extras/hs-test/echo_test.go
index 33728db..ce852be 100644
--- a/extras/hs-test/echo_test.go
+++ b/extras/hs-test/echo_test.go
@@ -21,7 +21,9 @@
 	s.assertNotContains(o, "failed:")
 }
 
+// unstable with multiple workers
 func TcpWithLossTest(s *VethsSuite) {
+	s.SkipIfMultiWorker()
 	serverVpp := s.getContainerByName("server-vpp").vppInstance
 
 	serverVeth := s.getInterfaceByName(serverInterfaceName)
diff --git a/extras/hs-test/hst_suite.go b/extras/hs-test/hst_suite.go
index 35553f0..9cb79c5 100644
--- a/extras/hs-test/hst_suite.go
+++ b/extras/hs-test/hst_suite.go
@@ -30,18 +30,19 @@
 var vppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
 
 type HstSuite struct {
-	containers       map[string]*Container
-	volumes          []string
-	netConfigs       []NetConfig
-	netInterfaces    map[string]*NetInterface
-	ip4AddrAllocator *Ip4AddressAllocator
-	testIds          map[string]string
-	cpuAllocator     *CpuAllocatorT
-	cpuContexts      []*CpuContext
-	cpuPerVpp        int
-	pid              string
-	logger           *log.Logger
-	logFile          *os.File
+	containers        map[string]*Container
+	vppContainerCount int
+	volumes           []string
+	netConfigs        []NetConfig
+	netInterfaces     map[string]*NetInterface
+	ip4AddrAllocator  *Ip4AddressAllocator
+	testIds           map[string]string
+	cpuAllocator      *CpuAllocatorT
+	cpuContexts       []*CpuContext
+	cpuPerVpp         int
+	pid               string
+	logger            *log.Logger
+	logFile           *os.File
 }
 
 func (s *HstSuite) SetupSuite() {
@@ -61,7 +62,7 @@
 }
 
 func (s *HstSuite) AllocateCpus() []int {
-	cpuCtx, err := s.cpuAllocator.Allocate(s.cpuPerVpp)
+	cpuCtx, err := s.cpuAllocator.Allocate(s.vppContainerCount, s.cpuPerVpp)
 	s.assertNil(err)
 	s.AddCpuContext(cpuCtx)
 	return cpuCtx.cpus
@@ -82,9 +83,6 @@
 	if *isPersistent {
 		return
 	}
-	for _, c := range s.cpuContexts {
-		c.Release()
-	}
 	s.resetContainers()
 	s.removeVolumes()
 	s.ip4AddrAllocator.deleteIpAddresses()
@@ -98,6 +96,7 @@
 
 func (s *HstSuite) SetupTest() {
 	s.log("Test Setup")
+	s.vppContainerCount = 0
 	s.skipIfUnconfiguring()
 	s.setupVolumes()
 	s.setupContainers()
@@ -157,7 +156,7 @@
 	for _, container := range s.containers {
 		out, err := container.log(20)
 		if err != nil {
-			fmt.Printf("An error occured while obtaining '%s' container logs: %s\n", container.name, fmt.Sprint(err))
+			s.log("An error occured while obtaining '" + container.name + "' container logs: " + fmt.Sprint(err))
 			continue
 		}
 		s.log("\nvvvvvvvvvvvvvvv " +
diff --git a/extras/hs-test/http_test.go b/extras/hs-test/http_test.go
index 3741619..94cb0ca 100644
--- a/extras/hs-test/http_test.go
+++ b/extras/hs-test/http_test.go
@@ -272,7 +272,9 @@
 	return nil
 }
 
+// unstable with multiple workers
 func NginxPerfCpsTest(s *NoTopoSuite) {
+	s.SkipIfMultiWorker()
 	s.assertNil(runNginxPerf(s, "cps", "ab"))
 }
 
diff --git a/extras/hs-test/mirroring_test.go b/extras/hs-test/mirroring_test.go
index 6c5a860..1fd15dd 100644
--- a/extras/hs-test/mirroring_test.go
+++ b/extras/hs-test/mirroring_test.go
@@ -8,7 +8,9 @@
 	registerNginxTests(MirroringTest)
 }
 
+// broken when CPUS > 1
 func MirroringTest(s *NginxSuite) {
+	s.SkipIfMultiWorker()
 	proxyAddress := s.getInterfaceByName(mirroringClientInterfaceName).peer.ip4AddressString()
 
 	path := "/64B.json"
diff --git a/extras/hs-test/vppinstance.go b/extras/hs-test/vppinstance.go
index 11f68a6..3276c2d 100644
--- a/extras/hs-test/vppinstance.go
+++ b/extras/hs-test/vppinstance.go
@@ -456,6 +456,7 @@
 	}
 	c.newStanza("cpu").
 		append(fmt.Sprintf("main-core %d", vpp.cpus[0]))
+	vpp.getSuite().log(fmt.Sprintf("main-core %d", vpp.cpus[0]))
 	workers := vpp.cpus[1:]
 
 	if len(workers) > 0 {
@@ -466,6 +467,7 @@
 			s = s + fmt.Sprintf("%d", workers[i])
 		}
 		c.append(fmt.Sprintf("corelist-workers %s", s))
+		vpp.getSuite().log("corelist-workers " + s)
 	}
 	return c.close().toString()
 }