Upgrade to rmr which required two changes
* switch to rmr 1.8.1 to pick up a non blocking variant of rmr that deals with bad routing tables (no hanging connections / blocking calls)
* improve test receiver to behave with this setup
* add integration test for this case
* this also switches past 1.5.x, which included another change that altered the behavior of rts; deal with this with a change to a1s helmchart that causes the sourceid to be set to a1s service name, which was not needed prior
* improve integration tests overall
Change-Id: I155994b6f512485a5a73fc31923a46d182aeda87
Signed-off-by: Tommy Carpenter <tc677g@att.com>
diff --git a/Dockerfile b/Dockerfile
index 1e2714a..46739e2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -17,7 +17,7 @@
# install a well known working rmr
FROM python:3.7-alpine
RUN apk update && apk add autoconf automake build-base cmake libtool ninja pkgconfig git
-RUN git clone --branch 1.3.0 https://gerrit.o-ran-sc.org/r/ric-plt/lib/rmr \
+RUN git clone --branch 1.8.1 https://gerrit.o-ran-sc.org/r/ric-plt/lib/rmr \
&& cd rmr \
&& mkdir build \
&& cd build \
diff --git a/a1/a1rmr.py b/a1/a1rmr.py
index 3858704..abbb84f 100644
--- a/a1/a1rmr.py
+++ b/a1/a1rmr.py
@@ -72,6 +72,7 @@
# we failed all RETRY_TIMES
logger.debug("Send failed all %s times, stopping", RETRY_TIMES)
+ return None
def dequeue_all_waiting_messages(filter_type=None):
diff --git a/container-tag.yaml b/container-tag.yaml
index bdcf339..fc3acb8 100644
--- a/container-tag.yaml
+++ b/container-tag.yaml
@@ -1,4 +1,4 @@
# The Jenkins job uses this string for the tag in the image name
# for example nexus3.o-ran-sc.org:10004/my-image-name:my-tag
---
-tag: 0.12.0-NOT_FOR_USE_YET
+tag: 0.12.1-NOT_FOR_USE_YET
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index 7a1894f..a146357 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -30,6 +30,15 @@
* Release 1.0.0 will be the Release A version of A1
+[0.12.1] - 9/20/2019
+::
+ * switch to rmr 1.8.1 to pick up a non blocking variant of rmr that deals with bad routing tables (no hanging connections / blocking calls)
+ * improve test receiver to behave with this setup
+ * add integration test for this case
+ * this also switches past 1.5.x, which included another change that altered the behavior of rts; deal with this with a change to a1s helmchart (env: `RMR_SRC_ID`) that causes the sourceid to be set to a1s service name, which was not needed prior
+ * improve integration tests overall
+
+
[0.12.0] - 9/19/2019
::
diff --git a/integration_tests/Dockerfile b/integration_tests/Dockerfile
index 1087b47..19aae4a 100644
--- a/integration_tests/Dockerfile
+++ b/integration_tests/Dockerfile
@@ -17,7 +17,7 @@
# install a well known working rmr
FROM python:3.7-alpine
RUN apk update && apk add autoconf automake build-base cmake libtool ninja pkgconfig git
-RUN git clone --branch 1.3.0 https://gerrit.o-ran-sc.org/r/ric-plt/lib/rmr \
+RUN git clone --branch 1.8.1 https://gerrit.o-ran-sc.org/r/ric-plt/lib/rmr \
&& cd rmr \
&& mkdir build \
&& cd build \
diff --git a/integration_tests/a1mediator/Chart.yaml b/integration_tests/a1mediator/Chart.yaml
index b4f509e..7e405b9 100644
--- a/integration_tests/a1mediator/Chart.yaml
+++ b/integration_tests/a1mediator/Chart.yaml
@@ -1,4 +1,4 @@
apiVersion: v1
description: A1 Helm chart for Kubernetes
name: a1mediator
-version: 0.12.0
+version: 0.12.1
diff --git a/integration_tests/a1mediator/templates/config.yaml b/integration_tests/a1mediator/templates/config.yaml
index bd0dae3..1bb639c 100644
--- a/integration_tests/a1mediator/templates/config.yaml
+++ b/integration_tests/a1mediator/templates/config.yaml
@@ -7,5 +7,6 @@
newrt|start
rte|20000|testreceiverrmrservice:4560
rte|20001|delayreceiverrmrservice:4563
- rte|21024|{{ .Values.rmrservice.name }}:{{ .Values.rmrservice.port }}
+ # purposefully bad route to make sure rmr doesn't block on non listening receivers:
+ rte|20002|testreceiverrmrservice:4563
newrt|end
diff --git a/integration_tests/a1mediator/templates/deployment.yaml b/integration_tests/a1mediator/templates/deployment.yaml
index a30c21c..e94045d 100644
--- a/integration_tests/a1mediator/templates/deployment.yaml
+++ b/integration_tests/a1mediator/templates/deployment.yaml
@@ -25,6 +25,9 @@
mountPath: /opt/route/local.rt
subPath: local.rt
env:
+ # this sets the source field in messages from a1 to point back to a1s service name, rather than its random pod name
+ - name: RMR_SRC_ID
+ value: {{ .Values.rmrservice.name }}
- name: PYTHONUNBUFFERED
value: "1"
- name: RMR_RETRY_TIMES
@@ -35,7 +38,6 @@
- name: http
containerPort: {{ .Values.httpservice.port }}
protocol: TCP
-
livenessProbe:
httpGet:
path: /a1-p/healthcheck
diff --git a/integration_tests/getlogs.sh b/integration_tests/getlogs.sh
new file mode 100755
index 0000000..b952047
--- /dev/null
+++ b/integration_tests/getlogs.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+kubectl get pods --namespace=default | awk '{ print $1 }' | egrep '^a1-a1mediator-' | xargs kubectl logs
+kubectl get pods --namespace=default | awk '{ print $1 }' | egrep '^testreceiver-' | xargs -I X kubectl logs X testreceiver
+kubectl get pods --namespace=default | awk '{ print $1 }' | egrep '^testreceiver-' | xargs -I X kubectl logs X delayreceiver
+
diff --git a/integration_tests/receiver.py b/integration_tests/receiver.py
index cb5f82c..0dc5e4a 100644
--- a/integration_tests/receiver.py
+++ b/integration_tests/receiver.py
@@ -59,5 +59,13 @@
sbuf.contents.mtype = 21024
print("Pre reply summary: {}".format(rmr.message_summary(sbuf)))
time.sleep(DELAY)
- sbuf = rmr.rmr_rts_msg(mrc, sbuf)
- print("Post reply summary: {}".format(rmr.message_summary(sbuf)))
+
+ # try up to 5 times to send back the ack
+ for _ in range(5):
+ sbuf = rmr.rmr_rts_msg(mrc, sbuf)
+ post_reply_summary = rmr.message_summary(sbuf)
+ print("Post reply summary: {}".format(post_reply_summary))
+ if post_reply_summary["message state"] == 10 and post_reply_summary["message status"] == "RMR_ERR_RETRY":
+ time.sleep(1)
+ else:
+ break
diff --git a/integration_tests/test_a1.tavern.yaml b/integration_tests/test_a1.tavern.yaml
index 25d06be..f2a454c 100644
--- a/integration_tests/test_a1.tavern.yaml
+++ b/integration_tests/test_a1.tavern.yaml
@@ -184,7 +184,7 @@
response:
status_code: 404
- - name: test the delay policy
+ - name: create delay policy instance
request:
url: http://localhost:10000/a1-p/policytypes/20001/policies/delaytest
method: PUT
@@ -205,7 +205,8 @@
test: foo
- name: test the admission control policy status get
- delay_before: 8 # give it a few seconds for rmr ; delay reciever sleeps for 5 seconds by default
+ max_retries: 3
+ delay_before: 5 # give it a few seconds for rmr ; delay receiver sleeps for 5 seconds by default
request:
url: http://localhost:10000/a1-p/policytypes/20001/policies/delaytest/status
method: GET
@@ -215,6 +216,50 @@
- handler_id: delay_receiver
status: OK
+---
+
+test_name: test bad routing file endpoint
+
+stages:
+
+ - name: put the type
+ request:
+ url: http://localhost:10000/a1-p/policytypes/20002
+ method: PUT
+ json:
+ name: test policy
+ description: just for testing
+ policy_type_id: 20002
+ create_schema:
+ "$schema": http://json-schema.org/draft-07/schema#
+ type: object
+ properties:
+ test:
+ type: string
+ required:
+ - test
+ additionalProperties: false
+
+ - name: create policy instance that will go to a broken routing endpoint
+ request:
+ url: http://localhost:10000/a1-p/policytypes/20002/policies/brokentest
+ method: PUT
+ json:
+ test: foo
+ headers:
+ content-type: application/json
+ response:
+ status_code: 201
+
+ - name: should be no status
+ delay_before: 5 # give it a few seconds for rmr ; delay receiver sleeps for 5 seconds by default
+ request:
+ url: http://localhost:10000/a1-p/policytypes/20002/policies/brokentest/status
+ method: GET
+ response:
+ status_code: 200
+ body: []
+
---
@@ -224,7 +269,7 @@
- name: bad type get
request:
- url: http://localhost:10000/a1-p/policytypes/20002
+ url: http://localhost:10000/a1-p/policytypes/20666
method: GET
response:
status_code: 404
diff --git a/integration_tests/testreceiver/templates/config.yaml b/integration_tests/testreceiver/templates/config.yaml
index 4ed857d..78a9f60 100644
--- a/integration_tests/testreceiver/templates/config.yaml
+++ b/integration_tests/testreceiver/templates/config.yaml
@@ -5,7 +5,6 @@
data:
local.rt: |
newrt|start
- rte|20000|{{ .Values.testrmrservice.name }}:{{ .Values.testrmrservice.port }}
rte|21024|a1rmrservice:4562
newrt|end
@@ -18,6 +17,5 @@
data:
local.rt: |
newrt|start
- rte|20001|{{ .Values.delayrmrservice.name }}:{{ .Values.delayrmrservice.port }}
rte|21024|a1rmrservice:4562
newrt|end
diff --git a/setup.py b/setup.py
index 441c054..6f7375e 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
setup(
name="a1",
- version="0.12.0",
+ version="0.12.1",
packages=find_packages(exclude=["tests.*", "tests"]),
author="Tommy Carpenter",
description="RIC A1 Mediator for policy/intent changes",
diff --git a/tox-integration.ini b/tox-integration.ini
index f6191e0..1083f4c 100644
--- a/tox-integration.ini
+++ b/tox-integration.ini
@@ -25,6 +25,7 @@
echo
pkill
kubectl
+ getlogs.sh
passenv = *
deps =
tavern
@@ -35,8 +36,7 @@
helm install --devel testreceiver -n testreceiver
helm install --devel a1mediator -n a1
# wait for helm charts
- sleep 20
- kubectl get pods --all-namespaces
+ sleep 30
./portforward.sh
sleep 2
commands=
@@ -49,7 +49,10 @@
echo "running ab"
# run apache bench
ab -n 100 -c 10 -u putdata -T application/json http://localhost:10000/a1-p/policytypes/20000/policies/admission_control_policy
+# echo "log collection"
+#integration_tests/getlogs.sh
commands_post=
+ echo "teardown"
helm delete testreceiver
helm del --purge testreceiver
helm delete a1