[DMAAP-48] Initial code import

Change-Id: I3e65371093487d7de167ec6c29f327f366f1e299
Signed-off-by: sg481n <sg481n@att.com>
diff --git a/Contributing.txt b/Contributing.txt
new file mode 100644
index 0000000..d64568e
--- /dev/null
+++ b/Contributing.txt
@@ -0,0 +1,35 @@
+This software is distributed under a permissive open source

+license to allow it to be used in any projects, whether open

+source or proprietary. Contributions to the project are welcome

+and it is important to maintain a clear record of contributions

+and the terms under which they are licensed.

+

+To indicate your acceptance of the Developer's Certificate of Origin 1.1

+terms, please add the following line to the end of the commit message

+for each contribution you make to the project:

+

+Signed-off-by: Your Name <your@email.com>
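
+(Tip: if user.name and user.email are configured in git, running "git commit -s"

+appends this sign-off line automatically.)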

+

+Developer's Certificate of Origin 1.1

+

+By making a contribution to this project, I certify that:

+

+(a) The contribution was created in whole or in part by me and I

+have the right to submit it under the open source license indicated

+in the file; or

+

+(b) The contribution is based upon previous work that, to the best

+of my knowledge, is covered under an appropriate open source license

+and I have the right under that license to submit that work with 

+modifications, whether created in whole or part by me, under the same

+open source license (unless I am permitted to submit under a different 

+license), as indicated in the file; or

+

+(c) The contribution was provided directly to me by some other person

+who certified (a), (b) or (c) and I have not modified it.

+

+(d) I understand and agree that this project and the contribution are

+public and that a record of the contribution (including all personal

+information I submit with it, including my sign-off) is maintained

+indefinitely and may be redistributed consistent with this project or

+the open source license(s) involved.
\ No newline at end of file
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..a8161fc
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,24 @@
+node {

+    // Get the maven tool.

+    // ** NOTE: This 'M3' maven tool must be configured

+    // **       in the Jenkins global configuration.

+    def mvnHome = tool 'M3'

+    sh "echo ${mvnHome}"

+    

+    

+    // Mark the code checkout 'stage'....

+    stage 'Checkout'

+    // Get some code from a GitHub repository

+    checkout scm    

+   

+    // Mark the code build 'stage'....

+    stage 'Build DMAAP-DR'

+    // Run the maven build

+    // Use 'sh' on Unix agents and 'bat' on Windows agents

+	

+	sh "${mvnHome}/bin/mvn -f datarouter-prov/pom.xml clean deploy"

+    sh "${mvnHome}/bin/mvn -f datarouter-node/pom.xml clean deploy"

+

+	

+   

+}

diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..2ce945c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,22 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..39dbca9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,149 @@
+# DMAAP_DATAROUTER

+			       

+## OVERVIEW

+  

+The Data Routing System project is intended to provide a common framework by which data producers can make data available to data consumers and a way for potential consumers to find feeds with the data they require.  

+The delivery of data from these kinds of production systems is the domain of the Data Routing System. Its primary goal is to make it easier to move data from existing applications that may not have been designed from the ground up to share data.

+The Data Routing System differs from many existing message-distribution platforms, which focus on real-time delivery of small messages (on the order of a few kilobytes or so) from producers to consumers.

+

+   Provisioning is implemented as a Java servlet running under Jetty in one JVM

+   

+   Provisioning data is stored in a MySQL database

+   

+   The backup provisioning server and each node is informed any time provisioning data changes

+   

+   The backup provisioning server and each node may request the complete set of provisioning data at any time

+   

+   A Node is implemented as a Java servlet running under Jetty in one JVM

+

+Assumptions

+    For 95% of all feeds (there will be some exceptions):

+	

+    Number of Publishing Endpoints per Feed: 1 – 10

+	

+    Number of Subscribers per Feed: 2 – 10

+	

+    File Size: 10^5 – 10^10 bytes

+	

+    with a distribution towards the high end

+	

+    Frequency of Publishing: 1/day – 10/minute

+	

+    Lifetime of a Feed: months to years

+	

+    Lifetime of a Subscription: months to years

+	

+ 

+Data Router and Sensitive Data Handling

+ 

+    A publisher of a Data Router feed of sensitive (e.g., PCI, SPI, etc.) data needs to encrypt that data prior to delivering it to the Data Router

+	

+    The Data Router will distribute that data to all of the subscribers of that feed.

+	

+    Data Router does not examine the feed content or enforce any restrictions or validations on the feed content in any way

+	

+    It is the responsibility of the subscribers to work with the publisher to determine how to decrypt that data

+	

+

+

+ 

+

+What the Data Router is NOT:

+

+    Does not support streaming data

+	

+    Does not tightly couple to any specific publish endpoint or subscriber

+	

+    Agnostic as to source and sink of data residing in an RDBMS, NoSQL DB, Other DBMS, Flat Files, etc.

+	

+    Does not transform any published data

+	

+    Does not “examine” any published data

+	

+    Does not verify the integrity of a published file

+	

+    Does not perform any data “cleansing”

+	

+    Does not store feeds (not a repository or archive)

+	

+    There is no long-term storage – assumes subscribers are responsive most of the time

+	

+    Does not encrypt data when queued on a node

+	

+    Does not provide guaranteed order of delivery

+	

+    Per-file metadata can be used for ordering

+	

+    External customers are supported via DITREX (MOTS 18274)

+ 

+ 

+ 

+

+## BUILD  

+ 

+The Data Router repository can be cloned and built using Maven.

+In the repository:

+

+Go to datarouter-prov in the root and run:

+

+	mvn clean install

+	

+Go to datarouter-node in the root and run:

+

+	mvn clean install

+	 

+The project build should complete successfully.

+

+

+

+

+## RUN 

+

+Data Router is a Unix-based service.

+

+Prerequisites to run the service:

+

+MySQL Version 5.6

+

+Java JDK 1.8

+

+Install MySQL and load the needed tables into the database.

+

+A sample install_db.sql is provided in datarouter-prov/data.
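
+

+A minimal sketch of loading it with the standard MySQL client (the user name is an assumption; adjust credentials and host to your environment):

+

+	mysql -u root -p < datarouter-prov/data/install_db.sql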

+

+Go to the datarouter-prov module and run the service using main.java

+ 

+Go to the datarouter-node module and run the service using nodemain.java

+

+Sample curl commands to test:

+

+Create a feed (an illustrative request-body sketch follows the command):

+

+curl -v -X POST -H "Content-Type : application/vnd.att-dr.feed" -H "X-ATT-DR-ON-BEHALF-OF: rs873m" --data-ascii @/opt/app/datartr/addFeed3.txt --post301 --location-trusted  -k https://prov.datarouternew.com:8443
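
+

+The file addFeed3.txt referenced above is not included in this commit. The sketch below only illustrates the general shape of an application/vnd.att-dr.feed request body; the field names and values are assumptions and should be checked against the provisioning API before use:

+

+	{
+	  "name": "example feed",
+	  "version": "v1.0",
+	  "description": "illustrative feed definition",
+	  "authorization": {
+	    "classification": "unclassified",
+	    "endpoint_addrs": [],
+	    "endpoint_ids": [ { "id": "rs873m", "password": "rs873m" } ]
+	  }
+	}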

+

+Subscribe to a feed:

+

+curl -v -X POST -H "Content-Type: application/vnd.att-dr.subscription" -H "X-ATT-DR-ON-BEHALF-OF: rs873m" --data-ascii @/opt/app/datartr/addSubscriber.txt --post301 --location-trusted -k https://prov.datarouternew.com:8443/subscribe/1

+

+Publish to a feed:

+

+curl -v -X PUT --user rs873m:rs873m -H "Content-Type: application/octet-stream" --data-binary @/opt/app/datartr/addFeed3.txt  --post301 --location-trusted -k https://prov.datarouternew.com:8443/publish/1/test1

+

+

+ 

+

+## CONFIGURATION

+

+Recommended 

+

+Environment - Unix based

+

+Java - 1.8

+

+Maven - 3.2.5 

+

+MySQL - 5.6

+

+Self-signed SSL certificates
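
+

+For local testing, a self-signed keystore can be generated with the JDK keytool; the alias, distinguished name, validity and "changeit" password below are placeholder assumptions:

+

+	keytool -genkeypair -alias datarouter -keyalg RSA -keysize 2048 -validity 365 -dname "CN=localhost" -keystore keystore.jks -storepass changeit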

+ 

+ 

diff --git a/Subscriber/src/SSASubscriber.java b/Subscriber/src/SSASubscriber.java
new file mode 100644
index 0000000..5ec099b
--- /dev/null
+++ b/Subscriber/src/SSASubscriber.java
@@ -0,0 +1,115 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+import org.eclipse.jetty.http.HttpVersion;
+import org.eclipse.jetty.servlet.*;
+import org.eclipse.jetty.util.ssl.*;
+import org.eclipse.jetty.server.*;
+import org.apache.log4j.Logger;
+
+/**
+ *	Example stand alone subscriber
+ */
+public class SSASubscriber {
+	private static final int Port = 8447;
+	private static final String KeyStoreType = "jks";
+	private static final String KeyStoreFile = "/root/sub/subscriber.jks";
+	//private static final String KeyStoreFile = "c:/tmp/subscriber.jks";
+	private static final String KeyStorePassword = "changeit";
+	private static final String KeyPassword = "changeit";
+	private static final String ContextPath = "/";
+	private static final String URLPattern = "/*";
+
+	public static void main(String[] args) throws Exception {
+		//User story # US792630  -Jetty Upgrade to 9.3.11
+		//SSASubscriber register Jetty server.
+        Server server = new Server();
+        HttpConfiguration http_config = new HttpConfiguration();
+        http_config.setSecureScheme("https");
+        http_config.setSecurePort(Port);
+        http_config.setRequestHeaderSize(8192);
+		
+        // HTTP connector
+        ServerConnector http = new ServerConnector(server,
+                new HttpConnectionFactory(http_config));
+        http.setPort(7070);
+        http.setIdleTimeout(30000);
+        
+        // SSL Context Factory
+        SslContextFactory sslContextFactory = new SslContextFactory();
+        sslContextFactory.setKeyStoreType(KeyStoreType);
+        sslContextFactory.setKeyStorePath(KeyStoreFile);
+        sslContextFactory.setKeyStorePassword(KeyStorePassword);
+        sslContextFactory.setKeyManagerPassword(KeyPassword);
+        
+        // sslContextFactory.setTrustStorePath(ncm.getKSFile());
+        // sslContextFactory.setTrustStorePassword("changeit");
+        sslContextFactory.setExcludeCipherSuites("SSL_RSA_WITH_DES_CBC_SHA",
+                "SSL_DHE_RSA_WITH_DES_CBC_SHA", "SSL_DHE_DSS_WITH_DES_CBC_SHA",
+                "SSL_RSA_EXPORT_WITH_RC4_40_MD5",
+                "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA",
+                "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA",
+                "SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA");
+
+        // SSL HTTP Configuration
+        HttpConfiguration https_config = new HttpConfiguration(http_config);
+        https_config.addCustomizer(new SecureRequestCustomizer());
+
+        // SSL Connector
+        ServerConnector sslConnector = new ServerConnector(server,
+            new SslConnectionFactory(sslContextFactory,HttpVersion.HTTP_1_1.asString()),
+            new HttpConnectionFactory(https_config));
+        sslConnector.setPort(Port);
+        server.addConnector(sslConnector);
+        
+    	/**Skip SSLv3 Fixes*/
+        sslContextFactory.addExcludeProtocols("SSLv3");
+        System.out.println("Excluded protocols SSASubscriber-" + java.util.Arrays.toString(sslContextFactory.getExcludeProtocols()));
+		/**End of SSLv3 Fixes*/
+        
+        // HTTPS Configuration
+        ServerConnector https = new ServerConnector(server,
+            new SslConnectionFactory(sslContextFactory,HttpVersion.HTTP_1_1.asString()),
+                new HttpConnectionFactory(https_config));
+        https.setPort(Port);
+        https.setIdleTimeout(30000);
+        //server.setConnectors(new Connector[] { http, https });
+        server.setConnectors(new Connector[] {  http });
+		ServletContextHandler ctxt = new ServletContextHandler(0);
+		ctxt.setContextPath(ContextPath);
+		server.setHandler(ctxt);
+		
+		ctxt.addServlet(new ServletHolder(new SubscriberServlet()), "/*");
+		
+		try { 
+		    server.start();
+		} catch ( Exception e ) { 
+			System.out.println("Jetty failed to start. Reporting will be unavailable-"+e);
+		};
+        server.join();
+        
+        System.out.println("Subscriber started-"+ server.getState());  
+
+	}
+}
\ No newline at end of file
diff --git a/Subscriber/src/SubscriberServlet.java b/Subscriber/src/SubscriberServlet.java
new file mode 100644
index 0000000..1af62a6
--- /dev/null
+++ b/Subscriber/src/SubscriberServlet.java
@@ -0,0 +1,149 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URLEncoder;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.log4j.Logger;
+
+/**
+ *	Example stand alone subscriber servlet with Authorization header checking
+ */
+public class SubscriberServlet extends HttpServlet	{
+	private static Logger logger = Logger.getLogger("com.att.datarouter.pubsub.ssasubscribe.SubscriberServlet");
+	private String Login = "LOGIN";
+	private String Password = "PASSWORD";
+	private String OutputDirectory = "/root/sub/received";
+
+	private String auth;
+
+	private static String gp(ServletConfig config, String param, String deflt) {
+		param = config.getInitParameter(param);
+		if (param == null || param.length() == 0) {
+			param = deflt;
+		}
+		return(param);
+	}
+	/**
+	 *	Configure this subscriberservlet.  Configuration parameters from config.getInitParameter() are:
+	 *	<ul>
+	 *	<li>Login - The login expected in the Authorization header (default "LOGIN").
+	 *	<li>Password - The password expected in the Authorization header (default "PASSWORD").
+	 *	<li>OutputDirectory - The directory where files are placed (default "received").
+	 *	</ul>
+	 */
+	public void init(ServletConfig config) throws ServletException {
+		Login = gp(config, "Login", Login);
+		Password = gp(config, "Password", Password);
+		OutputDirectory = gp(config, "OutputDirectory", OutputDirectory);
+		(new File(OutputDirectory)).mkdirs();
+		auth = "Basic " + Base64.encodeBase64String((Login + ":" + Password).getBytes());
+	}
+	/**
+	 *	Invoke common(req, resp, false).
+	 */
+	protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+		common(req, resp, false);
+	}
+	/**
+	 *	Invoke common(req, resp, true).
+	 */
+	protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+		common(req, resp, true);
+	}
+	/**
+	 *	Process a PUT or DELETE request.
+	 *	<ol>
+	 *	<li>Verify that the request contains an Authorization header
+	 *	or else UNAUTHORIZED.
+	 *	<li>Verify that the Authorization header matches the configured
+	 *	Login and Password or else FORBIDDEN.
+	 *	<li>If the request is PUT, store the message body as a file
+	 *	in the configured OutputDirectory directory protecting against
+	 *	evil characters in the received FileID.  The file is created
+	 *	initially with its name prefixed with a ".", and once it is complete, it is
+	 *	renamed to remove the leading "." character.
+	 *	<li>If the request is DELETE, instead delete the file (if it exists) from the configured OutputDirectory directory.
+	 *	<li>Respond with NO_CONTENT.
+	 *	</ol>
+	 */
+	protected void common(HttpServletRequest req, HttpServletResponse resp, boolean isdelete) throws ServletException, IOException {
+		String ah = req.getHeader("Authorization");
+		if (ah == null) {
+			logger.info("Rejecting request with no Authorization header from " + req.getRemoteAddr() + ": " + req.getPathInfo());
+			resp.sendError(HttpServletResponse.SC_UNAUTHORIZED);
+			return;
+		}
+		if (!auth.equals(ah)) {
+			logger.info("Rejecting request with incorrect Authorization header from " + req.getRemoteAddr() + ": " + req.getPathInfo());
+			resp.sendError(HttpServletResponse.SC_FORBIDDEN);
+			return;
+		}
+		String fileid = req.getPathInfo();
+		fileid = fileid.substring(fileid.lastIndexOf('/') + 1);
+		String qs = req.getQueryString();
+		if (qs != null) {
+			fileid = fileid + "?" + qs;
+		}
+		String publishid = req.getHeader("X-ATT-DR-PUBLISH-ID");
+		String filename = URLEncoder.encode(fileid, "UTF-8").replaceAll("^\\.", "%2E").replaceAll("\\*", "%2A");
+		String finalname = OutputDirectory + "/" + filename;
+		String tmpname = OutputDirectory + "/." + filename;
+		try {
+			if (isdelete) {
+				(new File(finalname)).delete();
+				logger.info("Received delete for file id " + fileid + " from " + req.getRemoteAddr() + " publish id " + publishid + " as " + finalname);
+			} else {
+				InputStream is = req.getInputStream();
+				OutputStream os = new FileOutputStream(tmpname);
+				byte[] buf = new byte[65536];
+				int i;
+				while ((i = is.read(buf)) > 0) {
+					os.write(buf, 0, i);
+				}
+				is.close();
+				os.close();
+				(new File(tmpname)).renameTo(new File(finalname));
+				logger.info("Received file id " + fileid + " from " + req.getRemoteAddr() + " publish id " + publishid + " as " + finalname);
+			}
+			resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+		} catch (IOException ioe) {
+			(new File(tmpname)).delete();
+			logger.info("Failure to save file " + finalname + " from " + req.getRemoteAddr() + ": " + req.getPathInfo(), ioe);
+			throw ioe;
+		}
+	}
+}
diff --git a/Subscriber/src/log4j.properties b/Subscriber/src/log4j.properties
new file mode 100644
index 0000000..8c12d5c
--- /dev/null
+++ b/Subscriber/src/log4j.properties
@@ -0,0 +1,9 @@
+log4j.debug=FALSE

+log4j.rootLogger=INFO,Root

+

+log4j.appender.Root=org.apache.log4j.DailyRollingFileAppender

+log4j.appender.Root.file=/opt/app/datartr/logs/subscriber.log

+log4j.appender.Root.datePattern='.'yyyyMMdd

+log4j.appender.Root.append=true

+log4j.appender.Root.layout=org.apache.log4j.PatternLayout

+log4j.appender.Root.layout.ConversionPattern=%d %p %t %m%n

diff --git a/datarouter-node/pom.xml b/datarouter-node/pom.xml
new file mode 100644
index 0000000..b2b798b
--- /dev/null
+++ b/datarouter-node/pom.xml
@@ -0,0 +1,472 @@
+<!--

+  ============LICENSE_START==================================================

+  * org.onap.dmaap

+  * ===========================================================================

+  * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+  * ===========================================================================

+  * Licensed under the Apache License, Version 2.0 (the "License");

+  * you may not use this file except in compliance with the License.

+  * You may obtain a copy of the License at

+  * 

+   *      http://www.apache.org/licenses/LICENSE-2.0

+  * 

+   * Unless required by applicable law or agreed to in writing, software

+  * distributed under the License is distributed on an "AS IS" BASIS,

+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+  * See the License for the specific language governing permissions and

+  * limitations under the License.

+  * ============LICENSE_END====================================================

+  *

+  * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+  *

+-->

+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

+	<modelVersion>4.0.0</modelVersion>

+

+	<groupId>com.att.datarouter-node</groupId>

+	<artifactId>datarouter-node</artifactId>

+	<version>0.0.1-SNAPSHOT</version>

+	<packaging>jar</packaging>

+

+	<name>datarouter-node</name>

+	<url>https://github.com/att/DMAAP_DATAROUTER</url>

+    <licenses>

+		<license>

+		<name>Apache License, Version 2.0</name>

+		<url>http://www.apache.org/licenses/LICENSE-2.0</url>

+		</license>

+	</licenses>

+

+

+	<properties>

+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

+		<maven.compiler.source>1.8</maven.compiler.source>

+		<maven.compiler.target>1.8</maven.compiler.target>

+		<dockerLocation>${basedir}/target/</dockerLocation>

+		<docker.registry>hub.docker.com</docker.registry>

+	</properties>

+

+	<dependencies>

+		<dependency>

+			<groupId>junit</groupId>

+			<artifactId>junit</artifactId>

+			<version>3.8.1</version>

+			<scope>test</scope>

+		</dependency>

+		<dependency>

+			<groupId>org.json</groupId>

+			<artifactId>json</artifactId>

+			<version>20160810</version>

+		</dependency>

+

+		<dependency>

+			<groupId>javax.mail</groupId>

+			<artifactId>javax.mail-api</artifactId>

+			<version>1.5.1</version>

+		</dependency>

+		<dependency>

+			<groupId>com.att.eelf</groupId>

+			<artifactId>eelf-core</artifactId>

+			<version>0.0.1</version>

+		</dependency>

+		<dependency>

+			<groupId>javax.servlet</groupId>

+			<artifactId>servlet-api</artifactId>

+			<version>2.5</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-server</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-continuation</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-util</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-deploy</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-servlet</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-servlets</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-http</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-security</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-websocket</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-io</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.apache.commons</groupId>

+			<artifactId>commons-io</artifactId>

+			<version>1.3.2</version>

+		</dependency>

+		<dependency>

+			<groupId>commons-lang</groupId>

+			<artifactId>commons-lang</artifactId>

+			<version>2.4</version>

+		</dependency>

+		<dependency>

+			<groupId>commons-io</groupId>

+			<artifactId>commons-io</artifactId>

+			<version>2.1</version>

+			<scope>compile</scope>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.httpcomponents</groupId>

+			<artifactId>httpcore</artifactId>

+			<version>4.2.2</version>

+		</dependency>

+

+		<dependency>

+			<groupId>commons-codec</groupId>

+			<artifactId>commons-codec</artifactId>

+			<version>1.6</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.mozilla</groupId>

+			<artifactId>rhino</artifactId>

+			<version>1.7R3</version>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.james</groupId>

+			<artifactId>apache-mime4j-core</artifactId>

+			<version>0.7</version>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.httpcomponents</groupId>

+			<artifactId>httpclient</artifactId>

+			<version>4.2.3</version>

+		</dependency>

+		<dependency>

+			<groupId>org.sonatype.http-testing-harness</groupId>

+			<artifactId>junit-runner</artifactId>

+			<version>0.11</version>

+		</dependency>

+

+

+		<dependency>

+			<groupId>log4j</groupId>

+			<artifactId>log4j</artifactId>

+			<version>1.2.17</version>

+			<scope>compile</scope>

+		</dependency>

+	</dependencies>

+

+	<build>

+		<finalName>datarouter-node</finalName>

+		<resources>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/*.properties</include>

+				</includes>

+			</resource>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/EelfMessages.properties</include>

+				</includes>

+			</resource>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/log4j.properties</include>

+				</includes>

+			</resource>

+

+		</resources>

+		<plugins>

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-compiler-plugin</artifactId>

+				<configuration>

+					<archive>

+						<manifest>

+							<mainClass>com.att.research.datarouter.node.NodeMain</mainClass>

+

+						</manifest>

+					</archive>

+

+					<source>1.8</source>

+					<target>1.8</target>

+				</configuration>

+				<version>3.6.0</version>

+			</plugin>

+			<plugin>

+				<artifactId>maven-assembly-plugin</artifactId>

+				<version>2.4</version>

+				<configuration>

+					<descriptorRefs>

+						<descriptorRef>jar-with-dependencies</descriptorRef>

+					</descriptorRefs>

+					<outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>

+					<archive>

+

+						<manifest>

+							<addClasspath>true</addClasspath>

+							<mainClass>com.att.research.datarouter.node.NodeMain</mainClass>

+						</manifest>

+					</archive>

+				</configuration>

+

+				<executions>

+					<execution>

+						<id>make-assembly</id> <!-- this is used for inheritance merges -->

+						<phase>package</phase> <!-- bind to the packaging phase -->

+						<goals>

+							<goal>single</goal>

+						</goals>

+					</execution>

+				</executions>

+			</plugin>

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-resources-plugin</artifactId>

+				<version>2.7</version>

+				<executions>

+					<execution>

+						<id>copy-docker-file</id>

+						<phase>package</phase>

+						<goals>

+							<goal>copy-resources</goal>

+						</goals>

+						<configuration>

+							<outputDirectory>${dockerLocation}</outputDirectory>

+							<overwrite>true</overwrite>

+							<resources>

+								<resource>

+									<directory>${basedir}/src/main/resources/docker</directory>

+									<filtering>true</filtering>

+									<includes>

+										<include>**/*</include>

+									</includes>

+								</resource>

+							</resources>

+						</configuration>

+					</execution>

+					<execution>

+						<id>copy-resources</id>

+						<phase>validate</phase>

+						<goals>

+							<goal>copy-resources</goal>

+						</goals>

+						<configuration>

+							<outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>

+							<resources>

+								<resource>

+									<directory>${basedir}/src/main/resources</directory>

+									<includes>

+										<include>misc/**</include>

+										<include>**/**</include>

+									</includes>

+								</resource>

+							</resources>

+						</configuration>

+					</execution>

+					<execution>

+        <id>copy-resources-1</id>

+        <phase>validate</phase>

+        <goals>

+          <goal>copy-resources</goal>

+        </goals>

+        <configuration>

+          <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>

+          <resources>

+            <resource>

+                        <directory>${basedir}/self_signed</directory>

+                        <includes>

+                            <include>misc/**</include>

+                            <include>**/**</include>

+                        </includes>

+                    </resource>

+          </resources>

+        </configuration>

+      </execution>

+				</executions>

+			</plugin>

+			<plugin>

+				<groupId>com.spotify</groupId>

+				<artifactId>docker-maven-plugin</artifactId>

+				<version>0.4.11</version>

+				<configuration>

+					<imageName>datarouter-node</imageName>

+					<dockerDirectory>${dockerLocation}</dockerDirectory>

+					<serverId>docker-hub</serverId>

+					<registryUrl>https://${docker.registry}</registryUrl>

+					<imageTags>

+						<imageTag>${project.version}</imageTag>

+						<imageTag>latest</imageTag>

+					</imageTags>

+					<forceTags>true</forceTags>

+				</configuration>

+			</plugin>

+

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-dependency-plugin</artifactId>

+				<version>2.10</version>

+				<executions>

+					<execution>

+						<id>copy-dependencies</id>

+						<phase>package</phase>

+						<goals>

+							<goal>copy-dependencies</goal>

+						</goals>

+						<configuration>

+							<outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>

+							<overWriteReleases>false</overWriteReleases>

+							<overWriteSnapshots>false</overWriteSnapshots>

+							<overWriteIfNewer>true</overWriteIfNewer>

+						</configuration>

+					</execution>

+				</executions>

+			</plugin>

+										<plugin>

+			<groupId>org.apache.maven.plugins</groupId>

+			<artifactId>maven-javadoc-plugin</artifactId>

+			<configuration>

+			<failOnError>false</failOnError>

+			</configuration>

+			<executions>

+				<execution>

+					<id>attach-javadocs</id>

+					<goals>

+						<goal>jar</goal>

+					</goals>

+				</execution>

+			</executions>

+		</plugin> 

+	   

+	   

+	       <plugin>

+		      <groupId>org.apache.maven.plugins</groupId>

+		      <artifactId>maven-source-plugin</artifactId>

+		      <version>2.2.1</version>

+		      <executions>

+			<execution>

+			  <id>attach-sources</id>

+			  <goals>

+			    <goal>jar-no-fork</goal>

+			  </goals>

+			</execution>

+		      </executions>

+		    </plugin>

+	

+

+	<plugin>

+	    <groupId>org.apache.maven.plugins</groupId>

+	    <artifactId>maven-gpg-plugin</artifactId>

+	    <version>1.5</version>

+	    <executions>

+		<execution>

+		    <id>sign-artifacts</id>

+		    <phase>verify</phase>

+		    <goals>

+			<goal>sign</goal>

+		    </goals>

+		</execution>

+	    </executions>

+	  </plugin> 

+			

+		<plugin>

+			<groupId>org.sonatype.plugins</groupId>

+			<artifactId>nexus-staging-maven-plugin</artifactId>

+			<version>1.6.7</version>

+			<extensions>true</extensions>

+			<configuration>

+			<serverId>ossrhdme</serverId>

+			<nexusUrl>https://oss.sonatype.org/</nexusUrl>

+			<autoReleaseAfterClose>true</autoReleaseAfterClose>

+			</configuration>

+		</plugin>

+			

+		<plugin>

+				<groupId>org.codehaus.mojo</groupId>

+				<artifactId>cobertura-maven-plugin</artifactId>

+				<version>2.7</version>

+				<configuration>

+					<formats>

+					<format>html</format>

+					<format>xml</format>

+				  </formats>

+				</configuration>

+			</plugin>

+				

+        <plugin>

+               <groupId>com.blackducksoftware.integration</groupId>

+               <artifactId>hub-maven-plugin</artifactId>

+               <version>1.0.4</version>

+                  <inherited>false</inherited>

+               <configuration>

+                  <target>${project.basedir}</target>

+               </configuration>

+              <executions>

+              <execution>

+                 <id>create-bdio-file</id>

+                 <phase>package</phase>

+              <goals>

+               <goal>createHubOutput</goal>

+              </goals>

+             </execution>

+            </executions>

+        </plugin>

+		</plugins>

+	</build>

+	

+	<distributionManagement>

+    		<snapshotRepository>

+      			<id>ossrhdme</id>

+      			<url>https://oss.sonatype.org/content/repositories/snapshots</url>

+    		</snapshotRepository>

+    		<repository>

+      			<id>ossrhdme</id>

+      			<url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>

+    		</repository>

+	</distributionManagement>

+	

+	<scm>

+		<connection>https://github.com/att/DMAAP_DATAROUTER.git</connection>

+		<developerConnection>${project.scm.connection}</developerConnection>

+		<url>https://github.com/att/DMAAP_DATAROUTER/tree/master</url>

+	</scm>

+	

+</project>

diff --git a/datarouter-node/self_signed/cacerts.jks b/datarouter-node/self_signed/cacerts.jks
new file mode 100644
index 0000000..dfd8143
--- /dev/null
+++ b/datarouter-node/self_signed/cacerts.jks
Binary files differ
diff --git a/datarouter-node/self_signed/keystore.jks b/datarouter-node/self_signed/keystore.jks
new file mode 100644
index 0000000..e5a4e78
--- /dev/null
+++ b/datarouter-node/self_signed/keystore.jks
Binary files differ
diff --git a/datarouter-node/self_signed/mykey.cer b/datarouter-node/self_signed/mykey.cer
new file mode 100644
index 0000000..2a5c9d7
--- /dev/null
+++ b/datarouter-node/self_signed/mykey.cer
Binary files differ
diff --git a/datarouter-node/self_signed/nodekey.cer b/datarouter-node/self_signed/nodekey.cer
new file mode 100644
index 0000000..4cdfdfe
--- /dev/null
+++ b/datarouter-node/self_signed/nodekey.cer
Binary files differ
diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/Delivery.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/Delivery.java
new file mode 100644
index 0000000..d0e88ec
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/Delivery.java
@@ -0,0 +1,253 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+import java.io.*;

+import org.apache.log4j.Logger;

+

+/**

+ *	Main control point for delivering files to destinations.

+ *	<p>

+ *	The Delivery class manages assignment of delivery threads to delivery

+ *	queues and creation and destruction of delivery queues as

+ *	configuration changes.  DeliveryQueues are assigned threads based on a

+ *	modified round-robin approach giving priority to queues with more work

+ *	as measured by both bytes to deliver and files to deliver and lower

+ *	priority to queues that already have delivery threads working.

+ *	A delivery thread continues to work for a delivery queue as long as

+ *	that queue has more files to deliver.

+ */

+public class Delivery {

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.Delivery");

+	private static class DelItem implements Comparable<DelItem>	{

+		private String pubid;

+		private String spool;

+		public int compareTo(DelItem x) {

+			int i = pubid.compareTo(x.pubid);

+			if (i == 0) {

+				i = spool.compareTo(x.spool);

+			}

+			return(i);

+		}

+		public String getPublishId() {

+			return(pubid);

+		}

+		public String getSpool() {

+			return(spool);

+		}

+		public DelItem(String pubid, String spool) {

+			this.pubid = pubid;

+			this.spool = spool;

+		}

+	}

+	private double	fdstart;

+	private double	fdstop;

+	private int	threads;

+	private int	curthreads;

+	private NodeConfigManager	config;

+	private Hashtable<String, DeliveryQueue>	dqs = new Hashtable<String, DeliveryQueue>();

+	private DeliveryQueue[]	queues = new DeliveryQueue[0];

+	private int	qpos = 0;

+	private long	nextcheck;

+	private Runnable	cmon = new Runnable() {

+		public void run() {

+			checkconfig();

+		}

+	};

+	/**

+	 *	Constructs a new Delivery system using the specified configuration manager.

+	 *	@param config	The configuration manager for this delivery system.

+	 */

+	public Delivery(NodeConfigManager config) {

+		this.config = config;

+		config.registerConfigTask(cmon);

+		checkconfig();

+	}

+	private void cleardir(String dir) {

+		if (dqs.get(dir) != null) {

+			return;

+		}

+		File fdir = new File(dir);

+		for (File junk: fdir.listFiles()) {

+			if (junk.isFile()) {

+				junk.delete();

+			}

+		}

+		fdir.delete();

+	}

+	private void freeDiskCheck() {

+		File spoolfile = new File(config.getSpoolBase());

+		long tspace = spoolfile.getTotalSpace();

+		long start = (long)(tspace * fdstart);

+		long stop = (long)(tspace * fdstop);

+		long cur = spoolfile.getUsableSpace();

+		if (cur >= start) {

+			return;

+		}

+		Vector<DelItem> cv = new Vector<DelItem>();

+		for (String sdir: dqs.keySet()) {

+			for (String meta: (new File(sdir)).list()) {

+				if (!meta.endsWith(".M") || meta.charAt(0) == '.') {

+					continue;

+				}

+				cv.add(new DelItem(meta.substring(0, meta.length() - 2), sdir));

+			}

+		}

+		DelItem[] items = cv.toArray(new DelItem[cv.size()]);

+		Arrays.sort(items);

+		logger.info("NODE0501 Free disk space below red threshold.  current=" + cur + " red=" + start + " total=" + tspace);

+		for (DelItem item: items) {

+			long amount = dqs.get(item.getSpool()).cancelTask(item.getPublishId());

+			logger.info("NODE0502 Attempting to discard " + item.getSpool() + "/" + item.getPublishId() + " to free up disk");

+			if (amount > 0) {

+				cur += amount;

+				if (cur >= stop) {

+					cur = spoolfile.getUsableSpace();

+				}

+				if (cur >= stop) {

+					logger.info("NODE0503 Free disk space at or above yellow threshold.  current=" + cur + " yellow=" + stop + " total=" + tspace);

+					return;

+				}

+			}

+		}

+		cur = spoolfile.getUsableSpace();

+		if (cur >= stop) {

+			logger.info("NODE0503 Free disk space at or above yellow threshold.  current=" + cur + " yellow=" + stop + " total=" + tspace);

+			return;

+		}

+		logger.warn("NODE0504 Unable to recover sufficient disk space to reach green status.  current=" + cur + " yellow=" + stop + " total=" + tspace);

+	}

+	private void cleardirs() {

+		String basedir = config.getSpoolBase();

+		String nbase = basedir + "/n";

+		for (String nodedir: (new File(nbase)).list()) {

+			if (!nodedir.startsWith(".")) {

+				cleardir(nbase + "/" + nodedir);

+			}

+		}

+		String sxbase = basedir + "/s";

+		for (String sxdir: (new File(sxbase)).list()) {

+			if (sxdir.startsWith(".")) {

+				continue;

+			}

+			File sxf = new File(sxbase + "/" + sxdir);

+			for (String sdir: sxf.list()) {

+				if (!sdir.startsWith(".")) {

+					cleardir(sxbase + "/" + sxdir + "/" + sdir);

+				}

+			}

+			sxf.delete();  // won't if anything still in it

+		}

+	}

+	private synchronized void checkconfig() {

+		if (!config.isConfigured()) {

+			return;

+		}

+		fdstart = config.getFreeDiskStart();

+		fdstop = config.getFreeDiskStop();

+		threads = config.getDeliveryThreads();

+		if (threads < 1) {

+			threads = 1;

+		}

+		DestInfo[] alldis = config.getAllDests();

+		DeliveryQueue[] nqs = new DeliveryQueue[alldis.length];

+		qpos = 0;

+		Hashtable<String, DeliveryQueue> ndqs = new Hashtable<String, DeliveryQueue>();

+		for (DestInfo di: alldis) {

+			String spl = di.getSpool();

+			DeliveryQueue dq = dqs.get(spl);

+			if (dq == null) {

+				dq = new DeliveryQueue(config, di);

+			} else {

+				dq.config(di);

+			}

+			ndqs.put(spl, dq);

+			nqs[qpos++] = dq;

+		}

+		queues = nqs;

+		dqs = ndqs;

+		cleardirs();

+		while (curthreads < threads) {

+			curthreads++;

+			(new Thread() {

+				{

+					setName("Delivery Thread");

+				}

+				public void run() {

+					dodelivery();

+				}

+			}).start();

+		}

+		nextcheck = 0;

+		notify();

+	}

+	private void dodelivery() {

+		DeliveryQueue dq;

+		while ((dq = getNextQueue()) != null) {

+			dq.run();

+		}

+	}

+	private synchronized DeliveryQueue getNextQueue() {

+		while (true) {

+			if (curthreads > threads) {

+				curthreads--;

+				return(null);

+			}

+			if (qpos < queues.length) {

+				DeliveryQueue dq = queues[qpos++];

+				if (dq.isSkipSet()) {

+					continue;

+				}

+				nextcheck = 0;

+				notify();

+				return(dq);

+			}

+			long now = System.currentTimeMillis();

+			if (now < nextcheck) {

+				try {

+					wait(nextcheck + 500 - now);

+				} catch (Exception e) {

+				}

+				now = System.currentTimeMillis();

+			}

+			if (now >= nextcheck) {

+				nextcheck = now + 5000;

+				qpos = 0;

+				freeDiskCheck();

+			}

+		}

+	}

+	/**

+	 *	Reset the retry timer for a delivery queue

+	 */

+	public synchronized void resetQueue(String spool) {

+		if (spool != null) {

+			DeliveryQueue dq = dqs.get(spool);

+			if (dq != null) {

+				dq.resetQueue();

+			}

+		}

+	}

+}

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueue.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueue.java
new file mode 100644
index 0000000..71c7797
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueue.java
@@ -0,0 +1,348 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.io.*;

+import java.util.*;

+

+/**

+ *	Mechanism for monitoring and controlling delivery of files to a destination.

+ *	<p>

+ *	The DeliveryQueue class maintains lists of DeliveryTasks for a single

+ *	destination (a subscription or another data router node) and assigns

+ *	delivery threads to try to deliver them.  It also maintains a delivery

+ *	status that causes it to back off on delivery attempts after a failure.

+ *	<p>

+ *	If the most recent delivery result was a failure, then no more attempts

+ *	will be made for a period of time.  Initially, and on the first failure

+ *	following a success, this delay will be DeliveryQueueHelper.getInitFailureTimer() (milliseconds).

+ *	If, after this delay, additional failures occur, each failure will

+ *	multiply the delay by DeliveryQueueHelper.getFailureBackoff() up to a

+ *	maximum delay specified by DeliveryQueueHelper.getMaxFailureTimer().

+ *	Note that this behavior applies to the delivery queue as a whole and not

+ *	to individual files in the queue.  If multiple files are being

+ *	delivered and one fails, the delay will be started.  If a second

+ *	delivery fails while the delay was active, it will not change the delay

+ *	or change the duration of any subsequent delay.

+ *	If, however, it succeeds, it will cancel the delay.
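
+ *	For example (with illustrative numbers, not values taken from this code): an

+ *	initial failure timer of 60000 ms, a backoff ratio of 2.0 and a maximum of

+ *	3600000 ms would delay consecutive failing attempts by 60s, 120s, 240s and

+ *	so on, capped at one hour.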

+ *	<p>

+ *	The queue maintains 3 collections of files to deliver: A todo list of

+ *	files that will be attempted, a working set of files that are being

+ *	attempted, and a retry set of files that were attempted and failed.

+ *	Whenever the todo list is empty and needs to be refilled, a scan of the

+ *	spool directory is made and the file names sorted.  Any files in the working set are ignored.

+ *	If a DeliveryTask for the file is in the retry set, then that delivery

+ *	task is placed on the todo list.  Otherwise, a new DeliveryTask for the

+ *	file is created and placed on the todo list.

+ *	If, when a DeliveryTask is about to be removed from the todo list, its

+ *	age exceeds DeliveryQueueHelper.getExpirationTimer(), then it is instead

+ *	marked as expired.

+ *	<p>

+ *	A delivery queue also maintains a skip flag.  This flag is true if the

+ *	failure timer is active or if no files are found in a directory scan.

+ */

+public class DeliveryQueue implements Runnable, DeliveryTaskHelper	{

+	private DeliveryQueueHelper	dqh;

+	private DestInfo	di;

+	private Hashtable<String, DeliveryTask>	working = new Hashtable<String, DeliveryTask>();

+	private Hashtable<String, DeliveryTask> retry = new Hashtable<String, DeliveryTask>();

+	private int	todoindex;

+	private boolean	failed;

+	private long	failduration;

+	private long	resumetime;

+	File	dir;

+	private Vector<DeliveryTask> todo = new Vector<DeliveryTask>();

+	/**

+	 *	Try to cancel a delivery task.

+	 *	@return	The length of the task in bytes or 0 if the task cannot be cancelled.

+	 */

+	public synchronized long cancelTask(String pubid) {

+		if (working.get(pubid) != null) {

+			return(0);

+		}

+		DeliveryTask dt = retry.get(pubid);

+		if (dt == null) {

+			for (int i = todoindex; i < todo.size(); i++) {

+				DeliveryTask xdt = todo.get(i);

+				if (xdt.getPublishId().equals(pubid)) {

+					dt = xdt;

+					break;

+				}

+			}

+		}

+		if (dt == null) {

+			dt = new DeliveryTask(this, pubid);

+			if (dt.getFileId() == null) {

+				return(0);

+			}

+		}

+		if (dt.isCleaned()) {

+			return(0);

+		}

+		StatusLog.logExp(dt.getPublishId(), dt.getFeedId(), dt.getSubId(), dt.getURL(), dt.getMethod(), dt.getCType(), dt.getLength(), "diskFull", dt.getAttempts());

+		dt.clean();

+		return(dt.getLength());

+	}

+	/**

+	 *	Mark that a delivery task has succeeded.

+	 */

+	public synchronized void markSuccess(DeliveryTask task) {

+		working.remove(task.getPublishId());

+		task.clean();

+		failed = false;

+		failduration = 0;

+	}

+	/**

+	 *	Mark that a delivery task has expired.

+	 */

+	public synchronized void markExpired(DeliveryTask task) {

+		task.clean();

+	}

+	/**

+	 *	Mark that a delivery task has failed permanently.

+	 */

+	public synchronized void markFailNoRetry(DeliveryTask task) {

+		working.remove(task.getPublishId());

+		task.clean();

+		failed = false;

+		failduration = 0;

+	}

+	private void fdupdate() {

+		if (!failed) {

+			failed = true;

+			if (failduration == 0) {

+				failduration = dqh.getInitFailureTimer();

+			}

+			resumetime = System.currentTimeMillis() + failduration;

+			long maxdur = dqh.getMaxFailureTimer();

+			failduration = (long)(failduration * dqh.getFailureBackoff());

+			if (failduration > maxdur) {

+				failduration = maxdur;

+			}

+		}

+	}

+	/**

+	 *	Mark that a delivery task has been redirected.

+	 */

+	public synchronized void markRedirect(DeliveryTask task) {

+		working.remove(task.getPublishId());

+		retry.put(task.getPublishId(), task);

+	}

+	/**

+	 *	Mark that a delivery task has temporarily failed.

+	 */

+	public synchronized void markFailWithRetry(DeliveryTask task) {

+		working.remove(task.getPublishId());

+		retry.put(task.getPublishId(), task);

+		fdupdate();

+	}

+	/**

+	 *	Get the next task.

+	 */

+	public synchronized DeliveryTask getNext() {

+		DeliveryTask ret = peekNext();

+		if (ret != null) {

+			todoindex++;

+			working.put(ret.getPublishId(), ret);

+		}

+		return(ret);

+	}

+	/**

+	 *	Peek at the next task.

+	 */

+	public synchronized DeliveryTask peekNext() {

+		long now = System.currentTimeMillis();

+		long mindate = now - dqh.getExpirationTimer();

+		if (failed) {

+			if (now > resumetime) {

+				failed = false;

+			} else {

+				return(null);

+			}

+		}

+		while (true) {

+			if (todoindex >= todo.size()) {

+				todoindex = 0;

+				todo = new Vector<DeliveryTask>();

+				String[] files = dir.list();

+				Arrays.sort(files);

+				for (String fname: files) {

+					if (!fname.endsWith(".M")) {

+						continue;

+					}

+					String fname2 = fname.substring(0, fname.length() - 2);

+					long pidtime = 0;

+					int dot = fname2.indexOf('.');

+					if (dot < 1) {

+						continue;

+					}

+					try {

+						pidtime = Long.parseLong(fname2.substring(0, dot));

+					} catch (Exception e) {

+					}

+					if (pidtime < 1000000000000L) {

+						continue;

+					}

+					if (working.get(fname2) != null) {

+						continue;

+					}

+					DeliveryTask dt = retry.get(fname2);

+					if (dt == null) {

+						dt = new DeliveryTask(this, fname2);

+					}

+					todo.add(dt);

+				}

+				retry = new Hashtable<String, DeliveryTask>();

+			}

+			if (todoindex < todo.size()) {

+				DeliveryTask dt = todo.get(todoindex);

+				if (dt.isCleaned()) {

+					todoindex++;

+					continue;

+				}

+				if (dt.getDate() >= mindate) {

+					return(dt);

+				}

+				todoindex++;

+				reportExpiry(dt);

+				continue;

+			}

+			return(null);

+		}

+	}

+	/**

+	 *	Create a delivery queue for a given destination info

+	 */

+	public DeliveryQueue(DeliveryQueueHelper dqh, DestInfo di) {

+		this.dqh = dqh;

+		this.di = di;

+		dir = new File(di.getSpool());

+		dir.mkdirs();

+	}

+	/**

+	 *	Update the destination info for this delivery queue

+	 */

+	public void config(DestInfo di) {

+		this.di = di;

+	}

+	/**

+	 *	Get the dest info

+	 */

+	public DestInfo getDestInfo() {

+		return(di);

+	}

+	/**

+	 *	Get the config manager

+	 */

+	public DeliveryQueueHelper getConfig() {

+		return(dqh);

+	}

+	/**

+	 *	Exceptional condition occurred during delivery

+	 */

+	public void reportDeliveryExtra(DeliveryTask task, long sent) {

+		StatusLog.logDelExtra(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getLength(), sent);

+	}

+	/**

+	 *	Message too old to deliver

+	 */

+	public void reportExpiry(DeliveryTask task) {

+		StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "retriesExhausted", task.getAttempts());

+		markExpired(task);

+	}

+	/**

+	 *	Completed a delivery attempt

+	 */

+	public void reportStatus(DeliveryTask task, int status, String xpubid, String location) {

+		if (status < 300) {

+			StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, xpubid);

+			markSuccess(task);

+		} else if (status < 400 && dqh.isFollowRedirects()) {

+			StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);

+			if (dqh.handleRedirection(di, location, task.getFileId())) {

+				markRedirect(task);

+			} else {

+				StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());

+				markFailNoRetry(task);

+			}

+		} else if (status < 500) {

+			StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);

+			StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());

+			markFailNoRetry(task);

+		} else {

+			StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);

+			markFailWithRetry(task);

+		}

+	}

+	/**

+	 *	Delivery failed by reason of an exception

+	 */

+	public void reportException(DeliveryTask task, Exception exception) {

+		StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), -1, exception.toString());

+		dqh.handleUnreachable(di);

+		markFailWithRetry(task);

+	}

+	/**

+	 *	Get the feed ID for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return	The feed ID

+	 */

+	public String getFeedId(String subid) {

+		return(dqh.getFeedId(subid));

+	}

+	/**

+	 *	Get the URL to deliver a message to given the file ID

+	 */

+	public String getDestURL(String fileid) {

+		return(dqh.getDestURL(di, fileid));

+	}

+	/**

+	 *	Deliver files until there's a failure or there are no more

+	 *	files to deliver

+	 */

+	public void run() {

+		DeliveryTask t;

+		long endtime = System.currentTimeMillis() + dqh.getFairTimeLimit();

+		int filestogo = dqh.getFairFileLimit();

+		while ((t = getNext()) != null) {

+			t.run();

+			if (--filestogo <= 0 || System.currentTimeMillis() > endtime) {

+				break;

+			}

+		}

+	}

+	/**

+	 *	Is there no work to do for this queue right now?

+	 */

+	public synchronized boolean isSkipSet() {

+		return(peekNext() == null);

+	}

+	/**

+	 *	Reset the retry timer

+	 */

+	public void resetQueue() {

+		resumetime = System.currentTimeMillis();

+	}

+}
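
Usage sketch (illustrative only, not part of the imported sources): a DeliveryQueue is built from a DeliveryQueueHelper and a DestInfo and then driven by calling run() periodically, which delivers spooled files until a failure, there are no more files, or the fair-share file/time limit is reached. The paths, subscription ID, URL and credentials below are hypothetical; myHelper stands for any DeliveryQueueHelper implementation.

    DestInfo dest = new DestInfo(
        "s:101",                                   // name ("s:" + subid for a subscriber)
        "/opt/app/datartr/spool/s/100/101",        // spool directory (hypothetical)
        "101",                                     // subscription ID
        "feed-10",                                 // log data (feed ID)
        "https://subscriber.example.com/delivery", // delivery URL (hypothetical)
        "subuser",                                 // auth user for logging
        "Basic c3VidXNlcjpwdw==",                  // Authorization header value
        false,                                     // not a metadata-only subscription
        true);                                     // send Expect: 100-continue
    DeliveryQueue queue = new DeliveryQueue(myHelper, dest);
    queue.run();   // deliver until failure, no more files, or the fair-share limits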

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueueHelper.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueueHelper.java
new file mode 100644
index 0000000..770db1d
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryQueueHelper.java
@@ -0,0 +1,89 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+/**

+ *	Interface to allow independent testing of the DeliveryQueue code

+ *	<p>

+ *	This interface represents all of the configuration information and

+ *	feedback mechanisms that a delivery queue needs.

+ */

+public interface	DeliveryQueueHelper	{

+	/**

+	 *	Get the timeout (milliseconds) before retrying after an initial delivery failure

+	 */

+	public long getInitFailureTimer();

+	/**

+	 *	Get the ratio between timeouts on consecutive delivery attempts

+	 */

+	public double	getFailureBackoff();

+	/**

+	 *	Get the maximum timeout (milliseconds) between delivery attempts

+	 */

+	public long	getMaxFailureTimer();

+	/**

+	 *	Get the expiration timer (milliseconds) for deliveries

+	 */

+	public long	getExpirationTimer();

+	/**

+	 *	Get the maximum number of file delivery attempts before checking

+	 *	if another queue has work to be performed.

+	 */

+	public int getFairFileLimit();

+	/**

+	 *	Get the maximum amount of time spent delivering files before checking if another queue has work to be performed.

+	 */

+	public long getFairTimeLimit();

+	/**

+	 *	Get the URL for delivering a file

+	 *	@param dest	The destination information for the file to be delivered.

+	 *	@param fileid	The file id for the file to be delivered.

+	 *	@return	The URL for delivering the file (typically, dest.getURL() + "/" + fileid).

+	 */

+	public String	getDestURL(DestInfo dest, String fileid);

+	/**

+	 *	Forget redirections associated with a subscriber

+	 *	@param	dest	Destination information to forget

+	 */

+	public void	handleUnreachable(DestInfo dest);

+	/**

+	 *	Post redirection for a subscriber

+	 *	@param	dest	Destination information to update

+	 *	@param	location	Location given by subscriber

+	 *	@param	fileid	File ID of request

+	 *	@return	true if this 3xx response is retryable, otherwise, false.

+	 */

+	public boolean	handleRedirection(DestInfo dest, String location, String fileid);

+	/**

+	 *	Should I handle 3xx responses differently than 4xx responses?

+	 */

+	public boolean	isFollowRedirects();

+	/**

+	 *	Get the feed ID for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return	The feed ID

+	 */

+	public String getFeedId(String subid);

+}
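
The simplest in-tree implementation of this interface is the LogManager.Uploader further below, which hard-codes its tuning values. A hypothetical stub along the same lines (illustrative only, values are made up) could be used to exercise DeliveryQueue in isolation:

    public class StubDeliveryQueueHelper implements DeliveryQueueHelper {
        public long getInitFailureTimer() { return(1000L); }       // 1 second initial retry
        public double getFailureBackoff() { return(2.0); }         // double the timer on each failure
        public long getMaxFailureTimer() { return(60000L); }       // cap retries at 1 minute
        public long getExpirationTimer() { return(3600000L); }     // give up after 1 hour
        public int getFairFileLimit() { return(100); }
        public long getFairTimeLimit() { return(60000L); }
        public String getDestURL(DestInfo dest, String fileid) { return(dest.getURL() + "/" + fileid); }
        public void handleUnreachable(DestInfo dest) {}
        public boolean handleRedirection(DestInfo dest, String location, String fileid) { return(false); }
        public boolean isFollowRedirects() { return(false); }
        public String getFeedId(String subid) { return(null); }
    }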

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTask.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTask.java
new file mode 100644
index 0000000..3d72a41
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTask.java
@@ -0,0 +1,308 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.io.*;

+import java.net.*;

+import java.util.*;

+import org.apache.log4j.Logger;

+

+/**

+ *	A file to be delivered to a destination.

+ *	<p>

+ *	A DeliveryTask represents a work item for the data router: a file that

+ *	needs to be delivered.  It provides mechanisms to get information about

+ *	the file and its delivery data, and to attempt delivery.

+ */

+public class DeliveryTask implements Runnable, Comparable<DeliveryTask>	{

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.DeliveryTask");

+	private DeliveryTaskHelper	dth;

+	private String	pubid;

+	private	DestInfo	di;

+	private String	spool;

+	private File	datafile;

+	private File	metafile;

+	private long	length;

+	private long	date;

+	private String	method;

+	private String	fileid;

+	private String	ctype;

+	private String	url;

+	private String	feedid;

+	private String	subid;

+	private int	attempts;

+	private String[][]	hdrs;

+	/**

+	 *	Is the object a DeliveryTask with the same publication ID?

+	 */

+	public boolean equals(Object o) {

+		if (!(o instanceof DeliveryTask)) {

+			return(false);

+		}

+		return(pubid.equals(((DeliveryTask)o).pubid));

+	}

+	/**

+	 *	Compare the publication IDs.

+	 */

+	public int compareTo(DeliveryTask o) {

+		return(pubid.compareTo(o.pubid));

+	}

+	/**

+	 *	Get the hash code of the publication ID.

+	 */

+	public int hashCode() {

+		return(pubid.hashCode());

+	}

+	/**

+	 *	Return the publication ID.

+	 */

+	public String toString() {

+		return(pubid);

+	}

+	/**

+	 *	Create a delivery task for a given delivery queue and pub ID

+	 *	@param	dth	The delivery task helper for the queue this task is in.

+	 *	@param	pubid	The publish ID for this file.  This is used as

+	 *	the base for the file name in the spool directory and is of

+	 *	the form &lt;milliseconds since 1970&gt;.&lt;fqdn of initial data router node&gt;

+	 */

+	public DeliveryTask(DeliveryTaskHelper dth, String pubid) {

+		this.dth = dth;

+		this.pubid = pubid;

+		di = dth.getDestInfo();

+		subid = di.getSubId();

+		feedid = di.getLogData();

+		spool = di.getSpool();

+		String dfn = spool + "/" + pubid;

+		String mfn = dfn + ".M";

+		datafile = new File(spool + "/" + pubid);

+		metafile = new File(mfn);

+		boolean monly = di.isMetaDataOnly();

+		date = Long.parseLong(pubid.substring(0, pubid.indexOf('.')));

+		Vector<String[]> hdrv = new Vector<String[]>();

+		try {

+			BufferedReader br = new BufferedReader(new FileReader(metafile));

+			String s = br.readLine();

+			int i = s.indexOf('\t');

+			method = s.substring(0, i);

+			if (!"DELETE".equals(method) && !monly) {

+				length = datafile.length();

+			}

+			fileid = s.substring(i + 1);

+			while ((s = br.readLine()) != null) {

+				i = s.indexOf('\t');

+				String h = s.substring(0, i);

+				String v = s.substring(i + 1);

+				if ("x-att-dr-routing".equalsIgnoreCase(h)) {

+					subid = v.replaceAll("[^ ]*/", "");

+					feedid = dth.getFeedId(subid.replaceAll(" .*", ""));

+				}

+				if (length == 0 && h.toLowerCase().startsWith("content-")) {

+					continue;

+				}

+				if (h.equalsIgnoreCase("content-type")) {

+					ctype = v;

+				}

+				hdrv.add(new String[] {h, v});

+			}

+			br.close();

+		} catch (Exception e) {

+		}

+		hdrs = hdrv.toArray(new String[hdrv.size()][]);

+		url = dth.getDestURL(fileid);

+	}

+	/**

+	 *	Get the publish ID

+	 */

+	public String getPublishId() {

+		return(pubid);

+	}

+	/**

+	 *	Attempt delivery

+	 */

+	public void run() {

+		attempts++;

+		try {

+			di = dth.getDestInfo();

+			boolean expect100 = di.isUsing100();

+			boolean monly = di.isMetaDataOnly();

+			length = 0;

+			if (!"DELETE".equals(method) && !monly) {

+				length = datafile.length();

+			}

+			url = dth.getDestURL(fileid);

+			URL u = new URL(url);

+			HttpURLConnection uc = (HttpURLConnection)u.openConnection();

+			uc.setConnectTimeout(60000);

+			uc.setReadTimeout(60000);

+			uc.setInstanceFollowRedirects(false);

+			uc.setRequestMethod(method);

+			uc.setRequestProperty("Content-Length", Long.toString(length));

+			uc.setRequestProperty("Authorization", di.getAuth());

+			uc.setRequestProperty("X-ATT-DR-PUBLISH-ID", pubid);

+			for (String[] nv: hdrs) {

+				uc.addRequestProperty(nv[0], nv[1]);

+			}

+			if (length > 0) {

+				if (expect100) {

+					uc.setRequestProperty("Expect", "100-continue");

+				}

+				uc.setFixedLengthStreamingMode(length);

+				uc.setDoOutput(true);

+				OutputStream os = null;

+				try {

+					os = uc.getOutputStream();

+				} catch (ProtocolException pe) {

+					dth.reportDeliveryExtra(this, -1L);

+					// Received an error response instead of 100-continue

+				}

+				if (os != null) {

+					long sofar = 0;

+					try {

+						byte[] buf = new byte[1024 * 1024];

+						InputStream is = new FileInputStream(datafile);

+						while (sofar < length) {

+							int i = buf.length;

+							if (sofar + i > length) {

+								i = (int)(length - sofar);

+							}

+							i = is.read(buf, 0, i);

+							if (i <= 0) {

+								throw new IOException("Unexpected problem reading data file " + datafile);

+							}

+							sofar += i;

+							os.write(buf, 0, i);

+						}

+						is.close();

+						os.close();

+					} catch (IOException ioe) {

+						dth.reportDeliveryExtra(this, sofar);

+						throw ioe;

+					}

+				}

+			}

+			int rc = uc.getResponseCode();

+			String rmsg = uc.getResponseMessage();

+			if (rmsg == null) {

+				String h0 = uc.getHeaderField(0);

+				if (h0 != null) {

+					int i = h0.indexOf(' ');

+					int j = h0.indexOf(' ', i + 1);

+					if (i != -1 && j != -1) {

+						rmsg = h0.substring(j + 1);

+					}

+				}

+			}

+			String xpubid = null;

+			InputStream is;

+			if (rc >= 200 && rc <= 299) {

+				is = uc.getInputStream();

+				xpubid = uc.getHeaderField("X-ATT-DR-PUBLISH-ID");

+			} else {

+				if (rc >= 300 && rc <= 399) {

+					rmsg = uc.getHeaderField("Location");

+				}

+				is = uc.getErrorStream();

+			}

+			byte[] buf = new byte[4096];

+			if (is != null) {

+				while (is.read(buf) > 0) {

+				}

+				is.close();

+			}

+			dth.reportStatus(this, rc, xpubid, rmsg);

+		} catch (Exception e) {

+			dth.reportException(this, e);

+		}

+	}

+	/**

+	 *	Remove meta and data files

+	 */

+	public void clean() {

+		datafile.delete();

+		metafile.delete();

+		hdrs = null;

+	}

+	/**

+	 *	Has this delivery task been cleaned?

+	 */

+	public boolean isCleaned() {

+		return(hdrs == null);

+	}

+	/**

+	 *	Get length of body

+	 */

+	public long	getLength() {

+		return(length);

+	}

+	/**

+	 *	Get creation date as encoded in the publish ID.

+	 */

+	public long	getDate() {

+		return(date);

+	}

+	/**

+	 *	Get the most recent delivery attempt URL

+	 */

+	public String getURL() {

+		return(url);

+	}

+	/**

+	 *	Get the content type

+	 */

+	public String	getCType() {

+		return(ctype);

+	}

+	/**

+	 *	Get the method

+	 */

+	public String	getMethod() {

+		return(method);

+	}

+	/**

+	 *	Get the file ID

+	 */

+	public String	getFileId() {

+		return(fileid);

+	}

+	/**

+	 *	Get the number of delivery attempts

+	 */

+	public int	getAttempts() {

+		return(attempts);

+	}

+	/**

+	 *	Get the subscription ID (or space-delimited list of subscription IDs) for this delivery task

+	 */

+	public String	getSubId() {

+		return(subid);

+	}

+	/**

+	 *	Get the feed ID for this delivery task

+	 */

+	public String	getFeedId() {

+		return(feedid);

+	}

+}
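
To make the constructor's parsing concrete: each spooled item is a data file named after the publish ID plus a companion metafile with a ".M" suffix, whose first line is the HTTP method and file ID separated by a tab, followed by one "header<TAB>value" line per header to forward. A hedged sketch of spooling one item (the directory, publish ID, file ID and headers are hypothetical):

    import java.io.*;
    import java.nio.file.*;

    class SpoolExample {
        static void spoolOneItem() throws IOException {
            String spool = "/opt/app/datartr/spool/s/100/101";      // hypothetical spool directory
            String pubid = "1502347261234.dr-node-1.example.com";   // <millis since 1970>.<fqdn>
            Files.copy(Paths.get("/tmp/payload.bin"), Paths.get(spool, pubid));
            try (Writer w = new FileWriter(spool + "/" + pubid + ".M")) {
                w.write("PUT\t/publish/10/payload.bin\n");           // method <TAB> file id
                w.write("Content-Type\tapplication/octet-stream\n"); // forwarded header lines
            }
            // A DeliveryQueue scanning this spool will wrap the pair in a DeliveryTask.
        }
    }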

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTaskHelper.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTaskHelper.java
new file mode 100644
index 0000000..702bb29
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/DeliveryTaskHelper.java
@@ -0,0 +1,72 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+/**

+ *	Interface to allow independent testing of the DeliveryTask code.

+ *	<p>

+ *	This interface represents all the configuration information and

+ *	feedback mechanisms that a delivery task needs.

+ */

+

+public interface DeliveryTaskHelper	{

+	/**

+	 *	Report that a delivery attempt failed due to an exception (like can't connect to remote host)

+	 *	@param task	The task that failed

+	 *	@param exception	The exception that occurred

+	 */

+	public void reportException(DeliveryTask task, Exception exception);

+	/**

+	 *	Report that a delivery attempt completed (successfully or unsuccessfully)

+	 *	@param task	The task whose delivery attempt completed

+	 *	@param status	The HTTP status

+	 *	@param xpubid	The publish ID from the far end (if any)

+	 *	@param location	The redirection location for a 3XX response

+	 */

+	public void reportStatus(DeliveryTask task, int status, String xpubid, String location);

+	/**

+	 *	Report that a delivery attempt either failed while sending data or that an error was returned instead of a 100 Continue.

+	 *	@param task	The task that failed

+	 *	@param sent	The number of bytes sent or -1 if an error was returned instead of 100 Continue.

+	 */

+	public void reportDeliveryExtra(DeliveryTask task, long sent);

+	/**

+	 *	Get the destination information for the delivery queue

+	 *	@return	The destination information

+	 */

+	public DestInfo getDestInfo();

+	/**

+	 *	Given a file ID, get the URL to deliver to

+	 *	@param fileid	The file id

+	 *	@return	The URL to deliver to

+	 */

+	public String	getDestURL(String fileid);

+	/**

+	 *	Get the feed ID for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return	The feed ID

+	 */

+	public String	getFeedId(String subid);

+}
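
In this import, DeliveryQueue itself acts as the DeliveryTaskHelper for the tasks it creates (it constructs them as new DeliveryTask(this, pubid) and receives the reportStatus/reportException/reportDeliveryExtra callbacks described above); the interface exists mainly so DeliveryTask can be exercised independently of a real queue.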

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/DestInfo.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/DestInfo.java
new file mode 100644
index 0000000..e57fef8
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/DestInfo.java
@@ -0,0 +1,132 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+/**

+ *	Information for a delivery destination that doesn't change from message to message

+ */

+public class DestInfo	{

+	private String	name;

+	private String	spool;

+	private String	subid;

+	private String	logdata;

+	private String	url;

+	private String	authuser;

+	private String	authentication;

+	private boolean	metaonly;

+	private boolean	use100;

+	/**

+	 *	Create a destination information object.

+	 *	@param	name	n:fqdn or s:subid

+	 *	@param	spool	The directory where files are spooled.

+	 *	@param	subid	The subscription ID (if applicable).

+	 *	@param	logdata	Text to be included in log messages

+	 *	@param	url	The URL to deliver to.

+	 *	@param	authuser	The auth user for logging.

+	 *	@param	authentication	The credentials.

+	 *	@param	metaonly	Is this a metadata only delivery?

+	 *	@param	use100	Should I use expect 100-continue?

+	 */

+	public DestInfo(String name, String spool, String subid, String logdata, String url, String authuser, String authentication, boolean metaonly, boolean use100) {

+		this.name = name;

+		this.spool = spool;

+		this.subid = subid;

+		this.logdata = logdata;

+		this.url = url;

+		this.authuser = authuser;

+		this.authentication = authentication;

+		this.metaonly = metaonly;

+		this.use100 = use100;

+	}

+	public boolean equals(Object o) {

+		return((o instanceof DestInfo) && ((DestInfo)o).spool.equals(spool));

+	}

+	public int hashCode() {

+		return(spool.hashCode());

+	}

+	/**

+	 *	Get the name of this destination

+	 */

+	public String getName() {

+		return(name);

+	}

+	/**

+	 *	Get the spool directory for this destination.

+	 *	@return	The spool directory

+	 */

+	public String getSpool() {

+		return(spool);

+	}

+	/**

+	 *	Get the subscription ID.

+	 *	@return	Subscription ID or null if this is a node to node delivery.

+	 */

+	public String getSubId() {

+		return(subid);

+	}

+	/**

+	 *	Get the log data.

+	 *	@return	Text to be included in a log message about delivery attempts.

+	 */

+	public String getLogData() {

+		return(logdata);

+	}

+	/**

+	 *	Get the delivery URL.

+	 *	@return	The URL to deliver to (the primary URL).

+	 */

+	public String getURL() {

+		return(url);

+

+	}

+	/**

+	 *	Get the user for authentication

+	 *	@return	The name of the user for logging

+	 */

+	public String	getAuthUser() {

+		return(authuser);

+	}

+	/**

+	 *	Get the authentication header

+	 *	@return	The string to use to authenticate to the recipient.

+	 */

+	public String getAuth() {

+		return(authentication);

+	}

+	/**

+	 *	Is this a metadata only delivery?

+	 *	@return	True if this is a metadata only delivery

+	 */

+	public boolean	isMetaDataOnly() {

+		return(metaonly);

+	}

+	/**

+	 *	Should I send expect 100-continue header?

+	 *	@return	True if I should.

+	 */

+	public boolean isUsing100() {

+		return(use100);

+	}

+}

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/IsFrom.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/IsFrom.java
new file mode 100644
index 0000000..bb3e413
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/IsFrom.java
@@ -0,0 +1,82 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+import java.net.*;

+

+/**

+ *	Determine whether an IP address belongs to a particular machine (identified by its FQDN)

+ */

+public class IsFrom	{

+	private long	nextcheck;

+	private String[] ips;

+	private String	fqdn;

+	/**

+	 *	Configure the JVM DNS cache to have a 10 second TTL.  This must be called very early (before any name lookups occur) or it will have no effect.

+	 */

+	public static void setDNSCache() {

+		java.security.Security.setProperty("networkaddress.cache.ttl", "10");

+	}

+	/**

+	 *	Create an IsFrom for the specified fully qualified domain name.

+	 */

+	public IsFrom(String fqdn) {

+		this.fqdn = fqdn;

+	}

+	/**

+	 *	Check if an IP address matches.  If it has been more than

+	 *	10 seconds since DNS was last checked for changes to the

+	 *	IP address(es) of this FQDN, check again.  Then check

+	 *	if the specified IP address belongs to the FQDN.

+	 */

+	public synchronized boolean isFrom(String ip) {

+		long now = System.currentTimeMillis();

+		if (now > nextcheck) {

+			nextcheck = now + 10000;

+			Vector<String> v = new Vector<String>();

+			try {

+				InetAddress[] addrs = InetAddress.getAllByName(fqdn);

+				for (InetAddress a: addrs) {

+					v.add(a.getHostAddress());

+				}

+			} catch (Exception e) {

+			}

+			ips = v.toArray(new String[v.size()]);

+		}

+		for (String s: ips) {

+			if (s.equals(ip)) {

+				return(true);

+			}

+		}

+		return(false);

+	}

+	/**

+	 *	Return the fully qualified domain name

+	 */

+	public String toString() {

+		return(fqdn);

+	}

+}
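
A minimal usage sketch (illustrative only; the hostname and address are made up):

    IsFrom.setDNSCache();                               // call very early, before any lookups
    IsFrom provCheck = new IsFrom("prov.example.com");  // hypothetical provisioning FQDN
    String clientIp = "192.0.2.10";                     // e.g. taken from ServletRequest.getRemoteAddr()
    boolean trusted = provCheck.isFrom(clientIp);       // re-resolves the FQDN at most every 10 seconds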

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/LogManager.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/LogManager.java
new file mode 100644
index 0000000..078deaa
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/LogManager.java
@@ -0,0 +1,159 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.node;

+

+import java.util.*;

+import java.util.regex.*;

+import java.io.*;

+import java.nio.file.*;

+import java.text.*;

+

+/**

+ *	Cleanup of old log files.

+ *	<p>

+ *	Periodically scan the log directory for log files that are older than

+ *	the log file retention interval, and delete them.  In a future release,

+ *	this class will also be responsible for uploading event logs to the

+ *	log server to support the log query APIs.

+ */

+

+public class LogManager	extends TimerTask	{

+	private NodeConfigManager	config;

+	private Matcher	isnodelog;

+	private Matcher	iseventlog;

+	private Uploader	worker;

+	private String	uploaddir;

+	private String	logdir;

+	private class Uploader extends Thread implements DeliveryQueueHelper {

+		public long getInitFailureTimer() { return(10000L); }

+		public double getFailureBackoff() { return(2.0); }

+		public long getMaxFailureTimer() { return(150000L); }

+		public long getExpirationTimer() { return(604800000L); }

+		public int getFairFileLimit() { return(10000); }

+		public long getFairTimeLimit() { return(86400000); }

+		public String getDestURL(DestInfo dest, String fileid) {

+			return(config.getEventLogUrl());

+		}

+		public void handleUnreachable(DestInfo dest) {}

+		public boolean handleRedirection(DestInfo dest, String location, String fileid) { return(false); }

+		public boolean isFollowRedirects() { return(false); }

+		public String getFeedId(String subid) { return(null); }

+		private DeliveryQueue dq;

+		public Uploader() {

+			dq = new DeliveryQueue(this, new DestInfo("LogUpload", uploaddir, null, null, null, config.getMyName(), config.getMyAuth(), false, false));

+			setDaemon(true);

+			setName("Log Uploader");

+			start();

+		}

+		private synchronized void snooze() {

+			try {

+				wait(10000);

+			} catch (Exception e) {

+			}

+		}

+		private synchronized void poke() {

+			notify();

+		}

+		public void run() {

+			while (true) {

+				scan();

+				dq.run();

+				snooze();

+			}

+		}

+		private void scan() {

+			long threshold = System.currentTimeMillis() - config.getLogRetention();

+			File dir = new File(logdir);

+			String[] fns = dir.list();

+			Arrays.sort(fns);

+			String lastqueued = "events-000000000000.log";

+			String curlog = StatusLog.getCurLogFile();

+			curlog = curlog.substring(curlog.lastIndexOf('/') + 1);

+			try {

+				Writer w = new FileWriter(uploaddir + "/.meta");

+				w.write("POST\tlogdata\nContent-Type\ttext/plain\n");

+				w.close();

+				BufferedReader br = new BufferedReader(new FileReader(uploaddir + "/.lastqueued"));

+				lastqueued = br.readLine();

+				br.close();

+			} catch (Exception e) {

+			}

+			for (String fn: fns) {

+				if (!isnodelog.reset(fn).matches()) {

+					if (!iseventlog.reset(fn).matches()) {

+						continue;

+					}

+					if (lastqueued.compareTo(fn) < 0 && curlog.compareTo(fn) > 0) {

+						lastqueued = fn;

+						try {

+							String pid = config.getPublishId();

+							Files.createLink(Paths.get(uploaddir + "/" + pid), Paths.get(logdir + "/" + fn));

+							Files.createLink(Paths.get(uploaddir + "/" + pid + ".M"), Paths.get(uploaddir + "/.meta"));

+						} catch (Exception e) {

+						}

+					}

+				}

+				File f = new File(dir, fn);

+				if (f.lastModified() < threshold) {

+					f.delete();

+				}

+			}

+			try {

+				(new File(uploaddir + "/.meta")).delete();

+				Writer w = new FileWriter(uploaddir + "/.lastqueued");

+				w.write(lastqueued + "\n");

+				w.close();

+			} catch (Exception e) {

+			}

+		}

+	}

+	/**

+	 *	Construct a log manager

+	 *	<p>

+	 *	The log manager will check for expired log files every 5 minutes

+	 *	at 20 seconds after the 5 minute boundary.  (Actually, the

+	 *	interval is the event log rollover interval, which

+	 *	defaults to 5 minutes).

+	 */

+	public LogManager(NodeConfigManager config) {

+		this.config = config;

+		try {

+			isnodelog = Pattern.compile("node\\.log\\.\\d{8}").matcher("");

+			iseventlog = Pattern.compile("events-\\d{12}\\.log").matcher("");

+		} catch (Exception e) {}

+		logdir = config.getLogDir();

+		uploaddir = logdir + "/.spool";

+		(new File(uploaddir)).mkdirs();

+		long now = System.currentTimeMillis();

+		long intvl = StatusLog.parseInterval(config.getEventLogInterval(), 300000);

+		long when = now - now % intvl + intvl + 20000L;

+		config.getTimer().scheduleAtFixedRate(this, when - now, intvl);

+		worker = new Uploader();

+	}

+	/**

+	 *	Trigger check for expired log files and log files to upload

+	 */

+	public void run() {

+		worker.poke();

+	}

+}
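
A hedged wiring note: LogManager only needs to be constructed once, since its constructor schedules itself on the shared timer and starts the uploader thread. Presumably the node's startup code does something like:

    NodeConfigManager cfg = NodeConfigManager.getInstance();
    LogManager logManager = new LogManager(cfg);   // schedules the cleanup task and starts the uploader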

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfig.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfig.java
new file mode 100644
index 0000000..689f765
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfig.java
@@ -0,0 +1,722 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+import java.io.*;

+

+/**

+ *	Processed configuration for this node.

+ *	<p>

+ *	The NodeConfig represents a processed configuration from the Data Router

+ *	provisioning server.  Each time configuration data is received from the

+ *	provisioning server, a new NodeConfig is created and the previous one

+ *	discarded.

+ */

+public class NodeConfig	{

+	/**

+	 *	Raw configuration entry for a data router node

+	 */

+	public static class ProvNode {

+		private String cname;

+		/**

+		 *	Construct a node configuration entry.

+		 *	@param cname	The cname of the node.

+		 */

+		public ProvNode(String cname) {

+			this.cname = cname;

+		}

+		/**

+		 *	Get the cname of the node

+		 */

+		public String getCName() {

+			return(cname);

+		}

+	}

+	/**

+	 *	Raw configuration entry for a provisioning parameter

+	 */

+	public static class ProvParam {

+		private String name;

+		private String value;

+		/**

+		 *	Construct a provisioning parameter configuration entry.

+		 *	@param	name The name of the parameter.

+		 *	@param	value The value of the parameter.

+		 */

+		public ProvParam(String name, String value) {

+			this.name = name;

+			this.value = value;

+		}

+		/**

+		 *	Get the name of the parameter.

+		 */

+		public String getName() {

+			return(name);

+		}

+		/**

+		 *	Get the value of the parameter.

+		 */

+		public String getValue() {

+			return(value);

+		}

+	}

+	/**

+	 *	Raw configuration entry for a data feed.

+	 */

+	public static class ProvFeed {

+		private String id;

+		private String logdata;

+		private String status;

+		/**

+		 *	Construct a feed configuration entry.

+		 *	@param id	The feed ID of the entry.

+		 *	@param logdata	String for log entries about the entry.

+		 *	@param status	The reason why this feed cannot be used (Feed has been deleted, Feed has been suspended) or null if it is valid.

+		 */

+		public ProvFeed(String id, String logdata, String status) {

+			this.id = id;

+			this.logdata = logdata;

+			this.status = status;

+		}

+		/**

+		 *	Get the feed id of the data feed.

+		 */

+		public String getId() {

+			return(id);

+		}

+		/**

+		 *	Get the log data of the data feed.

+		 */

+		public String getLogData() {

+			return(logdata);

+		}

+		/**

+		 *	Get the status of the data feed.

+		 */

+		public String getStatus() {

+			return(status);

+		}

+	}

+	/**

+	 *	Raw configuration entry for a feed user.

+	 */

+	public static class ProvFeedUser	{

+		private String feedid;

+		private String user;

+		private String credentials;

+		/**

+		 *	Construct a feed user configuration entry

+		 *	@param feedid	The feed id.

+		 *	@param user	The user that will publish to the feed.

+		 *	@param credentials	The Authorization header the user will use to publish.

+		 */

+		public ProvFeedUser(String feedid, String user, String credentials) {

+			this.feedid = feedid;

+			this.user = user;

+			this.credentials = credentials;

+		}

+		/**

+		 *	Get the feed id of the feed user.

+		 */

+		public String getFeedId() {

+			return(feedid);

+		}

+		/**

+		 *	Get the user for the feed user.

+		 */

+		public String getUser() {

+			return(user);

+		}

+		/**

+		 *	Get the credentials for the feed user.

+		 */

+		public String getCredentials() {

+			return(credentials);

+		}

+	}

+	/**

+	 *	Raw configuration entry for a feed subnet

+	 */

+	public static class ProvFeedSubnet	{

+		private String feedid;

+		private String cidr;

+		/**

+		 *	Construct a feed subnet configuration entry

+		 *	@param feedid	The feed ID

+		 *	@param cidr	The CIDR allowed to publish to the feed.

+		 */

+		public ProvFeedSubnet(String feedid, String cidr) {

+			this.feedid = feedid;

+			this.cidr = cidr;

+		}

+		/**

+		 *	Get the feed id of the feed subnet.

+		 */

+		public String getFeedId() {

+			return(feedid);

+		}

+		/**

+		 *	Get the CIDR of the feed subnet.

+		 */

+		public String getCidr() {

+			return(cidr);

+		}

+	}

+	/**

+	 *	Raw configuration entry for a subscription

+	 */

+	public static class ProvSubscription	{

+		private String	subid;

+		private String	feedid;

+		private String	url;

+		private String	authuser;

+		private String	credentials;

+		private boolean	metaonly;

+		private boolean	use100;

+		/**

+		 *	Construct a subscription configuration entry

+		 *	@param subid	The subscription ID

+		 *	@param feedid	The feed ID

+		 *	@param url	The base delivery URL (not including the fileid)

+		 *	@param authuser	The user in the credentials used to deliver

+		 *	@param credentials	The credentials used to authenticate to the delivery URL exactly as they go in the Authorization header.

+		 *	@param metaonly	Is this a meta data only subscription?

+		 *	@param use100	Should we send Expect: 100-continue?

+		 */

+		public ProvSubscription(String subid, String feedid, String url, String authuser, String credentials, boolean metaonly, boolean use100) {

+			this.subid = subid;

+			this.feedid = feedid;

+			this.url = url;

+			this.authuser = authuser;

+			this.credentials = credentials;

+			this.metaonly = metaonly;

+			this.use100 = use100;

+		}

+		/**

+		 *	Get the subscription ID

+		 */

+		public String getSubId() {

+			return(subid);

+		}

+		/**

+		 *	Get the feed ID

+		 */

+		public String getFeedId() {

+			return(feedid);

+		}

+		/**

+		 *	Get the delivery URL

+		 */

+		public String getURL() {

+			return(url);

+		}

+		/**

+		 *	Get the user

+		 */

+		public String getAuthUser() {

+			return(authuser);

+		}

+		/**

+		 *	Get the delivery credentials

+		 */

+		public String getCredentials() {

+			return(credentials);

+		}

+		/**

+		 *	Is this a meta data only subscription?

+		 */

+		public boolean isMetaDataOnly() {

+			return(metaonly);

+		}

+		/**

+		 *	Should we send Expect: 100-continue?

+		 */

+		public boolean isUsing100() {

+			return(use100);

+		}

+	}

+	/**

+	 *	Raw configuration entry for controlled ingress to the data router node

+	 */

+	public static class ProvForceIngress	{

+		private String feedid;

+		private String subnet;

+		private String user;

+		private String[] nodes;

+		/**

+		 *	Construct a forced ingress configuration entry

+		 *	@param feedid	The feed ID that this entry applies to

+		 *	@param subnet	The CIDR limiting the publisher IP addresses this entry applies to, or "" if it applies to all publisher IP addresses

+		 *	@param user	The publishing user this entry applies to or "" if it applies to all publishing users.

+		 *	@param nodes	The array of FQDNs of the data router nodes to redirect publication attempts to.

+		 */

+		public ProvForceIngress(String feedid, String subnet, String user, String[] nodes) {

+			this.feedid = feedid;

+			this.subnet = subnet;

+			this.user = user;

+			this.nodes = nodes;

+		}

+		/**

+		 *	Get the feed ID

+		 */

+		public String getFeedId() {

+			return(feedid);

+		}

+		/**

+		 *	Get the subnet

+		 */

+		public String getSubnet() {

+			return(subnet);

+		}

+		/**

+		 *	Get the user

+		 */

+		public String getUser() {

+			return(user);

+		}

+		/**

+		 *	Get the nodes

+		 */

+		public String[] getNodes() {

+			return(nodes);

+		}

+	}

+	/**

+	 *	Raw configuration entry for controlled egress from the data router

+	 */

+	public static class ProvForceEgress	{

+		private String subid;

+		private String node;

+		/**

+		 *	Construct a forced egress configuration entry

+		 *	@param subid	The subscription ID of the subscription with forced egress

+		 *	@param node	The node handling deliveries for this subscription

+		 */

+		public ProvForceEgress(String subid, String node) {

+			this.subid = subid;

+			this.node = node;

+		}

+		/**

+		 *	Get the subscription ID

+		 */

+		public String getSubId() {

+			return(subid);

+		}

+		/**

+		 *	Get the node

+		 */

+		public String getNode() {

+			return(node);

+		}

+	}

+	/**

+	 *	Raw configuration entry for routing within the data router network

+	 */

+	public static class ProvHop	{

+		private String	from;

+		private String	to;

+		private String	via;

+		/**

+		 *	A human readable description of this entry

+		 */

+		public String toString() {

+			return("Hop " + from + "->" + to + " via " + via);

+		}

+		/**

+		 *	Construct a hop entry

+		 *	@param from	The FQDN of the node with the data to be delivered

+		 *	@param to	The FQDN of the node that will deliver to the subscriber

+		 *	@param via	The FQDN of the node where the from node should send the data

+		 */

+		public ProvHop(String from, String to, String via) {

+			this.from = from;

+			this.to = to;

+			this.via = via;

+		}

+		/**

+		 *	Get the from node

+		 */

+		public String getFrom() {

+			return(from);

+		}

+		/**

+		 *	Get the to node

+		 */

+		public String getTo() {

+			return(to);

+		}

+		/**

+		 *	Get the next intermediate node

+		 */

+		public String getVia() {

+			return(via);

+		}

+	}

+	private static class Redirection	{

+		public SubnetMatcher snm;

+		public String user;

+		public String[] nodes;

+	}

+	private static class Feed	{

+		public String	loginfo;

+		public String	status;

+		public SubnetMatcher[] subnets;

+		public Hashtable<String, String> authusers = new Hashtable<String, String>();

+		public Redirection[]	redirections;

+		public Target[]	targets;

+	}

+	private Hashtable<String, String> params = new Hashtable<String, String>();

+	private Hashtable<String, Feed>	feeds = new Hashtable<String, Feed>();

+	private Hashtable<String, DestInfo> nodeinfo = new Hashtable<String, DestInfo>();

+	private Hashtable<String, DestInfo> subinfo = new Hashtable<String, DestInfo>();

+	private Hashtable<String, IsFrom> nodes = new Hashtable<String, IsFrom>();

+	private String	myname;

+	private String	myauth;

+	private DestInfo[]	alldests;

+	private int	rrcntr;

+	/**

+	 *	Process the raw provisioning data to configure this node

+	 *	@param pd	The parsed provisioning data

+	 *	@param myname	My name as seen by external systems

+	 *	@param spooldir	The directory where temporary files live

+	 *	@param port	The port number for URLs

+	 *	@param nodeauthkey	The keying string used to generate node authentication credentials

+	 */

+	public NodeConfig(ProvData pd, String myname, String spooldir, int port, String nodeauthkey) {

+		this.myname = myname;

+		for (ProvParam p: pd.getParams()) {

+			params.put(p.getName(), p.getValue());

+		}

+		Vector<DestInfo>	div = new Vector<DestInfo>();

+		myauth = NodeUtils.getNodeAuthHdr(myname, nodeauthkey);

+		for (ProvNode pn: pd.getNodes()) {

+			String cn = pn.getCName();

+			if (nodeinfo.get(cn) != null) {

+				continue;

+			}

+			String auth = NodeUtils.getNodeAuthHdr(cn, nodeauthkey);

+			DestInfo di = new DestInfo("n:" + cn, spooldir + "/n/" + cn, null, "n2n-" + cn, "https://" + cn + ":" + port + "/internal/publish", cn, myauth, false, true);

+			(new File(di.getSpool())).mkdirs();

+			div.add(di);

+			nodeinfo.put(cn, di);

+			nodes.put(auth, new IsFrom(cn));

+		}

+		PathFinder pf = new PathFinder(myname, nodeinfo.keySet().toArray(new String[nodeinfo.size()]), pd.getHops());

+		Hashtable<String, Vector<Redirection>> rdtab = new Hashtable<String, Vector<Redirection>>();

+		for (ProvForceIngress pfi: pd.getForceIngress()) {

+			Vector<Redirection> v = rdtab.get(pfi.getFeedId());

+			if (v == null) {

+				v = new Vector<Redirection>();

+				rdtab.put(pfi.getFeedId(), v);

+			}

+			Redirection r = new Redirection();

+			if (pfi.getSubnet() != null) {

+				r.snm = new SubnetMatcher(pfi.getSubnet());

+			}

+			r.user = pfi.getUser();

+			r.nodes = pfi.getNodes();

+			v.add(r);

+		}

+		Hashtable<String, Hashtable<String, String>> pfutab = new Hashtable<String, Hashtable<String, String>>();

+		for (ProvFeedUser pfu: pd.getFeedUsers()) {

+			Hashtable<String, String> t = pfutab.get(pfu.getFeedId());

+			if (t == null) {

+				t = new Hashtable<String, String>();

+				pfutab.put(pfu.getFeedId(), t);

+			}

+			t.put(pfu.getCredentials(), pfu.getUser());

+		}

+		Hashtable<String, String> egrtab = new Hashtable<String, String>();

+		for (ProvForceEgress pfe: pd.getForceEgress()) {

+			if (pfe.getNode().equals(myname) || nodeinfo.get(pfe.getNode()) == null) {

+				continue;

+			}

+			egrtab.put(pfe.getSubId(), pfe.getNode());

+		}

+		Hashtable<String, Vector<SubnetMatcher>> pfstab = new Hashtable<String, Vector<SubnetMatcher>>();

+		for (ProvFeedSubnet pfs: pd.getFeedSubnets()) {

+			Vector<SubnetMatcher> v = pfstab.get(pfs.getFeedId());

+			if (v == null) {

+				v = new Vector<SubnetMatcher>();

+				pfstab.put(pfs.getFeedId(), v);

+			}

+			v.add(new SubnetMatcher(pfs.getCidr()));

+		}

+		Hashtable<String, StringBuffer> ttab = new Hashtable<String, StringBuffer>();

+		HashSet<String> allfeeds = new HashSet<String>();

+		for (ProvFeed pfx: pd.getFeeds()) {

+			if (pfx.getStatus() == null) {

+				allfeeds.add(pfx.getId());

+			}

+		}

+		for (ProvSubscription ps: pd.getSubscriptions()) {

+			String sid = ps.getSubId();

+			String fid = ps.getFeedId();

+			if (!allfeeds.contains(fid)) {

+				continue;

+			}

+			if (subinfo.get(sid) != null) {

+				continue;

+			}

+			int sididx = 999;

+			try {

+				sididx = Integer.parseInt(sid);

+				sididx -= sididx % 100;

+			} catch (Exception e) {

+			}

+			String siddir = sididx + "/" + sid;

+			DestInfo di = new DestInfo("s:" + sid, spooldir + "/s/" + siddir, sid, fid, ps.getURL(), ps.getAuthUser(), ps.getCredentials(), ps.isMetaDataOnly(), ps.isUsing100());

+			(new File(di.getSpool())).mkdirs();

+			div.add(di);

+			subinfo.put(sid, di);

+			String egr = egrtab.get(sid);

+			if (egr != null) {

+				sid = pf.getPath(egr) + sid;

+			}

+			StringBuffer sb = ttab.get(fid);

+			if (sb == null) {

+				sb = new StringBuffer();

+				ttab.put(fid, sb);

+			}

+			sb.append(' ').append(sid);

+		}

+		alldests = div.toArray(new DestInfo[div.size()]);

+		for (ProvFeed pfx: pd.getFeeds()) {

+			String fid = pfx.getId();

+			Feed f = feeds.get(fid);

+			if (f != null) {

+				continue;

+			}

+			f = new Feed();

+			feeds.put(fid, f);

+			f.loginfo = pfx.getLogData();

+			f.status = pfx.getStatus();

+			Vector<SubnetMatcher> v1 = pfstab.get(fid);

+			if (v1 == null) {

+				f.subnets = new SubnetMatcher[0];

+			} else {

+				f.subnets = v1.toArray(new SubnetMatcher[v1.size()]);

+			}

+			Hashtable<String, String> h1 = pfutab.get(fid);

+			if (h1 == null) {

+				h1 = new Hashtable<String, String>();

+			}

+			f.authusers = h1;

+			Vector<Redirection> v2 = rdtab.get(fid);

+			if (v2 == null) {

+				f.redirections = new Redirection[0];

+			} else {

+				f.redirections = v2.toArray(new Redirection[v2.size()]);

+			}

+			StringBuffer sb = ttab.get(fid);

+			if (sb == null) {

+				f.targets = new Target[0];

+			} else {

+				f.targets = parseRouting(sb.toString());

+			}

+		}

+	}

+	/**

+	 *	Parse a target string into an array of targets

+	 *	@param routing	Space-separated target string

+	 *	@return	Array of targets.

+	 */

+	public Target[] parseRouting(String routing) {

+		routing = routing.trim();

+		if ("".equals(routing)) {

+			return(new Target[0]);

+		}

+		String[] xx = routing.split("\\s+");

+		Hashtable<String, Target> tmap = new Hashtable<String, Target>();

+		HashSet<String> subset = new HashSet<String>();

+		Vector<Target> tv = new Vector<Target>();

+		Target[] ret = new Target[xx.length];

+		for (int i = 0; i < xx.length; i++) {

+			String t = xx[i];

+			int j = t.indexOf('/');

+			if (j == -1) {

+				DestInfo di = subinfo.get(t);

+				if (di == null) {

+					tv.add(new Target(null, t));

+				} else {

+					if (!subset.contains(t)) {

+						subset.add(t);

+						tv.add(new Target(di, null));

+					}

+				}

+			} else {

+				String node = t.substring(0, j);

+				String rtg = t.substring(j + 1);

+				DestInfo di = nodeinfo.get(node);

+				if (di == null) {

+					tv.add(new Target(null, t));

+				} else {

+					Target tt = tmap.get(node);

+					if (tt == null) {

+						tt = new Target(di, rtg);

+						tmap.put(node, tt);

+						tv.add(tt);

+					} else {

+						tt.addRouting(rtg);

+					}

+				}

+			}

+		}

+		return(tv.toArray(new Target[tv.size()]));

+	}

+	/**

+	 *	Check whether this is a valid node-to-node transfer

+	 *	@param credentials	Credentials offered by the supposed node

+	 *	@param ip	IP address the request came from

+	 */

+	public boolean isAnotherNode(String credentials, String ip) {

+		IsFrom n = nodes.get(credentials);

+		return (n != null && n.isFrom(ip));

+	}

+	/**

+	 *	Check whether publication is allowed.

+	 *	@param feedid	The ID of the feed being requested.

+	 *	@param credentials	The offered credentials

+	 *	@param ip	The requesting IP address

+	 *	@return	null if publication is permitted, otherwise a string explaining why it is not

+	 */

+	public String isPublishPermitted(String feedid, String credentials, String ip) {

+		Feed f = feeds.get(feedid);

+		String nf = "Feed does not exist";

+		if (f != null) {

+			nf = f.status;

+		}

+		if (nf != null) {

+			return(nf);

+		}

+		String user = f.authusers.get(credentials);

+		if (user == null) {

+			return("Publisher not permitted for this feed");

+		}

+		if (f.subnets.length == 0) {

+			return(null);

+		}

+		byte[] addr = NodeUtils.getInetAddress(ip);

+		for (SubnetMatcher snm: f.subnets) {

+			if (snm.matches(addr)) {

+				return(null);

+			}

+		}

+		return("Publisher not permitted for this feed");

+	}

+	/**

+	 *	Get authenticated user

+	 */

+	public String getAuthUser(String feedid, String credentials) {

+		return(feeds.get(feedid).authusers.get(credentials));

+	}

+	/**

+	 *	Check if the request should be redirected to a different ingress node

+	 */

+	public String getIngressNode(String feedid, String user, String ip) {

+		Feed f = feeds.get(feedid);

+		if (f.redirections.length == 0) {

+			return(null);

+		}

+		byte[] addr = NodeUtils.getInetAddress(ip);

+		for (Redirection r: f.redirections) {

+			if (r.user != null && !user.equals(r.user)) {

+				continue;

+			}

+			if (r.snm != null && !r.snm.matches(addr)) {

+				continue;

+			}

+			for (String n: r.nodes) {

+				if (myname.equals(n)) {

+					return(null);

+				}

+			}

+			if (r.nodes.length == 0) {

+				return(null);

+			}

+			return(r.nodes[rrcntr++ % r.nodes.length]);

+		}

+		return(null);

+	}

+	/**

+	 *	Get a provisioned configuration parameter

+	 */

+	public String getProvParam(String name) {

+		return(params.get(name));

+	}

+	/**

+	 *	Get all the DestInfos

+	 */

+	public DestInfo[]	getAllDests() {

+		return(alldests);

+	}

+	/**

+	 *	Get the targets for a feed

+	 *	@param feedid	The feed ID

+	 *	@return	The targets this feed should be delivered to

+	 */

+	public Target[] getTargets(String feedid) {

+		if (feedid == null) {

+			return(new Target[0]);

+		}

+		Feed f = feeds.get(feedid);

+		if (f == null) {

+			return(new Target[0]);

+		}

+		return(f.targets);

+	}

+	/**

+	 *	Get the feed ID for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return	The feed ID

+	 */

+	public String getFeedId(String subid) {

+		DestInfo di = subinfo.get(subid);

+		if (di == null) {

+			return(null);

+		}

+		return(di.getLogData());

+	}

+	/**

+	 *	Get the spool directory for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return The spool directory

+	 */

+	public String getSpoolDir(String subid) {

+		DestInfo di = subinfo.get(subid);

+		if (di == null) {

+			return(null);

+		}

+		return(di.getSpool());

+	}

+	/**

+	 *	Get the Authorization value this node uses

+	 *	@return The Authorization header value for this node

+	 */

+	public String getMyAuth() {

+		return(myauth);

+	}

+

+}
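
To make the routing-string format concrete (illustrative only): the per-feed target string assembled above is a space-separated list in which a bare token is a subscription ID delivered locally and a "node-fqdn/subid" token is forwarded through another node. Given a constructed NodeConfig cfg, a hypothetical call would be:

    // "101" and "303" are handled locally if they are known subscriptions;
    // "202" is routed via dr-node-2.example.com (hypothetical FQDN).
    Target[] targets = cfg.parseRouting("101 dr-node-2.example.com/202 303");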

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfigManager.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfigManager.java
new file mode 100644
index 0000000..01ca442
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeConfigManager.java
@@ -0,0 +1,599 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.net.*;

+import java.util.*;

+import java.io.*;

+import org.apache.log4j.Logger;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.node.eelf.EelfMsgs;

+

+

+/**

+ *	Maintain the configuration of a Data Router node

+ *	<p>

+ *	The NodeConfigManager is the single point of contact for servlet, delivery, event logging, and log retention subsystems to access configuration information.  (Log4J has its own configuration mechanism).

+ *	<p>

+ *	There are two basic sets of configuration data.  The

+ *	static local configuration data, stored in a local configuration file (created

+ *	as part of installation by SWM), and the dynamic global

+ *	configuration data fetched from the data router provisioning server.

+ */

+public class NodeConfigManager implements DeliveryQueueHelper	{

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.node.NodeConfigManager");

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.NodeConfigManager");

+	private static NodeConfigManager	base = new NodeConfigManager();

+

+	private Timer timer = new Timer("Node Configuration Timer", true);

+	private long	maxfailuretimer;

+	private long	initfailuretimer;

+	private long	expirationtimer;

+	private double	failurebackoff;

+	private long	fairtimelimit;

+	private int	fairfilelimit;

+	private double	fdpstart;

+	private double	fdpstop;

+	private int	deliverythreads;

+	private String	provurl;

+	private String	provhost;

+	private IsFrom	provcheck;

+	private int	gfport;

+	private int	svcport;

+	private int	port;

+	private String	spooldir;

+	private String	logdir;

+	private long	logretention;

+	private String	redirfile;

+	private String	kstype;

+	private String	ksfile;

+	private String	kspass;

+	private String	kpass;

+	private String	tstype;

+	private String	tsfile;

+	private String	tspass;

+	private String	myname;

+	private RedirManager	rdmgr;

+	private RateLimitedOperation	pfetcher;

+	private NodeConfig	config;

+	private File	quiesce;

+	private PublishId	pid;

+	private String	nak;

+	private TaskList	configtasks = new TaskList();

+	private String	eventlogurl;

+	private String	eventlogprefix;

+	private String	eventlogsuffix;

+	private String	eventloginterval;

+	private boolean	followredirects;

+

+	

+	/**

+	 *	Get the default node configuration manager

+	 */

+	public static NodeConfigManager getInstance() {

+		return(base);

+	}

+	/**

+	 *	Initialize the configuration of a Data Router node

+	 */

+	private NodeConfigManager() {

+		Properties p = new Properties();

+		try {

+			p.load(new FileInputStream(System.getProperty("com.att.research.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties")));

+		} catch (Exception e) {

+			

+			NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");

+			eelflogger.error(EelfMsgs.MESSAGE_PROPERTIES_LOAD_ERROR);

+			logger.error("NODE0301 Unable to load local configuration file " + System.getProperty("com.att.research.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties"), e);

+		}

+		provurl = p.getProperty("ProvisioningURL", "https://feeds-drtr.web.att.com/internal/prov");

+		try {

+			provhost = (new URL(provurl)).getHost();

+		} catch (Exception e) {

+			NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");

+			eelflogger.error(EelfMsgs.MESSAGE_BAD_PROV_URL, provurl);

+			logger.error("NODE0302 Bad provisioning server URL " + provurl);

+			System.exit(1);

+		}

+		logger.info("NODE0303 Provisioning server is " + provhost);

+		eventlogurl = p.getProperty("LogUploadURL", "https://feeds-drtr.web.att.com/internal/logs");

+		provcheck = new IsFrom(provhost);

+		gfport = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));

+		svcport = Integer.parseInt(p.getProperty("IntHttpsPort", "8443"));

+		port = Integer.parseInt(p.getProperty("ExtHttpsPort", "443"));

+		long minpfinterval = Long.parseLong(p.getProperty("MinProvFetchInterval", "10000"));

+		long minrsinterval = Long.parseLong(p.getProperty("MinRedirSaveInterval", "10000"));

+		spooldir = p.getProperty("SpoolDir", "spool");

+		File fdir = new File(spooldir + "/f");

+		fdir.mkdirs();

+		for (File junk: fdir.listFiles()) {

+			if (junk.isFile()) {

+				junk.delete();

+			}

+		}

+		logdir = p.getProperty("LogDir", "logs");

+		(new File(logdir)).mkdirs();

+		logretention = Long.parseLong(p.getProperty("LogRetention", "30")) * 86400000L;

+		eventlogprefix = logdir + "/events";

+		eventlogsuffix = ".log";

+		redirfile = p.getProperty("RedirectionFile", "etc/redirections.dat");

+		kstype = p.getProperty("KeyStoreType", "jks");

+		ksfile = p.getProperty("KeyStoreFile", "etc/keystore");

+		kspass = p.getProperty("KeyStorePassword", "changeme");

+		kpass = p.getProperty("KeyPassword", "changeme");

+		tstype = p.getProperty("TrustStoreType", "jks");

+		tsfile = p.getProperty("TrustStoreFile");

+		tspass = p.getProperty("TrustStorePassword", "changeme");

+		if (tsfile != null && tsfile.length() > 0) {

+			System.setProperty("javax.net.ssl.trustStoreType", tstype);

+			System.setProperty("javax.net.ssl.trustStore", tsfile);

+			System.setProperty("javax.net.ssl.trustStorePassword", tspass);

+		}

+		nak = p.getProperty("NodeAuthKey", "Node123!");

+		quiesce = new File(p.getProperty("QuiesceFile", "etc/SHUTDOWN"));

+		myname = NodeUtils.getCanonicalName(kstype, ksfile, kspass);

+		if (myname == null) {

+			NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");

+			eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_FETCH_ERROR, ksfile);

+			logger.error("NODE0309 Unable to fetch canonical name from keystore file " + ksfile);

+			System.exit(1);

+		}

+		logger.info("NODE0304 My certificate says my name is " + myname);

+		pid = new PublishId(myname);

+		rdmgr = new RedirManager(redirfile, minrsinterval, timer);

+		pfetcher = new RateLimitedOperation(minpfinterval, timer) {

+			public void run() {

+				fetchconfig();

+			}

+		};

+		logger.info("NODE0305 Attempting to fetch configuration at " + provurl);

+		pfetcher.request();

+	}

+	private void localconfig() {

+		followredirects = Boolean.parseBoolean(getProvParam("FOLLOW_REDIRECTS", "false"));

+		eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");

+		initfailuretimer = 10000;

+		maxfailuretimer = 3600000;

+		expirationtimer = 86400000;

+		failurebackoff = 2.0;

+		deliverythreads = 40;

+		fairfilelimit = 100;

+		fairtimelimit = 60000;

+		fdpstart = 0.05;

+		fdpstop = 0.2;

+		try { initfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_INIT_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}

+		try { maxfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}

+		try { expirationtimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_AGE")) * 1000); } catch (Exception e) {}

+		try { failurebackoff = Double.parseDouble(getProvParam("DELIVERY_RETRY_RATIO")); } catch (Exception e) {}

+		try { deliverythreads = Integer.parseInt(getProvParam("DELIVERY_THREADS")); } catch (Exception e) {}

+		try { fairfilelimit = Integer.parseInt(getProvParam("FAIR_FILE_LIMIT")); } catch (Exception e) {}

+		try { fairtimelimit = (long)(Double.parseDouble(getProvParam("FAIR_TIME_LIMIT")) * 1000); } catch (Exception e) {}

+		try { fdpstart = Double.parseDouble(getProvParam("FREE_DISK_RED_PERCENT")) / 100.0; } catch (Exception e) {}

+		try { fdpstop = Double.parseDouble(getProvParam("FREE_DISK_YELLOW_PERCENT")) / 100.0; } catch (Exception e) {}

+		if (fdpstart < 0.01) {

+			fdpstart = 0.01;

+		}

+		if (fdpstart > 0.5) {

+			fdpstart = 0.5;

+		}

+		if (fdpstop < fdpstart) {

+			fdpstop = fdpstart;

+		}

+		if (fdpstop > 0.5) {

+			fdpstop = 0.5;

+		}

+	}

+	private void fetchconfig() {

+		try {

+			System.out.println("provurl:: "+provurl);

+			Reader r = new InputStreamReader((new URL(provurl)).openStream());

+			config = new NodeConfig(new ProvData(r), myname, spooldir, port, nak);

+			localconfig();

+			configtasks.startRun();

+			Runnable rr;

+			while ((rr = configtasks.next()) != null) {

+				try {

+					rr.run();

+				} catch (Exception e) {

+				}

+			}

+		} catch (Exception e) {

+			e.printStackTrace();

+			NodeUtils.setIpAndFqdnForEelf("fetchconfigs");

+			eelflogger.error(EelfMsgs.MESSAGE_CONF_FAILED, e.toString());

+			logger.error("NODE0306 Configuration failed " + e.toString() + " - try again later", e);

+			pfetcher.request();

+		}

+	}

+	/**

+	 *	Process a gofetch request from a particular IP address.  If the

+	 *	IP address is not one we would contact to fetch the

+	 *	provisioning data, ignore the request.  If the data has been

+	 *	fetched very recently (default 10 seconds), wait a while before fetching again.

+	 */

+	public synchronized void gofetch(String remoteaddr) {

+		if (provcheck.isFrom(remoteaddr)) {

+			logger.info("NODE0307 Received configuration fetch request from provisioning server " + remoteaddr);

+			pfetcher.request();

+		} else {

+			logger.info("NODE0308 Received configuration fetch request from unexpected server " + remoteaddr);

+		}

+	}

+	/**

+	 *	Am I configured?

+	 */

+	public boolean isConfigured() {

+		return(config != null);

+	}

+	/**

+	 *	Am I shut down?

+	 */

+	public boolean isShutdown() {

+		return(quiesce.exists());

+	}

+	/**

+	 *	Given a routing string, get the targets.

+	 *	@param routing	Target string

+	 *	@return	array of targets

+	 */

+	public Target[] parseRouting(String routing) {

+		return(config.parseRouting(routing));

+	}

+	/**

+	 *	Given a set of credentials and an IP address, is this request from another node?

+	 *	@param credentials	Credentials offered by the supposed node

+	 *	@param ip	IP address the request came from

+	 *	@return	If the credentials and IP address are recognized, true, otherwise false.

+	 */

+	public boolean isAnotherNode(String credentials, String ip) {

+		return(config.isAnotherNode(credentials, ip));

+	}

+	/**

+	 *	Check whether publication is allowed.

+	 *	@param feedid	The ID of the feed being requested

+	 *	@param credentials	The offered credentials

+	 *	@param ip	The requesting IP address

+	 *	@return	Null if the publish request is permitted, otherwise a string giving the reason it is denied.

+	 */

+	public String isPublishPermitted(String feedid, String credentials, String ip) {

+		return(config.isPublishPermitted(feedid, credentials, ip));

+	}

+	/**

+	 *	Check who the user is given the feed ID and the offered credentials.

+	 *	@param feedid	The ID of the feed specified

+	 *	@param credentials	The offered credentials

+	 *	@return	Null if the credentials are invalid or the user if they are valid.

+	 */

+	public String getAuthUser(String feedid, String credentials) {

+		return(config.getAuthUser(feedid, credentials));

+	}

+	/**

+	 *	Check if the publish request should be sent to another node based on the feedid, user, and source IP address.

+	 *	@param feedid	The ID of the feed specified

+	 *	@param user	The publishing user

+	 *	@param ip	The IP address of the publish endpoint

+	 *	@return	Null if the request should be accepted or the correct hostname if it should be sent to another node.

+	 */

+	public String getIngressNode(String feedid, String user, String ip) {

+		return(config.getIngressNode(feedid, user, ip));

+	}

+	/**

+	 *	Get a provisioned configuration parameter (from the provisioning server configuration)

+	 *	@param name	The name of the parameter

+	 *	@return	The value of the parameter or null if it is not defined.

+	 */

+	public String getProvParam(String name) {

+		return(config.getProvParam(name));

+	}

+	/**

+	 *	Get a provisioned configuration parameter (from the provisioning server configuration)

+	 *	@param name	The name of the parameter

+	 *	@param deflt	The value to use if the parameter is not defined

+	 *	@return	The value of the parameter or deflt if it is not defined.

+	 */

+	public String getProvParam(String name, String deflt) {

+		name = config.getProvParam(name);

+		if (name == null) {

+			name = deflt;

+		}

+		return(name);

+	}

+	/**

+	 *	Generate a publish ID

+	 */

+	public String getPublishId() {

+		return(pid.next());

+	}

+	/**

+	 *	Get all the outbound spooling destinations.

+	 *	This will include both subscriptions and nodes.

+	 */

+	public DestInfo[] getAllDests() {

+		return(config.getAllDests());

+	}

+	/**

+	 *	Register a task to run whenever the configuration changes

+	 */

+	public void registerConfigTask(Runnable task) {

+		configtasks.addTask(task);

+	}

+	/**

+	 *	Deregister a task to run whenever the configuration changes

+	 */

+	public void deregisterConfigTask(Runnable task) {

+		configtasks.removeTask(task);

+	}

+	/**

+	 *	Get the URL to deliver a message to.

+	 *	@param destinfo	The destination information

+	 *	@param fileid	The file ID

+	 *	@return	The URL to deliver to

+	 */

+	public String getDestURL(DestInfo destinfo, String fileid) {

+		String subid = destinfo.getSubId();

+		String purl = destinfo.getURL();

+		if (followredirects && subid != null) {

+			purl = rdmgr.lookup(subid, purl);

+		}

+		return(purl + "/" + fileid);

+	}

+	/**

+	 *	Is a destination redirected?

+	 */

+	public boolean isDestRedirected(DestInfo destinfo) {

+		return(followredirects && rdmgr.isRedirected(destinfo.getSubId()));

+	}

+	/**

+	 *	Set up redirection on receipt of a 3XX from a target URL

+	 */

+	public boolean handleRedirection(DestInfo destinfo, String redirto, String fileid) {

+		fileid = "/" + fileid;

+		String subid = destinfo.getSubId();

+		String purl = destinfo.getURL();

+		if (followredirects && subid != null && redirto.endsWith(fileid)) {

+			redirto = redirto.substring(0, redirto.length() - fileid.length());

+			if (!redirto.equals(purl)) {

+				rdmgr.redirect(subid, purl, redirto);

+				return(true);

+			}

+		}

+		return(false);

+	}

+	/**

+	 *	Handle unreachable target URL

+	 */

+	public void handleUnreachable(DestInfo destinfo) {

+		String subid = destinfo.getSubId();

+		if (followredirects && subid != null) {

+			rdmgr.forget(subid);

+		}

+	}

+	/**

+	 *	Get the timeout before retrying after an initial delivery failure

+	 */

+	public long getInitFailureTimer() {

+		return(initfailuretimer);

+	}

+	/**

+	 *	Get the maximum timeout between delivery attempts

+	 */

+	public long getMaxFailureTimer() {

+		return(maxfailuretimer);

+	}

+	/**

+	 *	Get the ratio between consecutive delivery attempts

+	 */

+	public double getFailureBackoff() {

+		return(failurebackoff);

+	}

+	/**

+	 *	Get the expiration timer for deliveries

+	 */

+	public long getExpirationTimer() {

+		return(expirationtimer);

+	}

+	/**

+	 *	Get the maximum number of file delivery attempts before checking

+	 *	if another queue has work to be performed.

+	 */

+	public int getFairFileLimit() {

+		return(fairfilelimit);

+	}

+	/**

+	 *	Get the maximum amount of time spent delivering files before

+	 *	checking if another queue has work to be performed.

+	 */

+	public long getFairTimeLimit() {

+		return(fairtimelimit);

+	}

+	/**

+	 *	Get the targets for a feed

+	 *	@param feedid	The feed ID

+	 *	@return	The targets this feed should be delivered to

+	 */

+	public Target[] getTargets(String feedid) {

+		return(config.getTargets(feedid));

+	}

+	/**

+	 *	Get the spool directory for temporary files

+	 */

+	public String getSpoolDir() {

+		return(spooldir + "/f");

+	}

+	/**

+	 *	Get the base directory for spool directories

+	 */

+	public String getSpoolBase() {

+		return(spooldir);

+	}

+	/**

+	 *	Get the key store type

+	 */

+	public String getKSType() {

+		return(kstype);

+	}

+	/**

+	 *	Get the key store file

+	 */

+	public String getKSFile() {

+		return(ksfile);

+	}

+	/**

+	 *	Get the key store password

+	 */

+	public String getKSPass() {

+		return(kspass);

+	}

+	/**

+	 *	Get the key password

+	 */

+	public String getKPass() {

+		return(kpass);

+	}

+	/**

+	 *	Get the http port

+	 */

+	public int getHttpPort() {

+		return(gfport);

+	}

+	/**

+	 *	Get the https port

+	 */

+	public int getHttpsPort() {

+		return(svcport);

+	}

+	/**

+	 *	Get the externally visible https port

+	 */

+	public int getExtHttpsPort() {

+		return(port);

+	}

+	/**

+	 *	Get the external name of this machine

+	 */

+	public String getMyName() {

+		return(myname);

+	}

+	/**

+	 *	Get the number of threads to use for delivery

+	 */

+	public int	getDeliveryThreads() {

+		return(deliverythreads);

+	}

+	/**

+	 *	Get the URL for uploading the event log data

+	 */

+	public String	getEventLogUrl() {

+		return(eventlogurl);

+	}

+	/**

+	 *	Get the prefix for the names of event log files

+	 */

+	public String	getEventLogPrefix() {

+		return(eventlogprefix);

+	}

+	/**

+	 *	Get the suffix for the names of the event log files

+	 */

+	public String	getEventLogSuffix() {

+		return(eventlogsuffix);

+	}

+	/**

+	 *	Get the interval between event log file rollovers

+	 */

+	public String getEventLogInterval() {

+		return(eventloginterval);

+	}

+	/**

+	 *	Should I follow redirects from subscribers?

+	 */

+	public boolean isFollowRedirects() {

+		return(followredirects);

+	}

+	/**

+	 *	Get the directory where the event and node log files live

+	 */

+	public String getLogDir() {

+		return(logdir);

+	}

+	/**

+	 *	How long do I keep log files (in milliseconds)

+	 */

+	public long getLogRetention() {

+		return(logretention);

+	}

+	/**

+	 *	Get the timer

+	 */

+	public Timer getTimer() {

+		return(timer);

+	}

+	/**

+	 *	Get the feed ID for a subscription

+	 *	@param subid	The subscription ID

+	 *	@return	The feed ID

+	 */

+	public String getFeedId(String subid) {

+		return(config.getFeedId(subid));

+	}

+	/**

+	 *	Get the authorization string this node uses

+	 *	@return The Authorization string for this node

+	 */

+	public String getMyAuth() {

+		return(config.getMyAuth());

+	}

+	/**

+	 *	Get the fraction of free spool disk space where we start throwing away undelivered files.  This is FREE_DISK_RED_PERCENT / 100.0.  Default is 0.05.  Limited by 0.01 <= FreeDiskStart <= 0.5.

+	 */

+	public double getFreeDiskStart() {

+		return(fdpstart);

+	}

+	/**

+	 *	Get the fraction of free spool disk space where we stop throwing away undelivered files.  This is FREE_DISK_YELLOW_PERCENT / 100.0.  Default is 0.2.  Limited by FreeDiskStart <= FreeDiskStop <= 0.5.

+	 */

+	public double getFreeDiskStop() {

+		return(fdpstop);

+	}

+	/**

+	 *	Get the spool directory for a subscription

+	 */

+	public String getSpoolDir(String subid, String remoteaddr) {

+		if (provcheck.isFrom(remoteaddr)) {

+			String sdir = config.getSpoolDir(subid);

+			if (sdir != null) {

+				logger.info("NODE0310 Received subscription reset request for subscription " + subid + " from provisioning server " + remoteaddr);

+			} else {

+				logger.info("NODE0311 Received subscription reset request for unknown subscription " + subid + " from provisioning server " + remoteaddr);

+			}

+			return(sdir);

+		} else {

+			logger.info("NODE0312 Received subscription reset request from unexpected server " + remoteaddr);

+			return(null);

+		}

+	}

+}
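
The class comment above describes NodeConfigManager as the single point of contact for the servlet, delivery, and logging subsystems. As a hedged illustration of that contract (the SpoolCleaner class below is hypothetical and not part of this import), a subsystem would grab the singleton, register a config task so it is re-run whenever fetchconfig() pulls fresh provisioning data, and then read the getters it needs:

package com.att.research.datarouter.node;

// Hypothetical consumer of NodeConfigManager, for illustration only.
public class SpoolCleaner implements Runnable {
	private final NodeConfigManager ncm = NodeConfigManager.getInstance();

	public SpoolCleaner() {
		// Re-run this task every time new provisioning data is fetched
		ncm.registerConfigTask(this);
	}

	public void run() {
		if (!ncm.isConfigured()) {
			return;
		}
		// Read whatever this subsystem needs from the current configuration
		String spool = ncm.getSpoolDir();
		long retention = ncm.getLogRetention();
		// ... purge files under spool that are older than retention milliseconds ...
	}
}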

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeMain.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeMain.java
new file mode 100644
index 0000000..c939041
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeMain.java
@@ -0,0 +1,113 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import org.eclipse.jetty.servlet.*;

+import org.eclipse.jetty.util.ssl.*;

+import org.eclipse.jetty.server.*;

+import org.eclipse.jetty.server.nio.*;

+import org.eclipse.jetty.server.ssl.*;

+import org.apache.log4j.Logger;

+

+/**

+ *	The main starting point for the Data Router node

+ */

+public class NodeMain	{

+	private NodeMain() {}

+	private static Logger	logger = Logger.getLogger("com.att.research.datarouter.node.NodeMain");

+	private static class wfconfig implements Runnable	{

+		private NodeConfigManager ncm;

+		public wfconfig(NodeConfigManager ncm) {

+			this.ncm = ncm;

+		}

+		public synchronized void run() {

+			notify();

+		}

+		public synchronized void waitforconfig() {

+			ncm.registerConfigTask(this);

+			while (!ncm.isConfigured()) {

+				logger.info("NODE0003 Waiting for Node Configuration");

+				try {

+					wait();

+				} catch (Exception e) {

+				}

+			}

+			ncm.deregisterConfigTask(this);

+			logger.info("NODE0004 Node Configuration Data Received");

+		}

+	}

+	private static Delivery d;

+	private static NodeConfigManager ncm;

+	/**

+	 *	Reset the retry timer for a subscription

+	 */

+	public static void resetQueue(String subid, String ip) {

+		d.resetQueue(ncm.getSpoolDir(subid, ip));

+	}

+	/**

+	 *	Start the data router.

+	 *	<p>

+	 *	The location of the node configuration file can be set using the

+	 *	com.att.research.datarouter.node.ConfigFile system property.  By

+	 *	default, it is "/opt/app/datartr/etc/node.properties".

+	 */

+	public static void main(String[] args) throws Exception {

+		logger.info("NODE0001 Data Router Node Starting");

+		IsFrom.setDNSCache();

+		ncm = NodeConfigManager.getInstance();

+		logger.info("NODE0002 I am " + ncm.getMyName());

+		(new wfconfig(ncm)).waitforconfig();

+		d = new Delivery(ncm);

+		LogManager lm = new LogManager(ncm);

+		Server server = new Server();

+		SelectChannelConnector http = new SelectChannelConnector();

+		http.setPort(ncm.getHttpPort());

+		http.setMaxIdleTime(2000);

+		http.setRequestHeaderSize(2048);

+		SslSelectChannelConnector https = new SslSelectChannelConnector();

+		https.setPort(ncm.getHttpsPort());

+		https.setMaxIdleTime(30000);

+		https.setRequestHeaderSize(8192);

+		SslContextFactory cf = https.getSslContextFactory();

+		

+		/** SSLv3 fix: exclude SSLv3 from the enabled protocols */

+		cf.addExcludeProtocols("SSLv3");

+		logger.info("Excluded protocols node-" + java.util.Arrays.toString(cf.getExcludeProtocols()));

+		/** End of SSLv3 fix */

+

+		cf.setKeyStoreType(ncm.getKSType());

+		cf.setKeyStorePath(ncm.getKSFile());

+		cf.setKeyStorePassword(ncm.getKSPass());

+		cf.setKeyManagerPassword(ncm.getKPass());

+		server.setConnectors(new Connector[] { http, https });

+		ServletContextHandler ctxt = new ServletContextHandler(0);

+		ctxt.setContextPath("/");

+		server.setHandler(ctxt);

+		ctxt.addServlet(new ServletHolder(new NodeServlet()), "/*");

+		logger.info("NODE0005 Data Router Node Activating Service");

+		server.start();

+		server.join();

+	}

+}
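
The class comment above notes that the node configuration file location comes from the com.att.research.datarouter.node.ConfigFile system property. As a hedged illustration (the classpath placeholder below is not part of this import), a node could be started with an explicit override:

	java -cp <datarouter-node classpath> \
	     -Dcom.att.research.datarouter.node.ConfigFile=/opt/app/datartr/etc/node.properties \
	     com.att.research.datarouter.node.NodeMain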

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeServlet.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeServlet.java
new file mode 100644
index 0000000..e0ec1f5
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeServlet.java
@@ -0,0 +1,380 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import javax.servlet.*;

+import javax.servlet.http.*;

+import java.util.*;

+import java.util.regex.*;

+import java.io.*;

+import java.nio.file.*;

+import org.apache.log4j.Logger;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.node.eelf.EelfMsgs;

+

+import java.net.*;

+

+/**

+ *	Servlet for handling all http and https requests to the data router node

+ *	<p>

+ *	Handled requests are:

+ *	<br>

+ *	GET http://<i>node</i>/internal/fetchProv - fetch the provisioning data

+ *	<br>

+ *	PUT/DELETE https://<i>node</i>/internal/publish/<i>fileid</i> - n2n transfer

+ *	<br>

+ *	PUT/DELETE https://<i>node</i>/publish/<i>feedid</i>/<i>fileid</i> - publish request

+ */

+public class NodeServlet extends HttpServlet	{

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.NodeServlet");

+	private static NodeConfigManager	config;

+	private static Pattern	MetaDataPattern;

+	private static SubnetMatcher internalsubnet = new SubnetMatcher("135.207.136.128/25");

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.node.NodeServlet");

+

+	static {

+		try {

+			String ws = "\\s*";

+			// assume that \\ and \" have been replaced by X

+			String string = "\"[^\"]*\"";

+			//String string = "\"(?:[^\"\\\\]|\\\\.)*\"";

+			String number = "[+-]?(?:\\.\\d+|(?:0|[1-9]\\d*)(?:\\.\\d*)?)(?:[eE][+-]?\\d+)?";

+			String value = "(?:" + string + "|" + number + "|null|true|false)";

+			String item = string + ws + ":" + ws + value + ws;

+			String object = ws + "\\{" + ws + "(?:" + item + "(?:" + "," + ws + item + ")*)?\\}" + ws;

+			MetaDataPattern = Pattern.compile(object, Pattern.DOTALL);

+		} catch (Exception e) {

+		}

+	}

+	/**

+	 *	Get the NodeConfigManager

+	 */

+	public void init() {

+		config = NodeConfigManager.getInstance();

+		logger.info("NODE0101 Node Servlet Configured");

+	}

+	private boolean down(HttpServletResponse resp) throws IOException {

+		if (config.isShutdown() || !config.isConfigured()) {

+			resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);

+			logger.info("NODE0102 Rejecting request: Service is being quiesced");

+			return(true);

+		}

+		return(false);

+	}

+	/**

+	 *	Handle a GET for /internal/fetchProv, /internal/resetSubscription/<i>subid</i>, and, from the internal subnet, /internal/logs/<i>file</i> and /internal/rtt/<i>ipaddr</i>

+	 */

+	protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {

+		NodeUtils.setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");

+		if (down(resp)) {

+			return;

+		}

+		String path = req.getPathInfo();

+		String qs = req.getQueryString();

+		String ip = req.getRemoteAddr();

+		if (qs != null) {

+			path = path + "?" + qs;

+		}

+		if ("/internal/fetchProv".equals(path)) {

+			config.gofetch(ip);

+			resp.setStatus(HttpServletResponse.SC_NO_CONTENT);

+			return;

+		} else if (path.startsWith("/internal/resetSubscription/")) {

+			String subid = path.substring(28);

+			if (subid.length() != 0 && subid.indexOf('/') == -1) {

+				NodeMain.resetQueue(subid, ip);

+				resp.setStatus(HttpServletResponse.SC_NO_CONTENT);

+				return;

+			}

+		}

+		if (internalsubnet.matches(NodeUtils.getInetAddress(ip))) {

+			if (path.startsWith("/internal/logs/")) {

+				String f = path.substring(15);

+				File fn = new File(config.getLogDir() + "/" + f);

+				if (f.indexOf('/') != -1 || !fn.isFile()) {

+					logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);

+					resp.sendError(HttpServletResponse.SC_NOT_FOUND);

+					return;

+				}

+				byte[] buf = new byte[65536];

+				resp.setContentType("text/plain");

+				resp.setContentLength((int)fn.length());

+				resp.setStatus(200);

+				InputStream is = new FileInputStream(fn);

+				OutputStream os = resp.getOutputStream();

+				int i;

+				while ((i = is.read(buf)) > 0) {

+					os.write(buf, 0, i);

+				}

+				is.close();

+				return;

+			}

+			if (path.startsWith("/internal/rtt/")) {

+				String xip = path.substring(14);

+				long st = System.currentTimeMillis();

+				String status = " unknown";

+				try {

+					Socket s = new Socket(xip, 443);

+					s.close();

+					status = " connected";

+				} catch (Exception e) {

+					status = " error " + e.toString();

+				}

+				long dur = System.currentTimeMillis() - st;

+				resp.setContentType("text/plain");

+				resp.setStatus(200);

+				byte[] buf = (dur + status + "\n").getBytes();

+				resp.setContentLength(buf.length);

+				resp.getOutputStream().write(buf);

+				return;

+			}

+		}

+		logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND);

+		return;

+	}

+	/**

+	 *	Handle all PUT requests

+	 */

+	protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {

+		NodeUtils.setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");

+		common(req, resp, true);

+	}

+	/**

+	 *	Handle all DELETE requests

+	 */

+	protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {

+		NodeUtils.setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");

+		common(req, resp, false);

+	}

+	private void common(HttpServletRequest req, HttpServletResponse resp, boolean isput) throws ServletException, IOException {

+		if (down(resp)) {

+			return;

+		}

+		if (!req.isSecure()) {

+			logger.info("NODE0104 Rejecting insecure PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "https required on publish requests");

+			return;

+		}

+		String fileid = req.getPathInfo();

+		if (fileid == null) {

+			logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI.  Expecting <feed-publishing-url>/<fileid>.");

+			return;

+		}

+		String feedid = null;

+		String user = null;

+		String credentials = req.getHeader("Authorization");

+		if (credentials == null) {

+			logger.info("NODE0106 Rejecting unauthenticated PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Authorization header required");

+			return;

+		}

+		String ip = req.getRemoteAddr();

+		String lip = req.getLocalAddr();

+		String pubid = null;

+		String xpubid = null;

+		String rcvd = NodeUtils.logts(System.currentTimeMillis()) + ";from=" + ip + ";by=" + lip;

+		Target[]	targets = null;

+		if (fileid.startsWith("/publish/")) {

+			fileid = fileid.substring(9);

+			int i = fileid.indexOf('/');

+			if (i == -1 || i == fileid.length() - 1) {

+				logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+				resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI.  Expecting <feed-publishing-url>/<fileid>.  Possible missing fileid.");

+				return;

+			}

+			feedid = fileid.substring(0, i);

+			fileid = fileid.substring(i + 1);

+			pubid = config.getPublishId();

+			xpubid = req.getHeader("X-ATT-DR-PUBLISH-ID");

+			targets = config.getTargets(feedid);

+		} else if (fileid.startsWith("/internal/publish/")) {

+			if (!config.isAnotherNode(credentials, ip)) {

+				logger.info("NODE0107 Rejecting unauthorized node-to-node transfer attempt from " + ip);

+				resp.sendError(HttpServletResponse.SC_FORBIDDEN);

+				return;

+			}

+			fileid = fileid.substring(18);

+			pubid = req.getHeader("X-ATT-DR-PUBLISH-ID");

+			targets = config.parseRouting(req.getHeader("X-ATT-DR-ROUTING"));

+		} else {

+			logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI.  Expecting <feed-publishing-url>/<fileid>.");

+			return;

+		}

+		if (fileid.indexOf('/') != -1) {

+			logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI.  Expecting <feed-publishing-url>/<fileid>.");

+			return;

+		}

+		String qs = req.getQueryString();

+		if (qs != null) {

+			fileid = fileid + "?" + qs;

+		}

+		String hp = config.getMyName();

+		int xp = config.getExtHttpsPort();

+		if (xp != 443) {

+			hp = hp + ":" + xp;

+		}

+		String logurl = "https://" + hp + "/internal/publish/" + fileid;

+		if (feedid != null) {

+			logurl = "https://" + hp + "/publish/" + feedid + "/" + fileid;

+			String reason = config.isPublishPermitted(feedid, credentials, ip);

+			if (reason != null) {

+				logger.info("NODE0111 Rejecting unauthorized publish attempt to feed " + feedid + " fileid " + fileid + " from " + ip + " reason " + reason);

+				resp.sendError(HttpServletResponse.SC_FORBIDDEN,reason);

+				return;

+			}

+			user = config.getAuthUser(feedid, credentials);

+			String newnode = config.getIngressNode(feedid, user, ip);

+			if (newnode != null) {

+				String port = "";

+				int iport = config.getExtHttpsPort();

+				if (iport != 443) {

+					port = ":" + iport;

+				}

+				String redirto = "https://" + newnode + port + "/publish/" + feedid + "/" + fileid;

+				logger.info("NODE0108 Redirecting publish attempt for feed " + feedid + " user " + user + " ip " + ip + " to " + redirto);

+				resp.sendRedirect(redirto);

+				return;

+			}

+			resp.setHeader("X-ATT-DR-PUBLISH-ID", pubid);

+		}

+		String fbase = config.getSpoolDir() + "/" + pubid;

+		File data = new File(fbase);

+		File meta = new File(fbase + ".M");

+		OutputStream dos = null;

+		Writer mw = null;

+		InputStream is = null;

+		try {

+			StringBuffer mx = new StringBuffer();

+			mx.append(req.getMethod()).append('\t').append(fileid).append('\n');

+			Enumeration hnames = req.getHeaderNames();

+			String ctype = null;

+			while (hnames.hasMoreElements()) {

+				String hn = (String)hnames.nextElement();

+				String hnlc = hn.toLowerCase();

+				if ((isput && ("content-type".equals(hnlc) ||

+				    "content-language".equals(hnlc) ||

+				    "content-md5".equals(hnlc) ||

+				    "content-range".equals(hnlc))) ||

+				    "x-att-dr-meta".equals(hnlc) ||

+				    (feedid == null && "x-att-dr-received".equals(hnlc)) ||

+				    (hnlc.startsWith("x-") && !hnlc.startsWith("x-att-dr-"))) {

+					Enumeration hvals = req.getHeaders(hn);

+					while (hvals.hasMoreElements()) {

+						String hv = (String)hvals.nextElement();

+						if ("content-type".equals(hnlc)) {

+							ctype = hv;

+						}

+						if ("x-att-dr-meta".equals(hnlc)) {

+							if (hv.length() > 4096) {

+								logger.info("NODE0109 Rejecting publish attempt with metadata too long for feed " + feedid + " user " + user + " ip " + ip);

+								resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Metadata too long");

+								return;

+							}

+							if (!MetaDataPattern.matcher(hv.replaceAll("\\\\.", "X")).matches()) {

+								logger.info("NODE0109 Rejecting publish attempt with malformed metadata for feed " + feedid + " user " + user + " ip " + ip);

+								resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Malformed metadata");

+								return;

+							}

+						}

+						mx.append(hn).append('\t').append(hv).append('\n');

+					}

+				}

+			}

+			mx.append("X-ATT-DR-RECEIVED\t").append(rcvd).append('\n');

+			String metadata = mx.toString();

+			byte[] buf = new byte[1024 * 1024];

+			int i;

+			try {

+				is = req.getInputStream();

+				dos = new FileOutputStream(data);

+				while ((i = is.read(buf)) > 0) {

+					dos.write(buf, 0, i);

+				}

+				is.close();

+				is = null;

+				dos.close();

+				dos = null;

+			} catch (IOException ioe) {

+				long exlen = -1;

+				try {

+					exlen = Long.parseLong(req.getHeader("Content-Length"));

+				} catch (Exception e) {

+				}

+				StatusLog.logPubFail(pubid, feedid, logurl, req.getMethod(), ctype, exlen, data.length(), ip, user, ioe.getMessage());

+				throw ioe;

+			}

+			Path dpath = Paths.get(fbase);

+			for (Target t: targets) {

+				DestInfo di = t.getDestInfo();

+				if (di == null) {

+					// TODO: unknown destination

+					continue;

+				}

+				String dbase = di.getSpool() + "/" + pubid;

+				Files.createLink(Paths.get(dbase), dpath);

+				mw = new FileWriter(meta);

+				mw.write(metadata);

+				if (di.getSubId() == null) {

+					mw.write("X-ATT-DR-ROUTING\t" + t.getRouting() + "\n");

+				}

+				mw.close();

+				meta.renameTo(new File(dbase + ".M"));

+			}

+			resp.setStatus(HttpServletResponse.SC_NO_CONTENT);

+			resp.getOutputStream().close();

+			StatusLog.logPub(pubid, feedid, logurl, req.getMethod(), ctype, data.length(), ip, user, HttpServletResponse.SC_NO_CONTENT);

+		} catch (IOException ioe) {

+			logger.info("NODE0110 IO Exception receiving publish attempt for feed " + feedid + " user " + user + " ip " + ip + " " + ioe.toString(), ioe);

+			throw ioe;

+		} finally {

+			if (is != null) { try { is.close(); } catch (Exception e) {}}

+			if (dos != null) { try { dos.close(); } catch (Exception e) {}}

+			if (mw != null) { try { mw.close(); } catch (Exception e) {}}

+			try { data.delete(); } catch (Exception e) {}

+			try { meta.delete(); } catch (Exception e) {}

+		}

+	}

+	

+	private int getIdFromPath(HttpServletRequest req) {

+		String path = req.getPathInfo();

+		if (path == null || path.length() < 2)

+			return -1;

+		try {

+			return Integer.parseInt(path.substring(1));

+		} catch (NumberFormatException e) {

+			return -1;

+		}

+	}

+}
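
To make the request shape that common() above expects concrete, here is a hedged publisher sketch; the host name, feed id, file id, and credentials are made-up values, and a real client would also need to trust the node's TLS certificate:

package com.att.research.datarouter.node;

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Hypothetical publishing client, for illustration only.
public class PublishExample {
	public static void main(String[] args) throws Exception {
		// PUT https://<node>/publish/<feedid>/<fileid> over https (plain http is rejected)
		URL url = new URL("https://dr-node.example.com/publish/42/sample.txt");
		HttpURLConnection conn = (HttpURLConnection) url.openConnection();
		conn.setRequestMethod("PUT");
		conn.setDoOutput(true);
		// Basic credentials for a user provisioned on the feed (see NodeUtils.getAuthHdr)
		conn.setRequestProperty("Authorization", NodeUtils.getAuthHdr("publisher", "secret"));
		// Optional metadata: a flat JSON object, at most 4096 characters
		conn.setRequestProperty("X-ATT-DR-META", "{\"compression\":\"gzip\"}");
		conn.setRequestProperty("Content-Type", "application/octet-stream");
		OutputStream os = conn.getOutputStream();
		os.write("hello".getBytes());
		os.close();
		// 204 No Content on success; X-ATT-DR-PUBLISH-ID identifies the publication
		System.out.println(conn.getResponseCode() + " " + conn.getHeaderField("X-ATT-DR-PUBLISH-ID"));
	}
}

If the feed is provisioned to a different ingress node, the servlet answers with a redirect to that node (NODE0108), so a client should be prepared to follow it.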

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeUtils.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeUtils.java
new file mode 100644
index 0000000..5471c0d
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/NodeUtils.java
@@ -0,0 +1,226 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;

+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;

+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;

+

+import java.security.*;

+import java.io.*;

+import java.util.*;

+import java.security.cert.*;

+import java.net.*;

+import java.text.*;

+import org.apache.commons.codec.binary.Base64;

+import org.apache.log4j.Logger;

+import org.slf4j.MDC;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.node.eelf.EelfMsgs;

+

+/**

+ *	Utility functions for the data router node

+ */

+public class NodeUtils	{

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.node.NodeUtils");

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.NodeUtils");

+	private static SimpleDateFormat	logdate;

+	static {

+		logdate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");

+		logdate.setTimeZone(TimeZone.getTimeZone("GMT"));

+	}

+	private NodeUtils() {}

+	/**

+	 *	Base64 encode a byte array

+	 *	@param raw	The bytes to be encoded

+	 *	@return	The encoded string

+	 */

+	public static String base64Encode(byte[] raw) {

+		return(Base64.encodeBase64String(raw));

+	}

+	/**

+	 *	Given a user and password, generate the credentials

+	 *	@param user	User name

+	 *	@param password	User password

+	 *	@return	Authorization header value

+	 */

+	public static String getAuthHdr(String user, String password) {

+		if (user == null || password == null) {

+			return(null);

+		}

+		return("Basic " + base64Encode((user + ":" + password).getBytes()));

+	}

+	/**

+	 *	Given a node name, generate the credentials

+	 *	@param node	Node name

+	 */

+	public static String	getNodeAuthHdr(String node, String key) {

+		try {

+			MessageDigest md = MessageDigest.getInstance("SHA");

+			md.update(key.getBytes());

+			md.update(node.getBytes());

+			md.update(key.getBytes());

+			return(getAuthHdr(node, base64Encode(md.digest())));

+		} catch (Exception e) {

+			return(null);

+		}

+	}

+	/**

+	 *	Given a keystore file and its password, return the value of the CN of the first private key entry with a certificate.

+	 *	@param kstype	The type of keystore

+	 *	@param ksfile	The file name of the keystore

+	 *	@param kspass	The password of the keystore

+	 *	@return	CN of the certificate subject or null

+	 */

+	public static String getCanonicalName(String kstype, String ksfile, String kspass) {

+		try {

+			KeyStore ks = KeyStore.getInstance(kstype);

+			ks.load(new FileInputStream(ksfile), kspass.toCharArray());

+			return(getCanonicalName(ks));

+		} catch (Exception e) {

+			setIpAndFqdnForEelf("getCanonicalName");

+			eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_LOAD_ERROR, ksfile, e.toString());

+			logger.error("NODE0401 Error loading my keystore file + " + ksfile + " " + e.toString(), e);

+			return(null);

+		}

+	}

+	/**

+	 *	Given a keystore, return the value of the CN of the first private key entry with a certificate.

+	 *	@param ks	The KeyStore

+	 *	@return	CN of the certificate subject or null

+	 */

+	public static String getCanonicalName(KeyStore ks) {

+		try {

+			Enumeration<String> aliases = ks.aliases();

+			while (aliases.hasMoreElements()) {

+				String s = aliases.nextElement();

+				if (ks.entryInstanceOf(s, KeyStore.PrivateKeyEntry.class)) {

+					X509Certificate c = (X509Certificate)ks.getCertificate(s);

+					if (c != null) {

+						String subject = c.getSubjectX500Principal().getName();

+						String[] parts = subject.split(",");

+						if (parts.length < 1) {

+							return(null);

+						}

+						subject = parts[0].trim();

+						if (!subject.startsWith("CN=")) {

+							return(null);

+

+						}

+						return(subject.substring(3));

+					}

+				}

+			}

+		} catch (Exception e) {

+			logger.error("NODE0402 Error extracting my name from my keystore file " + e.toString(), e);

+		}

+		return(null);

+	}

+	/**

+	 *	Given a string representation of an IP address, get the corresponding byte array

+	 *	@param ip	The IP address as a string

+	 *	@return	The IP address as a byte array or null if the address is invalid

+	 */

+	public static byte[] getInetAddress(String ip) {

+		try {

+			return(InetAddress.getByName(ip).getAddress());

+		} catch (Exception e) {

+		}

+		return(null);

+	}

+	/**

+	 *	Given a uri with parameters, split out the feed ID and file ID

+	 */

+	public static String[] getFeedAndFileID(String uriandparams) {

+		int end = uriandparams.length();

+		int i = uriandparams.indexOf('#');

+		if (i != -1 && i < end) {

+			end = i;

+		}

+		i = uriandparams.indexOf('?');

+		if (i != -1 && i < end) {

+			end = i;

+		}

+		end = uriandparams.lastIndexOf('/', end);

+		if (end < 2) {

+			return(null);

+		}

+		i = uriandparams.lastIndexOf('/', end - 1);

+		if (i == -1) {

+			return(null);

+		}

+		return(new String[] { uriandparams.substring(i + 1, end), uriandparams.substring(end + 1) });

+	}

+	/**

+	 *	Escape fields that might contain vertical bar, backslash, or newline by replacing them with backslash p, backslash e and backslash n.

+	 */

+	public static String loge(String s) {

+		if (s == null) {

+			return(s);

+		}

+		return(s.replaceAll("\\\\", "\\\\e").replaceAll("\\|", "\\\\p").replaceAll("\n", "\\\\n"));

+	}

+	/**

+	 *	Undo what loge does.

+	 */

+	public static String unloge(String s) {

+		if (s == null) {

+			return(s);

+		}

+		return(s.replaceAll("\\\\p", "\\|").replaceAll("\\\\n", "\n").replaceAll("\\\\e", "\\\\"));

+	}

+	/**

+	 *	Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ

+	 */

+	public static String logts(long when) {

+		return(logts(new Date(when)));

+	}

+	/**

+	 *	Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ

+	 */

+	public static synchronized String logts(Date when) {

+		return(logdate.format(when));

+	}

+	

+	/* Puts the method name, server FQDN and IP address of this machine into the EELF MDC

+	 * @Method - setIpAndFqdnForEelf - Rally:US664892

+	 * @Params - method, the method name to record in the EELF log.

+	 */

+	public static void setIpAndFqdnForEelf(String method) {

+	 	MDC.clear();

+        MDC.put(MDC_SERVICE_NAME, method);

+        try {

+            MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());

+            MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());

+        } catch (Exception e) {

+            e.printStackTrace();

+        }

+

+	}

+	

+

+}
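
A hedged sketch of how a few of these helpers behave; the user, password, node name, and input string are made-up values (only the "Node123!" default NodeAuthKey comes from NodeConfigManager):

package com.att.research.datarouter.node;

// Illustration only: exercises NodeUtils helpers defined above with made-up inputs.
public class NodeUtilsExample {
	public static void main(String[] args) {
		// Basic Authorization header for a user/password pair
		System.out.println(NodeUtils.getAuthHdr("publisher", "secret"));
		// Node-to-node credentials derived from the node name and the shared NodeAuthKey
		System.out.println(NodeUtils.getNodeAuthHdr("node1.example.com", "Node123!"));
		// loge/unloge round-trip: '|', '\' and newline are escaped for the event log format
		String raw = "a|b\\c\nd";
		String escaped = NodeUtils.loge(raw);
		System.out.println(escaped + " restores original: " + raw.equals(NodeUtils.unloge(escaped)));
	}
}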

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/PathFinder.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/PathFinder.java
new file mode 100644
index 0000000..7ff9183
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/PathFinder.java
@@ -0,0 +1,133 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+

+/**

+ *	Given a set of node names and next hops, identify and ignore any cycles and figure out the sequence of next hops to get from this node to any other node

+ */

+

+public class PathFinder	{

+	private static class Hop	{

+		public boolean	mark;

+		public boolean	bad;

+		public NodeConfig.ProvHop	basis;

+	}

+	private Vector<String> errors = new Vector<String>();

+	private Hashtable<String, String> routes = new Hashtable<String, String>();

+	/**

+	 *	Get list of errors encountered while finding paths

+	 *	@return array of error descriptions

+	 */

+	public String[] getErrors() {

+		return(errors.toArray(new String[errors.size()]));

+	}

+	/**

+	 *	Get the route from this node to the specified node

+	 *	@param destination	The name of the destination node

+	 *	@return	list of node names separated by and ending with "/"

+	 */

+	public String getPath(String destination) {

+		String ret = routes.get(destination);

+		if (ret == null) {

+			return("");

+		}

+		return(ret);

+	}

+	private String plot(String from, String to, Hashtable<String, Hop> info) {

+		Hop nh = info.get(from);

+		if (nh == null || nh.bad) {

+			return(to);

+		}

+		if (nh.mark) {

+			// loop detected;

+			while (!nh.bad) {

+				nh.bad = true;

+				errors.add(nh.basis + " is part of a cycle");

+				nh = info.get(nh.basis.getVia());

+			}

+			return(to);

+		}

+		nh.mark = true;

+		String x = plot(nh.basis.getVia(), to, info);

+		nh.mark = false;

+		if (nh.bad) {

+			return(to);

+		}

+		return(nh.basis.getVia() + "/" + x);

+	}

+	/**

+	 *	Find routes from a specified origin to all of the nodes given a set of specified next hops.

+	 *	@param origin	where we start

+	 *	@param nodes	where we can go

+	 *	@param hops	detours along the way

+	 */

+	public PathFinder(String origin, String[] nodes, NodeConfig.ProvHop[] hops) {

+		HashSet<String> known = new HashSet<String>();

+		Hashtable<String, Hashtable<String, Hop>> ht = new Hashtable<String, Hashtable<String, Hop>>();

+		for (String n: nodes) {

+			known.add(n);

+			ht.put(n, new Hashtable<String, Hop>());

+		}

+		for (NodeConfig.ProvHop ph: hops) {

+			if (!known.contains(ph.getFrom())) {

+				errors.add(ph + " references unknown from node");

+				continue;

+			}

+			if (!known.contains(ph.getTo())) {

+				errors.add(ph + " references unknown destination node");

+				continue;

+			}

+			Hashtable<String, Hop> ht2 = ht.get(ph.getTo());

+			Hop h = ht2.get(ph.getFrom());

+			if (h != null) {

+				h.bad = true;

+				errors.add(ph + " gives duplicate next hop - previous via was " + h.basis.getVia());

+				continue;

+			}

+			h = new Hop();

+			h.basis = ph;

+			ht2.put(ph.getFrom(), h);

+			if (!known.contains(ph.getVia())) {

+				errors.add(ph + " references unknown via node");

+				h.bad = true;

+				continue;

+			}

+			if (ph.getVia().equals(ph.getTo())) {

+				errors.add(ph + " gives destination as via");

+				h.bad = true;

+				continue;

+			}

+		}

+		for (String n: known) {

+			if (n.equals(origin)) {

+				routes.put(n, "");

+			} else {

+				routes.put(n, plot(origin, n, ht.get(n)) + "/");

+			}

+		}

+	}

+}
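
A hedged usage sketch for the constructor above; the node names are made-up and no ProvHop detours are supplied, so every destination is reached directly:

package com.att.research.datarouter.node;

// Illustration only: route lookup with no provisioned next hops.
public class PathFinderExample {
	public static void main(String[] args) {
		String[] nodes = { "nodeA", "nodeB", "nodeC" };
		NodeConfig.ProvHop[] hops = new NodeConfig.ProvHop[0];
		PathFinder pf = new PathFinder("nodeA", nodes, hops);
		// With no hops, the route to nodeB is just "nodeB/" (deliver directly)
		System.out.println(pf.getPath("nodeB"));
		// Bad or cyclic hop definitions show up as errors instead of breaking routing
		for (String err : pf.getErrors()) {
			System.out.println(err);
		}
	}
}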

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/ProvData.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/ProvData.java
new file mode 100644
index 0000000..19cb899
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/ProvData.java
@@ -0,0 +1,302 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.io.*;

+import java.util.*;

+import org.json.*;

+import org.apache.log4j.Logger;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.node.eelf.EelfMsgs;

+

+/**

+ *	Parser for provisioning data from the provisioning server.

+ *	<p>

+ *	The ProvData class uses a Reader for the text configuration from the

+ *	provisioning server to construct arrays of raw configuration entries.

+ */

+public class ProvData	{

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.node.ProvData");

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.node.ProvData");

+	private NodeConfig.ProvNode[]	pn;

+	private NodeConfig.ProvParam[]	pp;

+	private NodeConfig.ProvFeed[]	pf;

+	private NodeConfig.ProvFeedUser[]	pfu;

+	private NodeConfig.ProvFeedSubnet[]	pfsn;

+	private NodeConfig.ProvSubscription[]	ps;

+	private NodeConfig.ProvForceIngress[]	pfi;

+	private NodeConfig.ProvForceEgress[]	pfe;

+	private NodeConfig.ProvHop[]	ph;

+	private static String[] gvasa(JSONArray a, int index) {

+		return(gvasa(a.get(index)));

+	}

+	private static String[] gvasa(JSONObject o, String key) {

+		return(gvasa(o.opt(key)));

+	}

+	private static String[] gvasa(Object o) {

+		if (o instanceof JSONArray) {

+			JSONArray a = (JSONArray)o;

+			Vector<String> v = new Vector<String>();

+			for (int i = 0; i < a.length(); i++) {

+				String s = gvas(a, i);

+				if (s != null) {

+					v.add(s);

+				}

+			}

+			return(v.toArray(new String[v.size()]));

+		} else {

+			String s = gvas(o);

+			if (s == null) {

+				return(new String[0]);

+			} else {

+				return(new String[] { s });

+			}

+		}

+	}

+	private static String gvas(JSONArray a, int index) {

+		return(gvas(a.get(index)));

+	}

+	private static String gvas(JSONObject o, String key) {

+		return(gvas(o.opt(key)));

+	}

+	private static String gvas(Object o) {

+		if (o instanceof Boolean || o instanceof Number || o instanceof String) {

+			return(o.toString());

+		}

+		return(null);

+	}

+	/**

+	 *	Construct raw provisioning data entries from the text (JSON)

+	 *	provisioning document received from the provisioning server

+	 *	@param r	The reader for the JSON text.

+	 */

+	public ProvData(Reader r) throws IOException {

+		Vector<NodeConfig.ProvNode> pnv = new Vector<NodeConfig.ProvNode>();

+		Vector<NodeConfig.ProvParam> ppv = new Vector<NodeConfig.ProvParam>();

+		Vector<NodeConfig.ProvFeed> pfv = new Vector<NodeConfig.ProvFeed>();

+		Vector<NodeConfig.ProvFeedUser> pfuv = new Vector<NodeConfig.ProvFeedUser>();

+		Vector<NodeConfig.ProvFeedSubnet> pfsnv = new Vector<NodeConfig.ProvFeedSubnet>();

+		Vector<NodeConfig.ProvSubscription> psv = new Vector<NodeConfig.ProvSubscription>();

+		Vector<NodeConfig.ProvForceIngress> pfiv = new Vector<NodeConfig.ProvForceIngress>();

+		Vector<NodeConfig.ProvForceEgress> pfev = new Vector<NodeConfig.ProvForceEgress>();

+		Vector<NodeConfig.ProvHop> phv = new Vector<NodeConfig.ProvHop>();

+		try {

+			JSONTokener jtx = new JSONTokener(r);

+			JSONObject jcfg = new JSONObject(jtx);

+			char c = jtx.nextClean();

+			if (c != '\0') {

+				throw new JSONException("Spurious characters following configuration");

+			}

+			r.close();

+			JSONArray jfeeds = jcfg.optJSONArray("feeds");

+			if (jfeeds != null) {

+				for (int fx = 0; fx < jfeeds.length(); fx++) {

+					JSONObject jfeed = jfeeds.getJSONObject(fx);

+					String stat = null;

+					if (jfeed.optBoolean("suspend", false)) {

+						stat = "Feed is suspended";

+					}

+					if (jfeed.optBoolean("deleted", false)) {

+						stat = "Feed is deleted";

+					}

+					String fid = gvas(jfeed, "feedid");

+					String fname = gvas(jfeed, "name");

+					String fver = gvas(jfeed, "version");

+					pfv.add(new NodeConfig.ProvFeed(fid, fname + "//" + fver, stat));

+					JSONObject jauth = jfeed.optJSONObject("authorization");

+					if (jauth == null) {

+						continue;

+					}

+					JSONArray jeids = jauth.optJSONArray("endpoint_ids");

+					if (jeids != null) {

+						for (int ux = 0; ux < jeids.length(); ux++) {

+							JSONObject ju = jeids.getJSONObject(ux);

+							String login = gvas(ju, "id");

+							String password = gvas(ju, "password");

+							pfuv.add(new NodeConfig.ProvFeedUser(fid, login, NodeUtils.getAuthHdr(login, password)));

+						}

+					}

+					JSONArray jeips = jauth.optJSONArray("endpoint_addrs");

+					if (jeips != null) {

+						for (int ix = 0; ix < jeips.length(); ix++) {

+							String sn = gvas(jeips, ix);

+							pfsnv.add(new NodeConfig.ProvFeedSubnet(fid, sn));

+						}

+					}

+				}

+			}

+			JSONArray jsubs = jcfg.optJSONArray("subscriptions");

+			if (jsubs != null) {

+				for (int sx = 0; sx < jsubs.length(); sx++) {

+					JSONObject jsub = jsubs.getJSONObject(sx);

+					if (jsub.optBoolean("suspend", false)) {

+						continue;

+					}

+					String sid = gvas(jsub, "subid");

+					String fid = gvas(jsub, "feedid");

+					JSONObject jdel = jsub.getJSONObject("delivery");

+					String delurl = gvas(jdel, "url");

+					String id = gvas(jdel, "user");

+					String password = gvas(jdel, "password");

+					boolean monly = jsub.getBoolean("metadataOnly");

+					boolean use100 = jdel.getBoolean("use100");

+					psv.add(new NodeConfig.ProvSubscription(sid, fid, delurl, id, NodeUtils.getAuthHdr(id, password), monly, use100));

+				}

+			}

+			JSONObject jparams = jcfg.optJSONObject("parameters");

+			if (jparams != null) {

+				for (String pname: JSONObject.getNames(jparams)) {

+					String pvalue = gvas(jparams, pname);

+					if (pvalue != null) {

+						ppv.add(new NodeConfig.ProvParam(pname, pvalue));

+					}

+				}

+				String sfx = gvas(jparams, "PROV_DOMAIN");

+				JSONArray jnodes = jparams.optJSONArray("NODES");

+				if (jnodes != null) {

+					for (int nx = 0; nx < jnodes.length(); nx++) {

+						String nn = gvas(jnodes, nx);

+						if (nn.indexOf('.') == -1) {

+							nn = nn + "." + sfx;

+						}

+						pnv.add(new NodeConfig.ProvNode(nn));

+					}

+				}

+			}

+			JSONArray jingresses = jcfg.optJSONArray("ingress");

+			if (jingresses != null) {

+				for (int fx = 0; fx < jingresses.length(); fx++) {

+					JSONObject jingress = jingresses.getJSONObject(fx);

+					String fid = gvas(jingress, "feedid");

+					String subnet = gvas(jingress, "subnet");

+					String user = gvas(jingress, "user");

+					String[] nodes = gvasa(jingress, "node");

+					if (fid == null || "".equals(fid)) {

+						continue;

+					}

+					if ("".equals(subnet)) {

+						subnet = null;

+					}

+					if ("".equals(user)) {

+						user = null;

+					}

+					pfiv.add(new NodeConfig.ProvForceIngress(fid, subnet, user, nodes));

+				}

+			}

+			JSONObject jegresses = jcfg.optJSONObject("egress");

+			if (jegresses != null && JSONObject.getNames(jegresses) != null) {

+				for (String esid: JSONObject.getNames(jegresses)) {

+					String enode = gvas(jegresses, esid);

+					if (esid != null && enode != null && !"".equals(esid) && !"".equals(enode)) {

+						pfev.add(new NodeConfig.ProvForceEgress(esid, enode));

+					}

+				}

+			}

+			JSONArray jhops = jcfg.optJSONArray("routing");

+			if (jhops != null) {

+				for (int fx = 0; fx < jhops.length(); fx++) {

+					JSONObject jhop = jhops.getJSONObject(fx);

+					String from = gvas(jhop, "from");

+					String to = gvas(jhop, "to");

+					String via = gvas(jhop, "via");

+					if (from == null || to == null || via == null || "".equals(from) || "".equals(to) || "".equals(via)) {

+						continue;

+					}

+					phv.add(new NodeConfig.ProvHop(from, to, via));

+				}

+			}

+		} catch (JSONException jse) {

+			NodeUtils.setIpAndFqdnForEelf("ProvData");

+			eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, jse.toString());

+			logger.error("NODE0201 Error parsing configuration data from provisioning server " + jse.toString(), jse);

+			throw new IOException(jse.toString(), jse);

+		}

+		pn = pnv.toArray(new NodeConfig.ProvNode[pnv.size()]);

+		pp = ppv.toArray(new NodeConfig.ProvParam[ppv.size()]);

+		pf = pfv.toArray(new NodeConfig.ProvFeed[pfv.size()]);

+		pfu = pfuv.toArray(new NodeConfig.ProvFeedUser[pfuv.size()]);

+		pfsn = pfsnv.toArray(new NodeConfig.ProvFeedSubnet[pfsnv.size()]);

+		ps = psv.toArray(new NodeConfig.ProvSubscription[psv.size()]);

+		pfi = pfiv.toArray(new NodeConfig.ProvForceIngress[pfiv.size()]);

+		pfe = pfev.toArray(new NodeConfig.ProvForceEgress[pfev.size()]);

+		ph = phv.toArray(new NodeConfig.ProvHop[phv.size()]);

+	}

+	/**

+	 *	Get the raw node configuration entries

+	 */

+	public NodeConfig.ProvNode[] getNodes() {

+		return(pn);

+	}

+	/**

+	 *	Get the raw parameter configuration entries

+	 */

+	public NodeConfig.ProvParam[] getParams() {

+		return(pp);

+	}

+	/**

+	 *	Get the raw feed configuration entries

+	 */

+	public NodeConfig.ProvFeed[] getFeeds() {

+		return(pf);

+	}

+	/**

+	 *	Get the raw feed user configuration entries

+	 */

+	public NodeConfig.ProvFeedUser[] getFeedUsers() {

+		return(pfu);

+	}

+	/**

+	 *	Get the raw feed subnet configuration entries

+	 */

+	public NodeConfig.ProvFeedSubnet[] getFeedSubnets() {

+		return(pfsn);

+	}

+	/**

+	 *	Get the raw subscription entries

+	 */

+	public NodeConfig.ProvSubscription[] getSubscriptions() {

+		return(ps);

+	}

+	/**

+	 *	Get the raw forced ingress entries

+	 */

+	public NodeConfig.ProvForceIngress[] getForceIngress() {

+		return(pfi);

+	}

+	/**

+	 *	Get the raw forced egress entries

+	 */

+	public NodeConfig.ProvForceEgress[] getForceEgress() {

+		return(pfe);

+	}

+	/**

+	 *	Get the raw next hop entries

+	 */

+	public NodeConfig.ProvHop[] getHops() {

+		return(ph);

+	}

+}
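
A minimal usage sketch, not part of the imported sources, showing how a caller might hand the JSON provisioning document to ProvData and walk the resulting raw entries. The file name prov.json and the sketch class are hypothetical; the constructor and accessors are the ones defined above.

    package com.att.research.datarouter.node;

    import java.io.FileReader;
    import java.io.IOException;
    import java.io.Reader;

    public class ProvDataSketch {
        public static void main(String[] args) throws IOException {
            // prov.json is a hypothetical local copy of the provisioning document.
            Reader r = new FileReader("prov.json");
            ProvData pd = new ProvData(r);   // parses the document (closing the reader on success)
            for (NodeConfig.ProvNode node : pd.getNodes()) {
                System.out.println("node entry: " + node);
            }
            System.out.println(pd.getFeeds().length + " feeds, "
                    + pd.getSubscriptions().length + " subscriptions, "
                    + pd.getHops().length + " routing hops");
        }
    }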

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/PublishId.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/PublishId.java
new file mode 100644
index 0000000..436adba
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/PublishId.java
@@ -0,0 +1,52 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+/**

+ *	Generate publish IDs

+ */

+public class PublishId	{

+	private long	nextuid;

+	private String	myname;

+

+	/**

+	 *	Generate publish IDs for the specified name

+	 *	@param myname	Unique identifier for this publish ID generator (usually fqdn of server)

+	 */

+	public PublishId(String myname) {

+		this.myname = myname;

+	}

+	/**

+	 *	Generate a Data Router Publish ID that uniquely identifies the particular invocation of the Publish API for log correlation purposes.

+	 */

+	public synchronized String next() {

+		long now = System.currentTimeMillis();

+		if (now < nextuid) {

+			now = nextuid;

+		}

+		nextuid = now + 1;

+		return(now + "." + myname);

+	}

+}
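
A short sketch, not part of the import, of the ID format PublishId produces; the host name is illustrative.

    package com.att.research.datarouter.node;

    public class PublishIdSketch {
        public static void main(String[] args) {
            PublishId pid = new PublishId("node1.example.com");  // illustrative node FQDN
            // Each call returns "<millis>.<myname>"; the millis part is bumped past the
            // previous value, so two calls in the same millisecond still yield distinct IDs.
            System.out.println(pid.next());
            System.out.println(pid.next());
        }
    }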

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/RateLimitedOperation.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/RateLimitedOperation.java
new file mode 100644
index 0000000..5bcbed8
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/RateLimitedOperation.java
@@ -0,0 +1,102 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+

+/**

+ *	Execute an operation no more frequently than a specified interval

+ */

+

+public abstract class RateLimitedOperation implements Runnable	{

+	private boolean	marked;	// a timer task exists

+	private boolean	executing;	// the operation is currently in progress

+	private boolean remark;	// a request was made while the operation was in progress

+	private Timer	timer;

+	private long	last;	// when the last operation started

+	private long	mininterval;

+	/**

+	 *	Create a rate limited operation

+	 *	@param mininterval	The minimum number of milliseconds after the last execution starts before a new execution can begin

+	 *	@param timer	The timer used to perform deferred executions

+	 */

+	public RateLimitedOperation(long mininterval, Timer timer) {

+		this.timer = timer;

+		this.mininterval = mininterval;

+	}

+	private class deferred extends TimerTask	{

+		public void run() {

+			execute();

+		}

+	}

+	private synchronized void unmark() {

+		marked = false;

+	}

+	private void execute() {

+		unmark();

+		request();

+	}

+	/**

+	 *	Request that the operation be performed by this thread or at a later time by the timer

+	 */

+	public void request() {

+		if (premark()) {

+			return;

+		}

+		do {

+			run();

+		} while (demark());

+	}

+	private synchronized boolean premark() {

+		if (executing) {

+			// currently executing - wait until it finishes

+			remark = true;

+			return(true);

+		}

+		if (marked) {

+			// timer currently running - will run when it expires

+			return(true);

+		}

+		long now = System.currentTimeMillis();

+		if (last + mininterval > now) {

+			// too soon - schedule a timer

+			marked = true;

+			timer.schedule(new deferred(), last + mininterval - now);

+			return(true);

+		}

+		last = now;

+		executing = true;

+		// start execution

+		return(false);

+	}

+	private synchronized boolean demark() {

+		executing = false;

+		if (remark) {

+			remark = false;

+			return(!premark());

+		}

+		return(false);

+	}

+}
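
A sketch, not part of the import, of how a subclass coalesces bursts of request() calls; the 5-second interval and the printed message are illustrative.

    package com.att.research.datarouter.node;

    import java.util.Timer;

    public class RateLimitedSketch {
        public static void main(String[] args) throws InterruptedException {
            Timer timer = new Timer(true);  // daemon timer used for deferred executions
            RateLimitedOperation flush = new RateLimitedOperation(5000L, timer) {
                public void run() {
                    System.out.println("flush at " + System.currentTimeMillis());
                }
            };
            for (int i = 0; i < 10; i++) {
                flush.request();        // first call runs inline; the rest coalesce into
                Thread.sleep(100);      // at most one deferred run on the timer
            }
            Thread.sleep(6000);         // let the deferred run fire before exiting
        }
    }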

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/RedirManager.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/RedirManager.java
new file mode 100644
index 0000000..09473c1
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/RedirManager.java
@@ -0,0 +1,118 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+import java.io.*;

+

+/**

+ *	Track redirections of subscriptions

+ */

+public class RedirManager	{

+	private Hashtable<String, String> sid2primary = new Hashtable<String, String>();

+	private Hashtable<String, String> sid2secondary = new Hashtable<String, String>();

+	private String	redirfile;

+	RateLimitedOperation	op;

+	/**

+	 *	Create a mechanism for maintaining subscription redirections.

+	 *	@param redirfile	The file to store the redirection information.

+	 *	@param mininterval	The minimum number of milliseconds between writes to the redirection information file.

+	 *	@param timer	The timer thread used to run delayed file writes.

+	 */

+	public RedirManager(String redirfile, long mininterval, Timer timer) {

+		this.redirfile = redirfile;

+		op = new RateLimitedOperation(mininterval, timer) {

+			public void run() {

+				try {

+					StringBuffer sb = new StringBuffer();

+					for (String s: sid2primary.keySet()) {

+						sb.append(s).append(' ').append(sid2primary.get(s)).append(' ').append(sid2secondary.get(s)).append('\n');

+					}

+					OutputStream os = new FileOutputStream(RedirManager.this.redirfile);

+					os.write(sb.toString().getBytes());

+					os.close();

+				} catch (Exception e) {

+				}

+			}

+		};

+		try {

+			String s;

+			BufferedReader br = new BufferedReader(new FileReader(redirfile));

+			while ((s = br.readLine()) != null) {

+				s = s.trim();

+				String[] sx = s.split(" ");

+				if (s.startsWith("#") || sx.length != 3) {

+					continue;

+				}

+				sid2primary.put(sx[0], sx[1]);

+				sid2secondary.put(sx[0], sx[2]);

+			}

+			br.close();

+		} catch (Exception e) {

+			// missing file is normal

+		}

+	}

+	/**

+	 *	Set up redirection.  If a request is to be sent to subscription ID sid, and that subscription is configured to go to URL primary, send it to secondary instead.

+	 *	@param sid	The subscription ID to be redirected

+	 *	@param primary	The URL associated with that subscription ID

+	 *	@param secondary	The replacement URL to use instead

+	 */

+	public synchronized void redirect(String sid, String primary, String secondary) {

+		sid2primary.put(sid, primary);

+		sid2secondary.put(sid, secondary);

+		op.request();

+	}

+	/**

+	 *	Cancel redirection.  If a request is to be sent to subscription ID sid, send it to its primary URL.

+	 *	@param	sid	The subscription ID to remove from the table.

+	 */

+	public synchronized void forget(String sid) {

+		sid2primary.remove(sid);

+		sid2secondary.remove(sid);

+		op.request();

+	}

+	/**

+	 *	Look up where to send a subscription.  If the primary has changed or there is no redirection, use the primary.  Otherwise, redirect to the secondary URL.

+	 *	@param	sid	The subscription ID to look up.

+	 *	@param	primary	The configured primary URL.

+	 *	@return	The destination URL to really use.

+	 */

+	public synchronized String lookup(String sid, String primary) {

+		String oprim = sid2primary.get(sid);

+		if (primary.equals(oprim)) {

+			return(sid2secondary.get(sid));

+		} else if (oprim != null) {

+			forget(sid);

+		}	

+		return(primary);

+	}

+	/**

+	 *	Is a subscription redirected?

+	 */

+	public synchronized boolean isRedirected(String sid) {

+		return(sid != null && sid2secondary.get(sid) != null);

+	}

+}
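
A sketch, not part of the import, of the redirect/lookup/forget cycle; the file path and URLs are illustrative.

    package com.att.research.datarouter.node;

    import java.util.Timer;

    public class RedirSketch {
        public static void main(String[] args) {
            Timer timer = new Timer(true);
            // Rewrite the redirection file at most once every 10 seconds.
            RedirManager rm = new RedirManager("/tmp/redirections.dat", 10000L, timer);
            rm.redirect("sub123", "https://primary.example/deliver", "https://standby.example/deliver");
            // Primary unchanged: lookup returns the secondary URL.
            System.out.println(rm.lookup("sub123", "https://primary.example/deliver"));
            // Primary changed by provisioning: the stale redirection is forgotten.
            System.out.println(rm.lookup("sub123", "https://new-primary.example/deliver"));
            System.out.println(rm.isRedirected("sub123"));  // false after the lookup above
        }
    }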

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/StatusLog.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/StatusLog.java
new file mode 100644
index 0000000..66aa4ad
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/StatusLog.java
@@ -0,0 +1,229 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.node;

+

+import java.util.regex.*;

+import java.util.*;

+import java.io.*;

+import java.nio.file.*;

+import java.text.*;

+

+/**

+ *	Logging for data router delivery events (PUB/DEL/EXP)

+ */

+public class StatusLog	{

+	private static StatusLog instance = new StatusLog();

+	private HashSet<String> toship = new HashSet<String>();

+	private SimpleDateFormat	filedate;

+	private String	prefix = "logs/events";

+	private	String	suffix = ".log";

+	private String	plainfile;

+	private String	curfile;

+	private long	nexttime;

+	private OutputStream	os;

+	private long	intvl;

+	private NodeConfigManager	config = NodeConfigManager.getInstance();

+	{

+		try { filedate = new SimpleDateFormat("-yyyyMMddHHmm"); } catch (Exception e) {}

+	}

+	/**

+	 *	Parse an interval of the form xxhyymzzs and round it to the nearest whole fraction of 24 hours.  If no units are specified, assume seconds.

+	 */

+	public static long parseInterval(String interval, int def) {

+		try {

+			Matcher m = Pattern.compile("(?:(\\d+)[Hh])?(?:(\\d+)[Mm])?(?:(\\d+)[Ss]?)?").matcher(interval);

+			if (m.matches()) {

+				int dur = 0;

+				String x = m.group(1);

+				if (x != null) {

+					dur += 3600 * Integer.parseInt(x);

+				}

+				x = m.group(2);

+				if (x != null) {

+					dur += 60 * Integer.parseInt(x);

+				}

+				x = m.group(3);

+				if (x != null) {

+					dur += Integer.parseInt(x);

+				}

+				if (dur < 60) {

+					dur = 60;

+				}

+				int best = 86400;

+				int dist = best - dur;

+				if (dur > best) {

+					dist = dur - best;

+				}

+				int base = 1;

+				for (int i = 0; i < 8; i++) {

+					int base2 = base;

+					base *= 2;

+					for (int j = 0; j < 4; j++) {

+						int base3 = base2;

+						base2 *= 3;

+						for (int k = 0; k < 3; k++) {

+							int cur = base3;

+							base3 *= 5;

+							int ndist = cur - dur;

+							if (dur > cur) {

+								ndist = dur - cur;

+							}

+							if (ndist < dist) {

+								best = cur;

+								dist = ndist;

+							}

+						}

+					}

+				}

+				def = best * 1000;

+			}

+		} catch (Exception e) {

+		}

+		return(def);

+	}

+	private synchronized void checkRoll(long now) throws IOException {

+		if (now >= nexttime) {

+			if (os != null) {

+				os.close();

+				os = null;

+			}

+			intvl = parseInterval(config.getEventLogInterval(), 300000);

+			prefix = config.getEventLogPrefix();

+			suffix = config.getEventLogSuffix();

+			nexttime = now - now % intvl + intvl;

+			curfile = prefix + filedate.format(new Date(nexttime - intvl)) + suffix;

+			plainfile = prefix + suffix;

+			notify();

+		}

+	}

+	/**

+	 *	Get the name of the current log file

+	 *	@return	The full path name of the current event log file

+	 */

+	public static synchronized String getCurLogFile() {

+		try {

+			instance.checkRoll(System.currentTimeMillis());

+		} catch (Exception e) {

+		}

+		return(instance.curfile);

+	}

+	private synchronized void log(String s) {

+		try {

+			long now = System.currentTimeMillis();

+			checkRoll(now);

+			if (os == null) {

+				os = new FileOutputStream(curfile, true);

+				(new File(plainfile)).delete();

+				Files.createLink(Paths.get(plainfile), Paths.get(curfile));

+			}

+			os.write((NodeUtils.logts(new Date(now)) + '|' + s + '\n').getBytes());

+			os.flush();

+		} catch (IOException ioe) {

+		}

+	}

+	/**

+	 *	Log a received publication attempt.

+	 *	@param pubid	The publish ID assigned by the node

+	 *	@param feedid	The feed id given by the publisher

+	 *	@param requrl	The URL of the received request

+	 *	@param method	The method (DELETE or PUT) in the received request

+	 *	@param ctype	The content type (if method is PUT and clen > 0)

+	 *	@param clen	The content length (if method is PUT)

+	 *	@param srcip	The IP address of the publisher

+	 *	@param user	The identity of the publisher

+	 *	@param status	The status returned to the publisher

+	 */

+	public static void logPub(String pubid, String feedid, String requrl, String method, String ctype, long clen, String srcip, String user, int status) {

+		instance.log("PUB|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + srcip + "|" + user + "|" + status);

+	}

+	/**

+	 *	Log a data transfer error receiving a publication attempt

+	 *	@param pubid	The publish ID assigned by the node

+	 *	@param feedid	The feed id given by the publisher

+	 *	@param requrl	The URL of the received request

+	 *	@param method	The method (DELETE or PUT) in the received request

+	 *	@param ctype	The content type (if method is PUT and clen > 0)

+	 *	@param clen	The expected content length (if method is PUT)

+	 *	@param rcvd	The content length received

+	 *	@param srcip	The IP address of the publisher

+	 *	@param user	The identity of the publisher

+	 *	@param error	The error message from the IO exception

+	 */

+	public static void logPubFail(String pubid, String feedid, String requrl, String method, String ctype, long clen, long rcvd, String srcip, String user, String error) {

+		instance.log("PBF|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + rcvd + "|" + srcip + "|" + user + "|" + error);

+	}

+	/**

+	 *	Log a delivery attempt.

+	 *	@param pubid	The publish ID assigned by the node

+	 *	@param feedid	The feed ID

+	 *	@param subid	The (space delimited list of) subscription ID

+	 *	@param requrl	The URL used in the attempt

+	 *	@param method	The method (DELETE or PUT) in the attempt

+	 *	@param ctype	The content type (if method is PUT, not metaonly, and clen > 0)

+	 *	@param clen	The content length (if PUT and not metaonly)

+	 *	@param user	The identity given to the subscriber

+	 *	@param status	The status returned by the subscriber, or -1 if an exception occurred trying to connect

+	 *	@param xpubid	The publish ID returned by the subscriber

+	 */

+	public static void logDel(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String user, int status, String xpubid) {

+		if (feedid == null) {

+			return;

+		}

+		instance.log("DEL|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + user + "|" + status + "|" + xpubid);

+	}

+	/**

+	 *	Log delivery attempts expired

+	 *	@param pubid	The publish ID assigned by the node

+	 *	@param feedid	The feed ID

+	 *	@param subid	The (space delimited list of) subscription ID

+	 *	@param requrl	The URL that would be delivered to

+	 *	@param method	The method (DELETE or PUT) in the request

+	 *	@param ctype	The content type (if method is PUT, not metaonly, and clen > 0)

+	 *	@param clen	The content length (if PUT and not metaonly)

+	 *	@param reason	The reason the attempts were discontinued

+	 *	@param attempts	The number of attempts made

+	 */

+	public static void logExp(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String reason, int attempts) {

+		if (feedid == null) {

+			return;

+		}

+		instance.log("EXP|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + reason + "|" + attempts);

+	}

+	/**

+	 *	Log extra statistics about unsuccessful delivery attempts.

+	 *	@param pubid	The publish ID assigned by the node

+	 *	@param feedid	The feed ID

+	 *	@param subid	The (space delimited list of) subscription ID

+	 *	@param clen	The content length

+	 *	@param sent	The number of bytes sent; -1 if the subscriber returned an error instead of 100 Continue, otherwise the number of bytes sent before an error occurred.

+	 */

+	public static void logDelExtra(String pubid, String feedid, String subid, long clen, long sent) {

+		if (feedid == null) {

+			return;

+		}

+		instance.log("DLX|" + pubid + "|" + feedid + "|" + subid + "|" + clen + "|" + sent);

+	}

+	private StatusLog() {

+	}

+}
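
A sketch, not part of the import, of the interval rounding and the PUB record fields; it assumes it runs inside a node process where NodeConfigManager can initialize, and every field value below is illustrative.

    package com.att.research.datarouter.node;

    public class StatusLogSketch {
        public static void main(String[] args) {
            // "1h30m" is 5400s = 2^3 * 3^3 * 5^2, already a whole 1/16 of a day,
            // so it is kept as-is and reported in milliseconds: 5400000.
            System.out.println(StatusLog.parseInterval("1h30m", 300000));

            // A PUB record as written after accepting a publish request
            // (pubid, feedid, request URL, method, content type, length, source IP, user, status).
            StatusLog.logPub("1496360400000.node1.example.com", "42",
                    "https://node1.example.com/publish/42/file.bin", "PUT",
                    "application/octet-stream", 1024L, "192.0.2.10", "pubuser", 204);
        }
    }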

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/SubnetMatcher.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/SubnetMatcher.java
new file mode 100644
index 0000000..c1cfeaa
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/SubnetMatcher.java
@@ -0,0 +1,71 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.net.*;

+

+/**

+ *	Compare IP addresses as byte arrays to a subnet specified as a CIDR

+ */

+public class SubnetMatcher	{

+	private byte[]	sn;

+	private int	len;

+	private int	mask;

+	/**

+	 *	Construct a subnet matcher given a CIDR

+	 *	@param subnet	The CIDR to match

+	 */

+	public SubnetMatcher(String subnet) {

+		int i = subnet.lastIndexOf('/');

+		if (i == -1) {

+			sn = NodeUtils.getInetAddress(subnet);

+			len = sn.length;

+		} else {

+			len = Integer.parseInt(subnet.substring(i + 1));

+			sn = NodeUtils.getInetAddress(subnet.substring(0, i));

+			mask = ((0xff00) >> (len % 8)) & 0xff;

+			len /= 8;

+		}

+	}

+	/**

+	 *	Is the IP address in the CIDR?

+	 *	@param addr the IP address as bytes in network byte order

+	 *	@return true if the IP address matches.

+	 */

+	public boolean matches(byte[] addr) {

+		if (addr.length != sn.length) {

+			return(false);

+		}

+		for (int i = 0; i < len; i++) {

+			if (addr[i] != sn[i]) {

+				return(false);

+			}

+		}

+		if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {

+			return(false);

+		}

+		return(true);

+	}

+}
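
A sketch, not part of the import, of CIDR matching; it assumes NodeUtils.getInetAddress returns the raw address bytes in network order, which is how matches() compares them.

    package com.att.research.datarouter.node;

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class SubnetSketch {
        public static void main(String[] args) throws UnknownHostException {
            SubnetMatcher m = new SubnetMatcher("192.0.2.0/28");
            byte[] inside = InetAddress.getByName("192.0.2.9").getAddress();
            byte[] outside = InetAddress.getByName("192.0.2.200").getAddress();
            System.out.println(m.matches(inside));   // true: first 28 bits match
            System.out.println(m.matches(outside));  // false: differs within the masked byte
        }
    }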

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/Target.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/Target.java
new file mode 100644
index 0000000..fe595d5
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/Target.java
@@ -0,0 +1,60 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+/**

+ *	A destination to deliver a message

+ */

+public class Target	{

+	private DestInfo	destinfo;

+	private String	routing;

+	/**

+	 *	A destination to deliver a message

+	 *	@param destinfo	Either info for a subscription ID or info for a node-to-node transfer

+	 *	@param routing	For a node-to-node transfer, what to do when it gets there.

+	 */

+	public Target(DestInfo destinfo, String routing) {

+		this.destinfo = destinfo;

+		this.routing = routing;

+	}

+	/**

+	 *	Add additional routing

+	 */

+	public void addRouting(String routing) {

+		this.routing = this.routing + " " + routing;

+	}

+	/**

+	 *	Get the destination information for this target

+	 */

+	public DestInfo getDestInfo() {

+		return(destinfo);

+	}

+	/**

+	 *	Get the next hop information for this target

+	 */

+	public String getRouting() {

+		return(routing);

+	}

+}

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/TaskList.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/TaskList.java
new file mode 100644
index 0000000..401c72a
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/TaskList.java
@@ -0,0 +1,113 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.node;

+

+import java.util.*;

+

+/**

+ *	Manage a list of tasks to be executed when an event occurs.

+ *	This makes the following guarantees:

+ *	<ul>

+ *	<li>Tasks can be safely added and removed in the middle of a run.</li>

+ *	<li>No task will be returned more than once during a run.</li>

+ *	<li>No task will be returned when it is not, at that moment, in the list of tasks.</li>

+ *	<li>At the moment when next() returns null, all tasks on the list have been returned during the run.</li>

+ *	<li>Initially and once next() returns null during a run, next() will continue to return null until startRun() is called.

+ *	</ul>

+ */

+public class TaskList	{

+	private Iterator<Runnable>	runlist;

+	private HashSet<Runnable>	tasks = new HashSet<Runnable>();

+	private HashSet<Runnable>	togo;

+	private HashSet<Runnable>	sofar;

+	private HashSet<Runnable>	added;

+	private HashSet<Runnable>	removed;

+	/**

+	 *	Construct a new TaskList

+	 */

+	public TaskList() {

+	}

+	/**

+	 *	Start executing the sequence of tasks.

+	 */

+	public synchronized void	startRun() {

+		sofar = new HashSet<Runnable>();

+		added = new HashSet<Runnable>();

+		removed = new HashSet<Runnable>();

+		togo = new HashSet<Runnable>(tasks);

+		runlist = togo.iterator();

+	}

+	/**

+	 *	Get the next task to execute

+	 */

+	public synchronized Runnable	next() {

+		while (runlist != null) {

+			if (runlist.hasNext()) {

+				Runnable task = runlist.next();

+				if (removed.contains(task)) {

+					continue;

+				}

+				if (sofar.contains(task)) {

+					continue;

+				}

+				sofar.add(task);

+				return(task);

+			}

+			if (added.size() != 0) {

+				togo = added;

+				added = new HashSet<Runnable>();

+				removed.clear();

+				runlist = togo.iterator();

+				continue;

+			}

+			togo = null;

+			added = null;

+			removed = null;

+			sofar = null;

+			runlist = null;

+		}

+		return(null);

+	}

+	/**

+	 *	Add a task to the list of tasks to run whenever the event occurs.

+	 */

+	public synchronized void addTask(Runnable task) {

+		if (runlist != null) {

+			added.add(task);

+			removed.remove(task);

+		}

+		tasks.add(task);

+	}

+	/**

+	 *	Remove a task from the list of tasks to run whenever the event occurs.

+	 */

+	public synchronized void removeTask(Runnable task) {

+		if (runlist != null) {

+			removed.add(task);

+			added.remove(task);

+		}

+		tasks.remove(task);

+	}

+}
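
A sketch, not part of the import, of the startRun()/next() protocol; tasks added or removed while a run is in progress are honored per the guarantees listed in the class comment. The task bodies are illustrative.

    package com.att.research.datarouter.node;

    public class TaskListSketch {
        public static void main(String[] args) {
            TaskList onConfigChange = new TaskList();
            onConfigChange.addTask(() -> System.out.println("reload delivery queues"));
            onConfigChange.addTask(() -> System.out.println("rebuild routing table"));

            onConfigChange.startRun();
            Runnable task;
            while ((task = onConfigChange.next()) != null) {
                task.run();  // safe even if task.run() adds or removes tasks on this list
            }
        }
    }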

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EELFFilter.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EELFFilter.java
new file mode 100644
index 0000000..9b00658
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EELFFilter.java
@@ -0,0 +1,43 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.node.eelf;

+

+import ch.qos.logback.classic.spi.ILoggingEvent;

+import ch.qos.logback.core.filter.Filter;

+import ch.qos.logback.core.spi.FilterReply;

+

+/*

+ * When EELF functionality was added, it began logging the Jetty output by default as well, which stopped the existing behavior of logging Jetty statements in node.log;

+ * code was added in logback.xml to put the Jetty statements back in node.log.

+ * This class removes the extra EELF statements from node.log since they are already logged in apicalls.log.

+ */

+public class EELFFilter extends Filter<ILoggingEvent>{

+	  @Override

+	  public FilterReply decide(ILoggingEvent event) {    

+	    if (event.getMessage().contains("EELF")) {

+	      return FilterReply.DENY;

+	    } else {

+	      return FilterReply.ACCEPT;

+	    }

+	  }

+}

diff --git a/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EelfMsgs.java b/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EelfMsgs.java
new file mode 100644
index 0000000..9963f41
--- /dev/null
+++ b/datarouter-node/src/main/java/com/att/research/datarouter/node/eelf/EelfMsgs.java
@@ -0,0 +1,96 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.node.eelf;

+

+import com.att.eelf.i18n.EELFResolvableErrorEnum;

+import com.att.eelf.i18n.EELFResourceManager;

+

+public enum EelfMsgs implements EELFResolvableErrorEnum {

+	

+	/**

+     * Application message prints user (accepts one argument)

+     */

+	MESSAGE_WITH_BEHALF,

+

+	/**

+     * Application message prints user and FeedID (accepts two arguments)

+     */

+

+	MESSAGE_WITH_BEHALF_AND_FEEDID,

+	

+	/**

+     * Application message prints keystore file error in EELF errors log

+     */

+

+	MESSAGE_KEYSTORE_LOAD_ERROR,

+	

+	/**

+     * Application message prints Error extracting my name from my keystore file

+     */

+

+	MESSAGE_KEYSORE_NAME_ERROR,	

+	

+	/**

+     * Application message prints Error parsing configuration data from provisioning server.

+     */

+

+

+	MESSAGE_PARSING_ERROR,		

+	

+	/**

+     * Application message prints Configuration failed

+     */

+

+

+	MESSAGE_CONF_FAILED,		

+	

+	/**

+     * Application message prints Bad provisioning server URL

+     */

+

+

+	MESSAGE_BAD_PROV_URL,		

+	

+	/**

+     * Application message prints Unable to fetch canonical name from keystore file

+     */

+

+

+	MESSAGE_KEYSTORE_FETCH_ERROR,

+	

+	/**

+     * Application message prints Unable to load local configuration file.

+     */

+

+

+	MESSAGE_PROPERTIES_LOAD_ERROR;

+

+    

+    /**

+     * Static initializer to ensure the resource bundles for this class are loaded...

+     * Here this application loads messages from the EelfMessages bundle

+     */

+    static {

+        EELFResourceManager.loadMessageBundle("EelfMessages");

+    }

+}
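
A sketch, not part of the import, of resolving one of these codes against EelfMessages.properties, mirroring the ProvData error path above; the EELF package names are assumed from the standard EELF distribution and the detail string is illustrative.

    package com.att.research.datarouter.node.eelf;

    import com.att.eelf.configuration.EELFLogger;
    import com.att.eelf.configuration.EELFManager;

    public class EelfMsgsSketch {
        private static final EELFLogger eelflogger =
                EELFManager.getInstance().getLogger("com.att.research.datarouter.node.eelf.EelfMsgsSketch");

        public static void main(String[] args) {
            // Resolves to "EELF0003E| Error parsing configuration data from
            // provisioning server. <detail>" via EelfMessages.properties.
            eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, "unexpected end of input");
        }
    }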

diff --git a/datarouter-node/src/main/resources/EelfMessages.properties b/datarouter-node/src/main/resources/EelfMessages.properties
new file mode 100644
index 0000000..8c17417
--- /dev/null
+++ b/datarouter-node/src/main/resources/EelfMessages.properties
@@ -0,0 +1,70 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+########################################################################

+#Resource key=Error Code|Message text|Resolution text |Description text

+#######

+#Newlines can be used to add clarity, ensuring each continuation line

+#has at least one leading space

+#ResourceKey=\

+#             ERR0000E\

+#             Sample error msg txt\

+#             Sample resolution msg\

+#             Sample description txt

+#

+######

+#Error code classification category

+#100	Permission errors

+#200	Availability errors/Timeouts

+#300	Data errors

+#400	Schema Interface type/validation errors

+#500	Business process errors

+#900	Unknown errors

+#

+########################################################################

+

+# Messages for Data Router EELF framework

+

+#Prints FeedID in the EELF apicalls log

+MESSAGE_WITH__FEEDID=EELF0001I| FeedID  = {0}

+

+#Prints User in the EELF apicalls log

+MESSAGE_WITH_BEHALF=EELF0002I| User = {0}

+

+#Prints User and FeedID in the EELF apicalls log

+MESSAGE_WITH_BEHALF_AND_FEEDID=EELF0003I| User = {0} FeedID  = {1}

+

+#Prints keystore file error in EELF errors log

+MESSAGE_KEYSTORE_LOAD_ERROR=EELF0001E| Error loading my keystore file {0} {1}

+

+MESSAGE_KEYSORE_NAME_ERROR=EELF0002E| Error extracting my name from my keystore file. {0}

+

+MESSAGE_PARSING_ERROR=EELF0003E| Error parsing configuration data from provisioning server. {0}

+

+MESSAGE_CONF_FAILED=EELF0004E| Configuration failed. {0} - try again later.

+

+MESSAGE_BAD_PROV_URL=EELF0005E| Bad provisioning server URL {0}

+

+MESSAGE_KEYSTORE_FETCH_ERROR=EELF0006E| Unable to fetch canonical name from keystore file {0}

+

+MESSAGE_PROPERTIES_LOAD_ERROR=EELF0007E| Unable to load local configuration file - etc/node.properties

+

diff --git a/datarouter-node/src/main/resources/docker/Dockerfile b/datarouter-node/src/main/resources/docker/Dockerfile
new file mode 100644
index 0000000..fbf5456
--- /dev/null
+++ b/datarouter-node/src/main/resources/docker/Dockerfile
@@ -0,0 +1,7 @@
+FROM java:8 

+ADD opt /opt/

+ADD startup.sh /startup.sh

+RUN chmod 700 /startup.sh

+ENTRYPOINT ./startup.sh start

+EXPOSE 8443

+EXPOSE 8080
\ No newline at end of file
diff --git a/datarouter-node/src/main/resources/docker/startup.sh b/datarouter-node/src/main/resources/docker/startup.sh
new file mode 100644
index 0000000..8cb71dd
--- /dev/null
+++ b/datarouter-node/src/main/resources/docker/startup.sh
@@ -0,0 +1,18 @@
+LIB=/opt/app/datartr/lib
+ETC=/opt/app/datartr/etc
+echo "this is LIB" $LIB
+echo "this is ETC" $ETC
+mkdir -p /opt/app/datartr/logs
+mkdir -p /opt/app/datartr/spool
+mkdir -p /opt/app/datartr/spool/f
+mkdir -p /opt/app/datartr/spool/n
+mkdir -p /opt/app/datartr/spool/s
+CLASSPATH=$ETC
+for FILE in `find $LIB -name *.jar`; do
+  CLASSPATH=$CLASSPATH:$FILE
+done
+java -classpath $CLASSPATH  com.att.research.datarouter.node.NodeMain
+
+runner_file="$LIB/datarouter-node-jar-with-dependencies.jar"
+echo "Starting using" $runner_file
+java -Dcom.att.eelf.logging.file==/opt/app/datartr/etc/logback.xml -Dcom.att.eelf.logging.path=/ -Dcom.att.research.datarouter.node.ConfigFile==/opt/app/datartr/etc/node.properties -jar $runner_file
\ No newline at end of file
diff --git a/datarouter-node/src/main/resources/log4j.properties b/datarouter-node/src/main/resources/log4j.properties
new file mode 100644
index 0000000..5b2f019
--- /dev/null
+++ b/datarouter-node/src/main/resources/log4j.properties
@@ -0,0 +1,32 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+log4j.debug=FALSE

+log4j.rootLogger=INFO,Root

+

+log4j.appender.Root=org.apache.log4j.DailyRollingFileAppender

+log4j.appender.Root.file=/root/node.log

+log4j.appender.Root.datePattern='.'yyyyMMdd

+log4j.appender.Root.append=true

+log4j.appender.Root.layout=org.apache.log4j.PatternLayout

+log4j.appender.Root.layout.ConversionPattern=%d %p %m%n

+!

diff --git a/datarouter-node/src/main/resources/log4j.properties.tmpl b/datarouter-node/src/main/resources/log4j.properties.tmpl
new file mode 100644
index 0000000..299edbf
--- /dev/null
+++ b/datarouter-node/src/main/resources/log4j.properties.tmpl
@@ -0,0 +1,11 @@
+cat <<!EOF
+log4j.debug=FALSE
+log4j.rootLogger=INFO,Root
+
+log4j.appender.Root=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.Root.file=C:/Users/sg481n/node.log
+log4j.appender.Root.datePattern='.'yyyyMMdd
+log4j.appender.Root.append=true
+log4j.appender.Root.layout=org.apache.log4j.PatternLayout
+log4j.appender.Root.layout.ConversionPattern=%d %p %m%n
+!EOF
diff --git a/datarouter-node/src/main/resources/logback.xml b/datarouter-node/src/main/resources/logback.xml
new file mode 100644
index 0000000..a47486d
--- /dev/null
+++ b/datarouter-node/src/main/resources/logback.xml
@@ -0,0 +1,405 @@
+<!--

+  ============LICENSE_START==================================================

+  * org.onap.dmaap

+  * ===========================================================================

+  * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+  * ===========================================================================

+  * Licensed under the Apache License, Version 2.0 (the "License");

+  * you may not use this file except in compliance with the License.

+  * You may obtain a copy of the License at

+  * 

+   *      http://www.apache.org/licenses/LICENSE-2.0

+  * 

+   * Unless required by applicable law or agreed to in writing, software

+  * distributed under the License is distributed on an "AS IS" BASIS,

+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+  * See the License for the specific language governing permissions and

+  * limitations under the License.

+  * ============LICENSE_END====================================================

+  *

+  * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+  *

+-->

+<configuration scan="true" scanPeriod="3 seconds" debug="true">

+  <!--<jmxConfigurator /> -->

+  <!-- directory path for all other type logs -->

+  <!-- property name="logDir" value="/home/eby/dr2/logs" / -->

+  <property name="logDir" value="/opt/app/datartr/logs" />

+ 

+  <!-- directory path for debugging type logs -->

+  <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->

+  

+  <!--  specify the component name 

+    <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC"  -->

+  <!-- This creates the MSO directory in in the LogDir which is not needed, mentioned last directory of the path-->

+  <!-- property name="componentName" value="logs"></property -->

+  

+  <!--  log file names -->

+  <property name="generalLogName" value="apicalls" />

+  <!-- name="securityLogName" value="security" -->

+  <!-- name="performanceLogName" value="performance" -->

+  <!-- name="serverLogName" value="server" -->

+  <!-- name="policyLogName" value="policy"-->

+  <property name="errorLogName" value="errors" />

+  <!-- name="metricsLogName" value="metrics" -->

+  <!-- name="auditLogName" value="audit" -->

+  <!-- name="debugLogName" value="debug" -->

+  <property name="jettyAndNodeLogName" value="node"></property> 

+  <property name="defaultPattern"    value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />

+  <property name="jettyAndNodeLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />

+  

+  <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />

+     

+  <property name="logDirectory" value="${logDir}" />

+  <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->

+  

+  

+  <!-- Example evaluator filter applied against console appender -->

+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+

+  <!-- ============================================================================ -->

+  <!-- EELF Appenders -->

+  <!-- ============================================================================ -->

+

+  <!-- The EELFAppender is used to record events to the general application 

+    log -->

+    

+    

+  <appender name="EELF"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${generalLogName}.log</file>

+     <filter class="ch.qos.logback.classic.filter.LevelFilter">

+		<level>INFO</level>

+		<onMatch>ACCEPT</onMatch>

+		<onMismatch>DENY</onMismatch>

+	</filter>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELF" />

+  </appender>

+

+  <!-- EELF Security Appender. This appender is used to record security events 

+    to the security log file. Security events are separate from other loggers 

+    in EELF so that security log records can be captured and managed in a secure 

+    way separate from the other logs. This appender is set to never discard any 

+    events. -->

+  <!--appender name="EELFSecurity"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${securityLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <discardingThreshold>0</discardingThreshold>

+    <appender-ref ref="EELFSecurity" />

+  </appender-->

+

+  <!-- EELF Performance Appender. This appender is used to record performance 

+    records. -->

+  <!--appender name="EELFPerformance"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${performanceLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <outputPatternAsHeader>true</outputPatternAsHeader>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFPerformance" />

+  </appender-->

+

+  <!-- EELF Server Appender. This appender is used to record Server related 

+    logging events. The Server logger and appender are specializations of the 

+    EELF application root logger and appender. This can be used to segregate Server 

+    events from other components, or it can be eliminated to record these events 

+    as part of the application root log. -->

+  <!--appender name="EELFServer"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${serverLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+        <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFServer" />

+  </appender-->

+

+  

+  <!-- EELF Policy Appender. This appender is used to record Policy engine 

+    related logging events. The Policy logger and appender are specializations 

+    of the EELF application root logger and appender. This can be used to segregate 

+    Policy engine events from other components, or it can be eliminated to record 

+    these events as part of the application root log. -->

+  <!--appender name="EELFPolicy"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${policyLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+        <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFPolicy" />

+  </appender-->

+  

+  

+  <!-- EELF Audit Appender. This appender is used to record audit engine 

+    related logging events. The audit logger and appender are specializations 

+    of the EELF application root logger and appender. This can be used to segregate 

+    Policy engine events from other components, or it can be eliminated to record 

+    these events as part of the application root log. -->

+    

+  <!--appender name="EELFAudit"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${auditLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+         <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFAudit" />

+  </appender-->

+

+<!--appender name="EELFMetrics"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${metricsLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder-->

+      <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - 

+        %msg%n"</pattern> -->

+      <!--pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  

+  <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFMetrics"/>

+  </appender-->

+   

+  <appender name="EELFError"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${errorLogName}.log</file>

+    <filter class="ch.qos.logback.classic.filter.LevelFilter">

+		<level>ERROR</level>

+		<onMatch>ACCEPT</onMatch>

+		<onMismatch>DENY</onMismatch>

+	</filter>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFError"/>

+  </appender>

+  

+  <!-- ============================================================================ -->

+   <appender name="jettyAndNodelog"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${jettyAndNodeLogName}.log</file>

+     <filter class="com.att.research.datarouter.node.eelf.EELFFilter" />

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${jettyAndNodeLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${jettyAndNodeLoggerPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFjettyAndNodelog" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="jettyAndNodelog" />

+    <includeCallerData>true</includeCallerData>

+  </appender>

+  

+   <!-- ============================================================================ -->

+

+

+   <!--appender name="EELFDebug"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${debugLogDirectory}/${debugLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${debugLoggerPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFDebug" />

+    <includeCallerData>true</includeCallerData>

+  </appender-->

+ 

+  

+  <!-- ============================================================================ -->

+  <!--  EELF loggers -->

+  <!-- ============================================================================ -->

+  <logger name="com.att.eelf" level="info" additivity="false">

+    <appender-ref ref="asyncEELF" />

+  </logger>

+  

+     <logger name="com.att.eelf.error" level="error" additivity="false">

+ 		 <appender-ref ref="asyncEELFError" />

+ 	 </logger>

+  

+     <logger name="org.eclipse.jetty" additivity="false" level="info">

+		<appender-ref ref="asyncEELFjettyAndNodelog"/>

+	</logger> 

+	

+  <!-- logger name="com.att.eelf.security" level="info" additivity="false">

+    <appender-ref ref="asyncEELFSecurity" /> 

+  </logger>

+  <logger name="com.att.eelf.perf" level="info" additivity="false">

+    <appender-ref ref="asyncEELFPerformance" />

+  </logger>

+  <logger name="com.att.eelf.server" level="info" additivity="false">

+    <appender-ref ref="asyncEELFServer" />

+  </logger>

+  <logger name="com.att.eelf.policy" level="info" additivity="false">

+    <appender-ref ref="asyncEELFPolicy" />

+  </logger>

+

+  <logger name="com.att.eelf.audit" level="info" additivity="false">

+    <appender-ref ref="asyncEELFAudit" />

+  </logger>

+  

+  <logger name="com.att.eelf.metrics" level="info" additivity="false">

+        <appender-ref ref="asyncEELFMetrics" />

+  </logger>

+   

+   <logger name="com.att.eelf.debug" level="debug" additivity="false">

+        <appender-ref ref="asyncEELFDebug" />

+  </logger-->

+

+  

+

+  

+  <root level="INFO">

+    <appender-ref ref="asyncEELF" />

+    <appender-ref ref="asyncEELFError" />

+     <appender-ref ref="asyncEELFjettyAndNodelog" />

+  </root>

+

+</configuration>

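For reference, the appenders above are reached through the ordinary SLF4J API that logback implements. The sketch below is illustrative only (the class name and messages are invented) and shows how an event from an unconfigured logger reaches the EELF and EELFError appenders via the root logger; it is not part of the node source.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

        public static void main(String[] args) {
            // Loggers without an explicit <logger> entry fall through to the root
            // logger, which fans out to asyncEELF, asyncEELFError and
            // asyncEELFjettyAndNodelog.
            LOG.info("node started");      // passes the INFO-only LevelFilter on the EELF appender
            LOG.error("delivery failed");  // passes the ERROR-only LevelFilter on the EELFError appender
        }
    }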
diff --git a/datarouter-node/src/main/resources/misc/descriptor.xml b/datarouter-node/src/main/resources/misc/descriptor.xml
new file mode 100644
index 0000000..88fccc1
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/descriptor.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="utf-8"?>

+<!--

+  ============LICENSE_START==================================================

+  * org.onap.dmaap

+  * ===========================================================================

+  * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+  * ===========================================================================

+  * Licensed under the Apache License, Version 2.0 (the "License");

+  * you may not use this file except in compliance with the License.

+  * You may obtain a copy of the License at

+  * 

+   *      http://www.apache.org/licenses/LICENSE-2.0

+  * 

+   * Unless required by applicable law or agreed to in writing, software

+  * distributed under the License is distributed on an "AS IS" BASIS,

+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+  * See the License for the specific language governing permissions and

+  * limitations under the License.

+  * ============LICENSE_END====================================================

+  *

+  * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+  *

+-->

+<descriptor version="1" xmlns="http://aft.att.com/swm/descriptor">

+	<platforms>

+		<platform os="Linux" osVersions="*" architecture="*"/>

+	</platforms>

+	<paths>

+		<path name="/opt/app/datartr" user="datartr" group="datartr" permissions="755,644" recursive="true"/>

+		<path name="/opt/app/platform/init.d/drtrnode" user="datartr" group="datartr" permissions="755"/>

+	</paths>

+	<actions>

+		<action type="INIT">

+			<proc stage="POST" user="datartr" group="datartr"/>

+		</action>

+		<action type="FALL">

+			<proc stage="PRE" user="datartr" group="datartr"/>

+			<proc stage="POST" user="datartr" group="datartr"/>

+		</action>

+		<action type="INST">

+			<proc stage="PRE" user="datartr" group="datartr"/>

+			<proc stage="POST" user="datartr" group="datartr"/>

+		</action>

+		<action type="DINST">

+			<proc stage="PRE" user="datartr" group="datartr"/>

+		</action>

+	</actions>

+	<dependencies>

+		<dependencyFilter componentName="com.att.java:jdk8lin" versions="[1.8.0.77-02]" sequence="1"/>

+		<dependencyFilter componentName="com.att.platform:initd" versions="[1.0.15,)" sequence="2"/>

+		<dependencyFilter componentName="com.att.dmaap.datarouter:util" versions="[1.0.7,)" sequence="3"/>

+	</dependencies>

+</descriptor>

diff --git a/datarouter-node/src/main/resources/misc/doaction b/datarouter-node/src/main/resources/misc/doaction
new file mode 100644
index 0000000..617b01d
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/doaction
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+cd /opt/app/datartr/etc
+for action in "$@"
+do
+case "$action" in
+'backup')
+	cp log4j.properties log4j.properties.save 2>/dev/null
+	cp node.properties node.properties.save 2>/dev/null
+	cp havecert havecert.save 2>/dev/null
+	;;
+'stop')
+	/opt/app/platform/init.d/drtrnode stop
+	;;
+'start')
+	/opt/app/platform/init.d/drtrnode start || exit 1
+	;;
+'config')
+	/bin/bash log4j.properties.tmpl >log4j.properties
+	/bin/bash node.properties.tmpl >node.properties
+	/bin/bash havecert.tmpl >havecert
+	echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.node
+	chmod +x havecert
+	rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+	ln -s ../init.d/drtrnode /opt/app/platform/rc.d/K90drtrnode
+	ln -s ../init.d/drtrnode /opt/app/platform/rc.d/S10drtrnode
+	;;
+'restore')
+	cp log4j.properties.save log4j.properties 2>/dev/null
+	cp node.properties.save node.properties 2>/dev/null
+	cp havecert.save havecert 2>/dev/null
+	;;
+'clean')
+	rm -f log4j.properties node.properties havecert log4j.properties.save node.properties.save havecert.save SHUTDOWN redirections.dat VERSION.node
+	rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+	;;
+*)
+	exit 1
+	;;
+esac
+done
+exit 0
diff --git a/datarouter-node/src/main/resources/misc/drtrnode b/datarouter-node/src/main/resources/misc/drtrnode
new file mode 100644
index 0000000..ba784f3
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/drtrnode
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+umask 0022
+TZ=GMT0
+export TZ
+PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/java/jdk/jdk180/bin
+export PATH
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'` 
+export CLASSPATH
+
+pids() {
+	ps -ef | grep java | grep node.NodeMain | sed -e 's/[^ ]* *//' -e 's/ .*//'
+}
+
+start() {
+	ID=`id -n -u`
+	GRP=`id -n -g`
+	if [ "$ID" != "root" ]
+	then
+		echo drtrnode must be started as user datartr not $ID
+		exit 1
+	fi
+	if [ "$GRP" != "datartr" ]
+	then
+		echo drtrnode must be started as group datartr not $GRP
+		exit 1
+	fi
+	cd /opt/app/datartr
+	if etc/havecert
+	then
+		echo >/dev/null
+	else
+		echo No certificate file available.  Cannot start
+		exit 0
+	fi
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		echo drtrnode already running
+		exit 0
+	fi
+
+	mkdir -p /opt/app/datartr/spool/s
+	chmod 755 /opt/app/datartr/spool/s
+
+	rm -f /opt/app/datartr/etc/SHUTDOWN
+	nohup java com.att.research.datarouter.node.NodeMain </dev/null >/dev/null 2>&1 &
+	sleep 5
+	PIDS=`pids`
+	if [ "$PIDS" = "" ]
+	then
+		echo drtrnode startup failed
+	else
+		echo drtrnode started
+	fi
+}
+
+stop() {
+	ID=`id -n -u`
+	GRP=`id -n -g`
+	if [ "$ID" != "datartr" ]
+	then
+		echo drtrnode must be stopped as user datartr not $ID
+		exit 1
+	fi
+	if [ "$GRP" != "datartr" ]
+	then
+		echo drtrnode must be stopped as group datartr not $GRP
+		exit 1
+	fi
+	touch /opt/app/datartr/etc/SHUTDOWN
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		sleep 5
+		kill -9 $PIDS
+		sleep 5
+		echo drtrnode stopped
+	else
+		echo drtrnode not running
+	fi
+}
+
+status() {
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		echo drtrnode running
+	else
+		echo drtrnode not running
+	fi
+}
+
+case "$1" in
+'start')
+	start
+	;;
+'stop')
+	stop
+	;;
+'restart')
+	stop
+	sleep 20
+	start
+	;;
+'status')
+	status
+	;;
+*)
+	echo "Usage: $0 { start | stop | restart }"
+	exit 1
+	;;
+esac
+exit 0
diff --git a/datarouter-node/src/main/resources/misc/havecert.tmpl b/datarouter-node/src/main/resources/misc/havecert.tmpl
new file mode 100644
index 0000000..2e813ba
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/havecert.tmpl
@@ -0,0 +1,11 @@
+#!/bin/bash
+cat <<!EOF
+TZ=GMT0
+cd /opt/app/datartr;
+if [ -f ${DRTR_NODE_KSTOREFILE:-etc/keystore} ]
+then
+	exit 0
+fi
+echo `date '+%F %T,000'` WARN Certificate file "${DRTR_NODE_KSTOREFILE:-etc/keystore}" is missing >>${DRTR_NODE_LOGS:-logs}/node.log
+exit 1
+!EOF
diff --git a/datarouter-node/src/main/resources/misc/log4j.properties.tmpl b/datarouter-node/src/main/resources/misc/log4j.properties.tmpl
new file mode 100644
index 0000000..24bd3df
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/log4j.properties.tmpl
@@ -0,0 +1,11 @@
+cat <<!EOF
+log4j.debug=FALSE
+log4j.rootLogger=INFO,Root
+
+log4j.appender.Root=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.Root.file=${DRTR_NODE_LOGS:-logs}/node.log
+log4j.appender.Root.datePattern='.'yyyyMMdd
+log4j.appender.Root.append=true
+log4j.appender.Root.layout=org.apache.log4j.PatternLayout
+log4j.appender.Root.layout.ConversionPattern=%d %p %m%n
+!EOF
diff --git a/datarouter-node/src/main/resources/misc/node.properties b/datarouter-node/src/main/resources/misc/node.properties
new file mode 100644
index 0000000..fb97702
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/node.properties
@@ -0,0 +1,112 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+#

+#	Configuration parameters fixed at startup for the DataRouter node

+#

+#	URL to retrieve dynamic configuration

+#

+#ProvisioningURL:	${DRTR_PROV_INTURL}

+ProvisioningURL=https://prov.datarouternew.com:8443/internal/prov

+

+#

+#	URL to upload PUB/DEL/EXP logs

+#

+#LogUploadURL:	${DRTR_LOG_URL}

+LogUploadURL=https://prov.datarouternew.com:8443/internal/logs

+

+#

+#	The port number for http as seen within the server

+#

+#IntHttpPort:	${DRTR_NODE_INTHTTPPORT:-8080}

+IntHttpPort=8080

+#

+#	The port number for https as seen within the server

+#

+IntHttpsPort=8443

+#

+#	The external port number for https taking port mapping into account

+#

+ExtHttpsPort=443

+#

+#	The minimum interval between fetches of the dynamic configuration

+#	from the provisioning server

+#

+MinProvFetchInterval=10000

+#

+#	The minimum interval between saves of the redirection data file

+#

+MinRedirSaveInterval=10000

+#

+#	The path to the directory where log files are stored

+#

+LogDir=/opt/app/datartr/logs

+#

+#	The retention interval (in days) for log files

+#

+LogRetention=30

+#

+#	The path to the directories where data and metadata files are stored

+#

+SpoolDir=/opt/app/datartr/spool

+#

+#	The path to the redirection data file

+#

+#RedirectionFile:	etc/redirections.dat

+#

+#	The type of keystore for https

+#

+KeyStoreType:	jks

+#

+#	The path to the keystore for https

+#

+KeyStoreFile:/opt/app/datartr/self_signed/keystore.jks

+#

+#	The password for the https keystore

+#

+KeyStorePassword=changeit

+#

+#	The password for the private key in the https keystore

+#

+KeyPassword=changeit

+#

+#	The type of truststore for https

+#

+TrustStoreType=jks

+#

+#	The path to the truststore for https

+#

+#TrustStoreFile=/usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts

+TrustStoreFile=/opt/app/datartr/self_signed/cacerts.jks

+#

+#	The password for the https truststore

+#

+TrustStorePassword=changeit

+#

+#	The path to the file used to trigger an orderly shutdown

+#

+QuiesceFile=etc/SHUTDOWN

+#

+#	The key used to generate passwords for node to node transfers

+#

+NodeAuthKey=Node123!

+

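node.properties is a plain java.util.Properties file, which is why the mix of "key=value" and "key: value" separators above (for example KeyStoreType and KeyStoreFile) still loads correctly. The sketch below is a generic illustration of reading it, not the node's actual configuration loader; the file path matches the install location used elsewhere in this change.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Properties;

    public class NodeConfigSketch {
        public static void main(String[] args) throws IOException {
            Properties p = new Properties();
            try (FileInputStream in = new FileInputStream("/opt/app/datartr/etc/node.properties")) {
                p.load(in);   // accepts both '=' and ':' as key/value separators
            }
            String provUrl = p.getProperty("ProvisioningURL");
            int intHttpPort = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));
            System.out.println(provUrl + " " + intHttpPort);
        }
    }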
diff --git a/datarouter-node/src/main/resources/misc/notes b/datarouter-node/src/main/resources/misc/notes
new file mode 100644
index 0000000..f37a8ea
--- /dev/null
+++ b/datarouter-node/src/main/resources/misc/notes
@@ -0,0 +1,54 @@
+package notes for com.att.dmaap.datarouter:node
+
+This component is for the Data Router Node software.
+
+The following prerequisite components should already be present:
+	com.att.aft.swm:swm-cli
+	com.att.aft.swm:swm-node
+	- SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
+	com.att.platform:uam-auto
+	com.att.java:jdk8lin
+	com.att.platform:initd
+	com.att.platform:port-fwd
+	- SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
+	com.att.dmaap.datarouter:util
+
+In a non-production environment, the URL for fetching provisioning data from
+the provisioning server must be overridden.  This can be done by setting a SWM
+variable prior to installing this component.  The production (default) value for
+this variable is:
+	DRTR_PROV_INTURL=https://feeds-drtr.web.att.com/internal/prov
+
+Similarly, the URL for uploading event logs to the log server must be overridden.  This can also be done by setting a SWM variable.  The production (default) value is:
+	DRTR_LOG_URL=https://feeds-drtr.web.att.com/internal/logs
+
+Other SWM variables that can be set are:
+
+DRTR_NODE_INTHTTPPORT (default 8080)
+	The TCP/IP port number the component should listen on for "go fetch"
+	requests from the provisioning server
+DRTR_NODE_INTHTTPSPORT (default 8443)
+	The TCP/IP port number the component should listen on for publish
+	requests from feed publishers and other nodes
+DRTR_NODE_EXTHTTPSPORT (default 443)
+	The TCP/IP port number the component should use for node-to-node
+	transfers and for sending redirect requests back to publishers
+DRTR_NODE_SPOOL (default /opt/app/datartr/spool)
+	The directory where data files should be saved while in transit
+DRTR_NODE_LOGS (default /opt/app/datartr/logs)
+	The directory where log files should be kept
+DRTR_NODE_LOG_RETENTION (default 30)
+	How long a log file is kept before being deleted
+DRTR_NODE_KSTOREFILE (default /opt/app/datartr/etc/keystore)
+	The java keystore file containing the server certificate and private key
+	for this server
+DRTR_NODE_KSTOREPASS (default changeit)
+	The password for the keystore file
+DRTR_NODE_PVTKEYPASS (default changeit)
+	The password for the private key in the keystore file
+DRTR_NODE_TSTOREFILE (by default, use the truststore from the Java JDK)
+	The java keystore file containing the trusted certificate authority
+	certificates
+DRTR_NODE_TSTOREPASS (default changeit)
+	The password for the trust store file.  Only applies if a trust store
+	file is specified.
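The SWM variables listed above are substituted into the *.tmpl files at install time using shell ${VAR:-default} expansion. The Java sketch below only mirrors that fallback behaviour for two of the variables so the defaults are visible in one place; it is illustrative and not part of the component.

    public class SwmDefaultsSketch {
        public static void main(String[] args) {
            // Same effect as ${DRTR_NODE_SPOOL:-/opt/app/datartr/spool} and
            // ${DRTR_NODE_LOGS:-/opt/app/datartr/logs} in the install templates.
            String spool = System.getenv().getOrDefault("DRTR_NODE_SPOOL", "/opt/app/datartr/spool");
            String logs = System.getenv().getOrDefault("DRTR_NODE_LOGS", "/opt/app/datartr/logs");
            System.out.println(spool + " " + logs);
        }
    }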
diff --git a/datarouter-node/src/main/resources/node.properties b/datarouter-node/src/main/resources/node.properties
new file mode 100644
index 0000000..fb97702
--- /dev/null
+++ b/datarouter-node/src/main/resources/node.properties
@@ -0,0 +1,112 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+#

+#	Configuration parameters fixed at startup for the DataRouter node

+#

+#	URL to retrieve dynamic configuration

+#

+#ProvisioningURL:	${DRTR_PROV_INTURL}

+ProvisioningURL=https://prov.datarouternew.com:8443/internal/prov

+

+#

+#	URL to upload PUB/DEL/EXP logs

+#

+#LogUploadURL:	${DRTR_LOG_URL}

+LogUploadURL=https://prov.datarouternew.com:8443/internal/logs

+

+#

+#	The port number for http as seen within the server

+#

+#IntHttpPort:	${DRTR_NODE_INTHTTPPORT:-8080}

+IntHttpPort=8080

+#

+#	The port number for https as seen within the server

+#

+IntHttpsPort=8443

+#

+#	The external port number for https taking port mapping into account

+#

+ExtHttpsPort=443

+#

+#	The minimum interval between fetches of the dynamic configuration

+#	from the provisioning server

+#

+MinProvFetchInterval=10000

+#

+#	The minimum interval between saves of the redirection data file

+#

+MinRedirSaveInterval=10000

+#

+#	The path to the directory where log files are stored

+#

+LogDir=/opt/app/datartr/logs

+#

+#	The retention interval (in days) for log files

+#

+LogRetention=30

+#

+#	The path to the directories where data and metadata files are stored

+#

+SpoolDir=/opt/app/datartr/spool

+#

+#	The path to the redirection data file

+#

+#RedirectionFile:	etc/redirections.dat

+#

+#	The type of keystore for https

+#

+KeyStoreType:	jks

+#

+#	The path to the keystore for https

+#

+KeyStoreFile:/opt/app/datartr/self_signed/keystore.jks

+#

+#	The password for the https keystore

+#

+KeyStorePassword=changeit

+#

+#	The password for the private key in the https keystore

+#

+KeyPassword=changeit

+#

+#	The type of truststore for https

+#

+TrustStoreType=jks

+#

+#	The path to the truststore for https

+#

+#TrustStoreFile=/usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts

+TrustStoreFile=/opt/app/datartr/self_signed/cacerts.jks

+#

+#	The password for the https truststore

+#

+TrustStorePassword=changeit

+#

+#	The path to the file used to trigger an orderly shutdown

+#

+QuiesceFile=etc/SHUTDOWN

+#

+#	The key used to generate passwords for node to node transfers

+#

+NodeAuthKey=Node123!

+

diff --git a/datarouter-prov/data/addFeed3.txt b/datarouter-prov/data/addFeed3.txt
new file mode 100644
index 0000000..21f58a3
--- /dev/null
+++ b/datarouter-prov/data/addFeed3.txt
@@ -0,0 +1,23 @@
+

+{

+     "name": "Jettydemo",

+     "version": "m1.0",

+     "description": "Jettydemo",

+     "business_description": "Jettydemo",

+     "suspend": false,

+     "deleted": false,

+     "changeowner": true,

+     "authorization": {

+          "classification": "unclassified",

+          "endpoint_addrs": [

+               "172.18.0.3",

+			],

+          "endpoint_ids": [

+               {

+                    "password": "rs873m",

+                    "id": "rs873m"

+               }

+          ]

+     }

+}

+

diff --git a/datarouter-prov/data/addSubscriber.txt b/datarouter-prov/data/addSubscriber.txt
new file mode 100644
index 0000000..a9d4156
--- /dev/null
+++ b/datarouter-prov/data/addSubscriber.txt
@@ -0,0 +1,15 @@
+

+{ 

+                "delivery" :	

+               				

+                { 

+                                "url" : "http://172.18.0.3:7070/", 

+                                "user" : "LOGIN", 

+                                "password" : "PASSWORD", 

+                                "use100" : true 

+                },

+                "metadataOnly" : false, 

+                "suspend" : false, 

+				"groupid" : 29,

+                "subscriber" : "sg481n"

+}

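addFeed3.txt and addSubscriber.txt are request bodies for the provisioning API. The sketch below shows one way such a body could be posted using the Apache HttpClient 4.2 dependency declared in datarouter-prov/pom.xml; the request path and the vnd.att-dr.feed content type are assumptions made for illustration rather than something shown in this change.

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.http.HttpResponse;
    import org.apache.http.client.HttpClient;
    import org.apache.http.client.methods.HttpPost;
    import org.apache.http.entity.ContentType;
    import org.apache.http.entity.StringEntity;
    import org.apache.http.impl.client.DefaultHttpClient;

    public class ProvisioningPostSketch {
        public static void main(String[] args) throws Exception {
            String body = new String(Files.readAllBytes(Paths.get("data/addFeed3.txt")),
                    StandardCharsets.UTF_8);
            HttpClient client = new DefaultHttpClient();
            // Host and port are taken from ProvisioningURL in node.properties;
            // the root path and content type below are assumptions for illustration.
            HttpPost post = new HttpPost("https://prov.datarouternew.com:8443/");
            post.setEntity(new StringEntity(body,
                    ContentType.create("application/vnd.att-dr.feed", "UTF-8")));
            HttpResponse resp = client.execute(post);
            System.out.println(resp.getStatusLine());
        }
    }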
diff --git a/datarouter-prov/pom.xml b/datarouter-prov/pom.xml
new file mode 100644
index 0000000..36b0a4f
--- /dev/null
+++ b/datarouter-prov/pom.xml
@@ -0,0 +1,558 @@
+<!--

+  ============LICENSE_START==================================================

+  * org.onap.dmaap

+  * ===========================================================================

+  * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+  * ===========================================================================

+  * Licensed under the Apache License, Version 2.0 (the "License");

+  * you may not use this file except in compliance with the License.

+  * You may obtain a copy of the License at

+  * 

+   *      http://www.apache.org/licenses/LICENSE-2.0

+  * 

+   * Unless required by applicable law or agreed to in writing, software

+  * distributed under the License is distributed on an "AS IS" BASIS,

+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+  * See the License for the specific language governing permissions and

+  * limitations under the License.

+  * ============LICENSE_END====================================================

+  *

+  * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+  *

+-->

+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"

+	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

+	<modelVersion>4.0.0</modelVersion>

+

+	<groupId>com.att.datarouter-prov</groupId>

+	<artifactId>datarouter-prov</artifactId>

+	<version>0.0.1-SNAPSHOT</version>

+	<packaging>jar</packaging>

+

+	<name>datarouter-prov</name>

+	<url>https://github.com/att/DMAAP_DATAROUTER</url>

+    <licenses>

+		<license>

+		<name>Apache License, Version 2.0</name>

+		<url>http://www.apache.org/licenses/LICENSE-2.0</url>

+		</license>

+	</licenses>

+	<properties>

+		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

+		<maven.compiler.source>1.8</maven.compiler.source>

+		<maven.compiler.target>1.8</maven.compiler.target>

+		<dockerLocation>${basedir}/target/</dockerLocation>

+		<docker.registry>hub.docker.com</docker.registry>

+	</properties>

+	<dependencies>

+		

+		<dependency>

+			<groupId>org.json</groupId>

+			<artifactId>json</artifactId>

+			<version>20160810</version>

+		</dependency>

+		<dependency>

+			<groupId>javax.mail</groupId>

+			<artifactId>javax.mail-api</artifactId>

+			<version>1.5.1</version>

+		</dependency>

+		<dependency>

+			<groupId>com.att.eelf</groupId>

+			<artifactId>eelf-core</artifactId>

+			<version>0.0.1</version>

+		</dependency>

+		<dependency>

+			<groupId>javax.servlet</groupId>

+			<artifactId>servlet-api</artifactId>

+			<version>2.5</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-server</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-continuation</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-util</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-deploy</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-servlet</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-servlets</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-http</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-security</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-websocket</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.eclipse.jetty</groupId>

+			<artifactId>jetty-io</artifactId>

+			<version>7.6.14.v20131031</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.apache.commons</groupId>

+			<artifactId>commons-io</artifactId>

+			<version>1.3.2</version>

+		</dependency>

+		<dependency>

+			<groupId>commons-lang</groupId>

+			<artifactId>commons-lang</artifactId>

+			<version>2.4</version>

+		</dependency>

+		<dependency>

+			<groupId>commons-io</groupId>

+			<artifactId>commons-io</artifactId>

+			<version>2.1</version>

+			<scope>compile</scope>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.httpcomponents</groupId>

+			<artifactId>httpcore</artifactId>

+			<version>4.2.2</version>

+		</dependency>

+

+		<dependency>

+			<groupId>org.mozilla</groupId>

+			<artifactId>rhino</artifactId>

+			<version>1.7R3</version>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.james</groupId>

+			<artifactId>apache-mime4j-core</artifactId>

+			<version>0.7</version>

+		</dependency>

+		<dependency>

+			<groupId>org.apache.httpcomponents</groupId>

+			<artifactId>httpclient</artifactId>

+			<version>4.2.3</version>

+		</dependency>

+		<dependency>

+			<groupId>org.sonatype.http-testing-harness</groupId>

+			<artifactId>junit-runner</artifactId>

+			<version>0.11</version>

+		</dependency>

+		<dependency>

+			<groupId>junit</groupId>

+			<artifactId>junit</artifactId>

+			<version>4.10</version>

+			<scope>test</scope>

+		</dependency>

+

+		<dependency>

+		    <groupId>org.mockito</groupId>

+		    <artifactId>mockito-core</artifactId>

+		    <version>1.10.19</version>

+		    <scope>test</scope>

+		</dependency>

+		<dependency>

+		    <groupId>org.powermock</groupId>

+		    <artifactId>powermock-module-junit4</artifactId>

+		    <version>1.6.4</version>

+		    <scope>test</scope>

+		</dependency>

+		<dependency>

+		    <groupId>org.powermock</groupId>

+		    <artifactId>powermock-api-mockito</artifactId>

+		    <version>1.6.4</version>

+		    <scope>test</scope>

+		</dependency>

+<!-- 		<dependency>

+			<groupId>org.junit</groupId>

+			<artifactId>com.springsource.org.junit</artifactId>

+			<version>4.4.0</version>

+		</dependency> -->

+		<dependency>

+			<groupId>mysql</groupId>

+			<artifactId>mysql-connector-java</artifactId>

+			<version>5.1.21</version>

+		</dependency>

+		<dependency>

+			<groupId>org.eclipse.jetty.cdi</groupId>

+			<artifactId>cdi-websocket</artifactId>

+			<version>9.3.11.v20160721</version>

+		</dependency>

+		

+		<dependency>

+			<groupId>log4j</groupId>

+			<artifactId>log4j</artifactId>

+			<version>1.2.17</version>

+			<scope>compile</scope>

+		</dependency>

+	</dependencies>

+

+	<build>

+		<finalName>datarouter-prov</finalName>

+		<resources>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/*.properties</include>

+				</includes>

+			</resource>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/proserver.properties</include>

+				</includes>

+			</resource>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/EelfMessages.properties</include>

+				</includes>

+			</resource>

+			<resource>

+				<directory>src/main/resources</directory>

+				<filtering>true</filtering>

+				<includes>

+					<include>**/log4j.properties</include>

+				</includes>

+			</resource>

+			<!-- <resource> <directory>src/main/config</directory> <filtering>true</filtering> 

+				<includes> <include>**/log4j*.xml</include> </includes> </resource> <resource> 

+				<directory>src/main/resources</directory> <filtering>false</filtering> <excludes> 

+				<exclude>**/cambriaApiVersion.properties</exclude> </excludes> </resource> -->

+		</resources>

+		<plugins>

+		

+			<plugin>

+				<artifactId>maven-assembly-plugin</artifactId>

+				<version>2.4</version>

+				<configuration>

+					<descriptorRefs>

+						<descriptorRef>jar-with-dependencies</descriptorRef>

+					</descriptorRefs>

+					<outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>

+					<archive>

+

+						<manifest>

+							<addClasspath>true</addClasspath>

+							<mainClass>com.att.research.datarouter.provisioning.Main</mainClass>

+							 

+						</manifest>

+					</archive>

+				</configuration>

+

+				<executions>

+					<execution>

+						<id>make-assembly</id> <!-- this is used for inheritance merges -->

+						<phase>package</phase> <!-- bind to the packaging phase -->

+						<goals>

+							<goal>single</goal>

+						</goals>

+					</execution>

+				</executions>

+			</plugin>

+

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-compiler-plugin</artifactId>

+				<configuration>

+					<archive>

+						<manifest>

+							<addClasspath>true</addClasspath>

+							<mainClass>com.att.research.datarouter.provisioning.Main</mainClass>

+							 <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>

+						</manifest>

+					</archive>

+

+					<source>1.8</source>

+					<target>1.8</target>

+				</configuration>

+				<version>3.6.0</version>

+			</plugin>

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-resources-plugin</artifactId>

+				<version>2.7</version>

+				<executions>

+					<execution>

+						<id>copy-docker-file</id>

+						<phase>package</phase>

+						<goals>

+							<goal>copy-resources</goal>

+						</goals>

+						<configuration>

+							<outputDirectory>${dockerLocation}</outputDirectory>

+							<overwrite>true</overwrite>

+							<resources>

+								<resource>

+									<directory>${basedir}/src/main/resources/docker</directory>

+									<filtering>true</filtering>

+									<includes>

+										<include>**/*</include>

+									</includes>

+								</resource>

+							</resources>

+						</configuration>

+					</execution>

+				</executions>

+			</plugin>

+	<plugin>

+				<groupId>com.spotify</groupId>

+				<artifactId>docker-maven-plugin</artifactId>

+				<version>0.4.11</version>

+				<configuration>

+					<imageName>datarouter-prov</imageName>

+					<dockerDirectory>${dockerLocation}</dockerDirectory>

+					<serverId>docker-hub</serverId>

+					<registryUrl>https://${docker.registry}</registryUrl>

+					<imageTags>

+						<imageTag>${project.version}</imageTag>

+						<imageTag>latest</imageTag>

+					</imageTags>

+					<forceTags>true</forceTags>

+				</configuration>

+			</plugin> 

+			

+

+<plugin>

+ <groupId>com.blackducksoftware.integration</groupId>

+ <artifactId>hub-maven-plugin</artifactId>

+ <version>1.0.4</version>

+ <inherited>false</inherited>

+ <configuration>

+  <target>${project.basedir}</target>

+ </configuration>

+ <executions>

+  <execution>

+   <id>create-bdio-file</id>

+   <phase>package</phase>

+   <goals>

+    <goal>createHubOutput</goal>

+   </goals>

+  </execution>

+ </executions>

+</plugin>

+  

+  <plugin>

+    <artifactId>maven-resources-plugin</artifactId>

+    <version>2.7</version>

+    <executions>

+      <execution>

+        <id>copy-resources-1</id>

+        <phase>validate</phase>

+        <goals>

+          <goal>copy-resources</goal>

+        </goals>

+        <configuration>

+          <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>

+          <resources>

+            <resource>

+                        <directory>${project.basedir}/src/main/resources</directory> 

+                        <includes>

+                            <include>**/*.jar</include>

+                        </includes>   

+                    </resource>

+          </resources>

+        </configuration>

+      </execution>

+      <execution>

+        <id>copy-resources-2</id>

+        <phase>validate</phase>

+        <goals>

+          <goal>copy-resources</goal>

+        </goals>

+        <configuration>

+          <outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>

+          <resources>

+            <resource>

+                        <directory>${basedir}/src/main/resources</directory>

+                        <includes>

+                            <include>misc/**</include>

+                            <include>**/**</include>

+                        </includes>

+                    </resource>

+          </resources>

+        </configuration>

+      </execution>

+      <execution>

+        <id>copy-resources-3</id>

+        <phase>validate</phase>

+        <goals>

+          <goal>copy-resources</goal>

+        </goals>

+        <configuration>

+          <outputDirectory>${basedir}/target/opt/app/datartr</outputDirectory>

+          <resources>

+            <resource>

+                        <directory>${basedir}/data</directory>

+                        <includes>

+                            <include>misc/**</include>

+                            <include>**/**</include>

+                        </includes>

+                    </resource>

+          </resources>

+        </configuration>

+      </execution>

+	   <execution>

+        <id>copy-resources-4</id>

+        <phase>validate</phase>

+        <goals>

+          <goal>copy-resources</goal>

+        </goals>

+        <configuration>

+          <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>

+          <resources>

+            <resource>

+                        <directory>${basedir}/self_signed</directory>

+                        <includes>

+                            <include>misc/**</include>

+                            <include>**/**</include>

+                        </includes>

+                    </resource>

+          </resources>

+        </configuration>

+      </execution>

+	  

+    </executions>

+  </plugin>

+  

+			<plugin>

+				<groupId>org.apache.maven.plugins</groupId>

+				<artifactId>maven-dependency-plugin</artifactId>

+				<version>2.10</version>

+				<executions>

+					<execution>

+						<id>copy-dependencies</id>

+						<phase>package</phase>

+						<goals>

+							<goal>copy-dependencies</goal>

+						</goals>

+						<configuration>

+							<outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>

+							<overWriteReleases>false</overWriteReleases>

+							<overWriteSnapshots>false</overWriteSnapshots>

+							<overWriteIfNewer>true</overWriteIfNewer>

+						</configuration>

+					</execution>

+				</executions>

+			</plugin>

+			

+			<plugin>

+			<groupId>org.apache.maven.plugins</groupId>

+			<artifactId>maven-javadoc-plugin</artifactId>

+			<configuration>

+			<failOnError>false</failOnError>

+			</configuration>

+			<executions>

+				<execution>

+					<id>attach-javadocs</id>

+					<goals>

+						<goal>jar</goal>

+					</goals>

+				</execution>

+			</executions>

+		</plugin> 

+	   

+	   

+	       <plugin>

+		      <groupId>org.apache.maven.plugins</groupId>

+		      <artifactId>maven-source-plugin</artifactId>

+		      <version>2.2.1</version>

+		      <executions>

+			<execution>

+			  <id>attach-sources</id>

+			  <goals>

+			    <goal>jar-no-fork</goal>

+			  </goals>

+			</execution>

+		      </executions>

+		    </plugin>

+	

+

+	<plugin>

+	    <groupId>org.apache.maven.plugins</groupId>

+	    <artifactId>maven-gpg-plugin</artifactId>

+	    <version>1.5</version>

+	    <executions>

+		<execution>

+		    <id>sign-artifacts</id>

+		    <phase>verify</phase>

+		    <goals>

+			<goal>sign</goal>

+		    </goals>

+		</execution>

+	    </executions>

+	  </plugin> 

+			

+		<plugin>

+			<groupId>org.sonatype.plugins</groupId>

+			<artifactId>nexus-staging-maven-plugin</artifactId>

+			<version>1.6.7</version>

+			<extensions>true</extensions>

+			<configuration>

+			<serverId>ossrhdme</serverId>

+			<nexusUrl>https://oss.sonatype.org/</nexusUrl>

+			<autoReleaseAfterClose>true</autoReleaseAfterClose>

+			</configuration>

+		</plugin>

+			

+		<plugin>

+				<groupId>org.codehaus.mojo</groupId>

+				<artifactId>cobertura-maven-plugin</artifactId>

+				<version>2.7</version>

+				<configuration>

+					<formats>

+					<format>html</format>

+					<format>xml</format>

+				  </formats>

+				</configuration>

+			</plugin>

+		</plugins>

+	</build>

+	

+	

+<distributionManagement>

+    		<snapshotRepository>

+      			<id>ossrhdme</id>

+      			<url>https://oss.sonatype.org/content/repositories/snapshots</url>

+    		</snapshotRepository>

+    		<repository>

+      			<id>ossrhdme</id>

+      			<url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>

+    		</repository>

+	</distributionManagement>

+	

+	<scm>

+		<connection>scm:git:https://github.com/att/DMAAP_DATAROUTER.git</connection>

+		<developerConnection>${project.scm.connection}</developerConnection>

+		<url>https://github.com/att/DMAAP_DATAROUTER/tree/master</url>

+	</scm>

+	

+</project>

diff --git a/datarouter-prov/self_signed/cacerts.jks b/datarouter-prov/self_signed/cacerts.jks
new file mode 100644
index 0000000..76a480a
--- /dev/null
+++ b/datarouter-prov/self_signed/cacerts.jks
Binary files differ
diff --git a/datarouter-prov/self_signed/keystore.jks b/datarouter-prov/self_signed/keystore.jks
new file mode 100644
index 0000000..2c22b4a
--- /dev/null
+++ b/datarouter-prov/self_signed/keystore.jks
Binary files differ
diff --git a/datarouter-prov/self_signed/mykey.cer b/datarouter-prov/self_signed/mykey.cer
new file mode 100644
index 0000000..2a5c9d7
--- /dev/null
+++ b/datarouter-prov/self_signed/mykey.cer
Binary files differ
diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponse.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponse.java
new file mode 100644
index 0000000..26956f8
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponse.java
@@ -0,0 +1,58 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.authz;

+

+import java.util.List;

+

+/**

+ * The <code>AuthorizationResponse</code> interface gives the caller access to information about an authorization

+ * decision.  This information includes the permit/deny decision itself, along with supplementary information in the form of

+ * advice and obligations.  (The advice and obligations will not be used in Data Router R1.)

+ * 

+ * @author J. F. Lucas

+ *

+ */

+public interface AuthorizationResponse {

+	/**

+	 * Indicates whether the request is authorized or not.

+	 * 

+	 * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.

+	 */

+	public boolean isAuthorized();

+	

+	/**

+	 * Returns any advice elements that were included in the authorization response.

+	 * 

+	 * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an

+	 * advice element from the authorization response.

+	 */

+	public List<AuthorizationResponseSupplement> getAdvice();

+	

+	/**

+	 * Returns any obligation elements that were included in the authorization response.

+	 * 

+	 * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an

+	 * obligation element from the authorization response.

+	 */

+	public List<AuthorizationResponseSupplement> getObligations();

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponseSupplement.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponseSupplement.java
new file mode 100644
index 0000000..2829c50
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/AuthorizationResponseSupplement.java
@@ -0,0 +1,52 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.authz;

+

+import java.util.Map;

+

+/** An object that implements the <code>AuthorizationResponseSupplement</code> interface carries supplementary

+ * information for an authorization response.  In a XACML-based system, a response to an authorization request

+ * carries not just the permit/deny decision but, optionally, supplemental information in the form of advice and

+ * obligation elements.  The structures of a XACML advice element and a XACML obligation element are similar: each has an identifier and

+ * a set of attribute (name-value) pairs.  (The difference between a XACML advice element and a XACML obligation element is in

+ * how the recipient of the response--the Policy Enforcement Point, in XACML terminology--handles the element.)

+ * 

+ * @author J. F. Lucas

+ *

+ */

+public interface AuthorizationResponseSupplement {

+	/** Return the identifier for the supplementary information element.

+	 * 

+	 * @return a <code>String</code> containing the identifier.

+	 */

+	public String getId();

+	

+	/** Return the attributes for the supplementary information element, as a <code>Map</code> in which

+	 * keys represent attribute identifiers and values represent attribute values.

+	 * 

+	 * @return attributes for the supplementary information element.

+	 */

+	public Map<String, String> getAttributes();

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/Authorizer.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/Authorizer.java
new file mode 100644
index 0000000..bfed5c3
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/Authorizer.java
@@ -0,0 +1,62 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.authz;

+

+import java.util.Map;

+import javax.servlet.http.HttpServletRequest;

+

+/**

+ * A Data Router API that requires authorization of incoming requests creates an instance of a class that implements

+ * the <code>Authorizer</code> interface.   The class implements all of the logic necessary to determine if an API

+ * request is permitted.  In Data Router R1, the classes that implement the <code>Authorizer</code> interface will have

+ * local logic that makes the authorization decision.  After R1, these classes will instead have logic that creates XACML

+ * authorization requests, sends these requests to a Policy Decision Point (PDP), and parses the XACML responses.

+ * 

+ * @author J. F. Lucas

+ *

+ */

+public interface Authorizer {

+	/**

+	 * Determine if the API request carried in the <code>request</code> parameter is permitted.

+	 * 

+	 * @param request the HTTP request for which an authorization decision is needed

+	 * @return an object implementing the <code>AuthorizationResponse</code> interface.  This object includes the

+	 * permit/deny decision for the request and (after R1) supplemental information related to the response in the form

+	 * of advice and obligations.

+	 */

+	public AuthorizationResponse decide(HttpServletRequest request);

+	

+	/**

+	 * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in

+	 * the <code>additionalAttrs</code> parameter, is permitted.

+	 * 

+	 * @param request the HTTP request for which an authorization decision is needed

+	 * @param additionalAttrs additional attributes that the <code>Authorizer</code> can use in making an authorization decision

+	 * @return an object implementing the <code>AuthorizationResponse</code> interface.  This object includes the

+	 * permit/deny decision for the request and (after R1) supplemental information related to the response in the form

+	 * of advice and obligations.

+	 */

+	public AuthorizationResponse decide(HttpServletRequest request, Map<String,String> additionalAttrs);

+}

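The interface above is all a Data Router API needs from the authorization layer, so a minimal implementation can be sketched directly against it. The class below is an invented permit-all stub built on the AuthRespImpl class introduced in the next file; the real provisioning-side implementation is not part of this hunk.

    package com.att.research.datarouter.authz.impl;

    import java.util.Map;

    import javax.servlet.http.HttpServletRequest;

    import com.att.research.datarouter.authz.AuthorizationResponse;
    import com.att.research.datarouter.authz.Authorizer;

    /** Illustrative permit-all Authorizer; not the project's real implementation. */
    public class PermitAllAuthorizer implements Authorizer {
        @Override
        public AuthorizationResponse decide(HttpServletRequest request) {
            return new AuthRespImpl(true);
        }

        @Override
        public AuthorizationResponse decide(HttpServletRequest request, Map<String, String> additionalAttrs) {
            return new AuthRespImpl(true);
        }
    }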
diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespImpl.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespImpl.java
new file mode 100644
index 0000000..db318d3
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespImpl.java
@@ -0,0 +1,97 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.authz.impl;

+

+import java.util.ArrayList;

+import java.util.List;

+

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.authz.AuthorizationResponseSupplement;

+

+

+/** A representation of an authorization response returned by a XACML Policy Decision Point.

+ *  In Data Router R1, advice and obligations are not used.

+ * @author J. F. Lucas

+ *

+ */

+public class AuthRespImpl implements AuthorizationResponse {

+	private boolean authorized;

+	private List<AuthorizationResponseSupplement> advice;

+	private List<AuthorizationResponseSupplement> obligations;

+	

+	/** Constructor.  This version will not be used in Data Router R1 since we will not have advice and obligations.

+	 * 

+	 * @param authorized flag indicating whether the response carried a permit response (<code>true</code>) 

+	 * or something else (<code>false</code>).

+	 * @param advice list of advice elements returned in the response.

+	 * @param obligations list of obligation elements returned in the response.

+	 */

+	public AuthRespImpl(boolean authorized, List<AuthorizationResponseSupplement> advice, List<AuthorizationResponseSupplement> obligations) {

+		this.authorized = authorized;

+		this.advice = (advice == null ? null : new ArrayList<AuthorizationResponseSupplement> (advice));

+		this.obligations = (obligations == null ? null : new ArrayList<AuthorizationResponseSupplement> (obligations));

+	}

+	

+	/** Constructor.  Simple version for authorization responses that have no advice and no obligations.

+	 * 

+	 * @param authorized flag indicating whether the response carried a permit (<code>true</code>) or something else (<code>false</code>).

+	 */

+	public AuthRespImpl(boolean authorized) {

+		this(authorized, null, null);

+	}

+

+	/**

+	 * Indicates whether the request is authorized or not.

+	 * 

+	 * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.

+	 */

+	@Override

+	public boolean isAuthorized() {

+			return authorized;

+	}

+

+	/**

+	 * Returns any advice elements that were included in the authorization response.

+	 * 

+	 * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an

+	 * advice element from the authorization response.

+	 */

+	@Override

+	public List<AuthorizationResponseSupplement> getAdvice() {

+			return advice;

+	}

+

+	/**

+	 * Returns any obligation elements that were included in the authorization response.

+	 * 

+	 * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an

+	 * obligation element from the authorization response.

+	 */

+	@Override

+	public List<AuthorizationResponseSupplement> getObligations() {

+		return obligations;

+	}

+

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespSupplementImpl.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespSupplementImpl.java
new file mode 100644
index 0000000..5d2b61c
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthRespSupplementImpl.java
@@ -0,0 +1,71 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.authz.impl;

+

+import java.util.HashMap;

+import java.util.Map;

+

+import com.att.research.datarouter.authz.AuthorizationResponseSupplement;

+

+/** Carries supplementary information--an advice or an obligation--from the authorization response returned

+ *  by a XACML Policy Decision Point.   Not used in Data Router R1.

+ * @author J. F. Lucas

+ *

+ */

+public class AuthRespSupplementImpl implements AuthorizationResponseSupplement {

+	

+	private String id = null;

+	private Map<String, String> attributes = null;

+

+	/** Constructor, available within the package.

+	 * 

+	 * @param id  The identifier for the advice or obligation element

+	 * @param attributes The attributes (name-value pairs) for the advice or obligation element.

+	 */

+	AuthRespSupplementImpl (String id, Map<String, String> attributes) {

+		this.id = id;

+		this.attributes = new HashMap<String,String>(attributes);

+	}

+

+	/** Return the identifier for the supplementary information element.

+	 * 

+	 * @return a <code>String</code> containing the identifier.

+	 */

+	@Override

+	public String getId() {

+		return id;

+	}

+

+	/** Return the attributes for the supplementary information element, as a <code>Map</code> in which

+	 * keys represent attribute identifiers and values represent attribute values.

+	 * 

+	 * @return attributes for the supplementary information element.

+	 */

+	@Override

+	public Map<String, String> getAttributes() {

+		return attributes;

+	}

+

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthzResource.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthzResource.java
new file mode 100644
index 0000000..1a201b7
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/AuthzResource.java
@@ -0,0 +1,100 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.authz.impl;

+

+import java.util.regex.Matcher;

+import java.util.regex.Pattern;

+

+/** Internal representation of an authorization resource (the entity to which access is being requested).  Consists

+ * of a type and an identifier.   The constructor takes the request URI from an HTTP request and checks it against

+ * patterns for the different resource types.  In DR R1, there are four resource types:

+ * <ul>

+ * <li>the feeds collection resource, the target of POST requests to create a new feed and GET requests to list

+ * the existing feeds.  This is the root resource for the DR provisioning system, and it has no explicit id.

+ * </li>

+ * <li>a feed resource, the target of GET, PUT, and DELETE requests used to manage an existing feed.  Each feed

+ * has a unique feed ID.

+ * </li>

+ * <li>a subscription collection resource, the target of POST requests to create a new subscription and GET requests

+ * to list the subscriptions for a feed.  Each feed has a subscription collection, and the ID associated with a

+ * subscription collection is the ID of the feed.

+ * </li>

+ * <li>a subscription resource, the target of GET, PUT, and DELETE requests used to manage an existing subscription.

+ * Each subscription has a unique subscription ID.

+ * </li>

+ * </ul>

+ * @author J. F. Lucas

+ *

+ */

+public class AuthzResource {

+	private ResourceType type = null;

+	private String id = "";

+

+	/* Construct an AuthzResource by matching a request URI against the various patterns */

+	public AuthzResource(String rURI) {

+		if (rURI != null) {

+			for (ResourceType t : ResourceType.values()) {

+				Matcher m = t.getPattern().matcher(rURI);

+				if (m.find(0)) {

+					this.type = t;

+					if (m.group("id") != null) {

+						this.id = m.group("id");

+					}

+					break;

+				}

+			}

+		}

+	}

+	

+	public ResourceType getType() {

+		return this.type;

+	}

+	

+	public String getId() {

+		return this.id;

+	}

+	

+	/* Enumeration that helps turn a request URI into something more useful for

+	 * authorization purposes by giving it a type name and a pattern for determining if the URI

+	 * represents that resource type.

+	 * Highly dependent on the URL scheme, could be parameterized.

+	 */

+	public enum ResourceType { 

+		FEEDS_COLLECTION("((://[^/]+/)|(^/))(?<id>)$"), 

+		SUBS_COLLECTION ("((://[^/]+/)|(^/{0,1}))subscribe/(?<id>[^/]+)$"),

+		FEED("((://[^/]+/)|(^/{0,1}))feed/(?<id>[^/]+)$"),

+		SUB("((://[^/]+/)|(^/{0,1}))subs/(?<id>[^/]+)$");

+		

+		private Pattern uriPattern;

+		

+		private ResourceType(String patternString) {

+			this.uriPattern = Pattern.compile(patternString);

+		}

+		

+		Pattern getPattern() {

+			return this.uriPattern;

+		}

+	}

+}
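
As a quick illustration of how these patterns resolve in practice (the URIs below are hypothetical examples, not taken from this change):

import com.att.research.datarouter.authz.impl.AuthzResource;

public class AuthzResourceDemo {
    public static void main(String[] args) {
        // Root of the provisioning API: FEEDS_COLLECTION with an empty id
        AuthzResource root = new AuthzResource("/");
        System.out.println(root.getType() + " id='" + root.getId() + "'");  // FEEDS_COLLECTION id=''

        // A specific feed: FEED, with the id captured from the named group "id"
        AuthzResource feed = new AuthzResource("/feed/42");
        System.out.println(feed.getType() + " id='" + feed.getId() + "'");  // FEED id='42'

        // The subscription collection of a feed: SUBS_COLLECTION, id is the feed id
        AuthzResource subs = new AuthzResource("/subscribe/42");
        System.out.println(subs.getType() + " id='" + subs.getId() + "'");  // SUBS_COLLECTION id='42'

        // A specific subscription: SUB, id is the subscription id
        AuthzResource sub = new AuthzResource("/subs/7");
        System.out.println(sub.getType() + " id='" + sub.getId() + "'");    // SUB id='7'
    }
}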

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvAuthorizer.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvAuthorizer.java
new file mode 100644
index 0000000..d6683d5
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvAuthorizer.java
@@ -0,0 +1,179 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.authz.impl;

+

+import java.util.Map;

+

+import javax.servlet.http.HttpServletRequest;

+

+import org.apache.log4j.Logger;

+

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.authz.Authorizer;

+import com.att.research.datarouter.authz.impl.AuthzResource.ResourceType;

+

+/** Authorizer for the provisioning API for Data Router R1

+ * 

+ * @author J. F. Lucas

+ *

+ */

+public class ProvAuthorizer implements Authorizer {

+	

+	private Logger log;

+	private ProvDataProvider provData;

+	

+	private static final String SUBJECT_HEADER = "X-ATT-DR-ON-BEHALF-OF";  // HTTP header carrying requester identity

+	private static final String SUBJECT_HEADER_GROUP = "X-ATT-DR-ON-BEHALF-OF-GROUP";  // HTTP header carrying requester identity  by group Rally : US708115

+	/** Constructor. For the moment, do nothing special.  Make it a singleton? 

+	 * 

+	 */

+	public ProvAuthorizer(ProvDataProvider provData) {

+		this.provData = provData;

+		this.log = Logger.getLogger(this.getClass());

+	}

+	

+	/**

+	 * Determine if the API request carried in the <code>request</code> parameter is permitted.

+	 * 

+	 * @param request the HTTP request for which an authorization decision is needed

+	 * @return an object implementing the <code>AuthorizationResponse</code> interface.  This object includes the

+	 * permit/deny decision for the request and (after R1) supplemental information related to the response in the form

+	 * of advice and obligations.

+	 */

+	@Override

+	public AuthorizationResponse decide(HttpServletRequest request) {

+			return this.decide(request, null);

+	}

+	

+	/**

+	 * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in

+	 * the <code>additionalAttrs</code> parameter, is permitted.   <code>additionalAttrs</code> isn't used in R1.

+	 * 

+	 * @param request the HTTP request for which an authorization decision is needed

+	 * @param additionalAttrs additional attributes that the <code>Authorizer</code> can use in making an authorization decision

+	 * @return an object implementing the <code>AuthorizationResponse</code> interface.  This object includes the

+	 * permit/deny decision for the request and (after R1) supplemental information related to the response in the form

+	 * of advice and obligations.

+	 */

+	@Override

+	public AuthorizationResponse decide(HttpServletRequest request,

+			Map<String, String> additionalAttrs) {

+		log.trace ("Entering decide()");

+		

+		boolean decision = false;

+		

+		// Extract interesting parts of the HTTP request

+		String method = request.getMethod();

+		AuthzResource resource = new AuthzResource(request.getRequestURI());

+		String subject = (request.getHeader(SUBJECT_HEADER));		 // identity of the requester

+		String subjectgroup = (request.getHeader(SUBJECT_HEADER_GROUP)); // identity of the requester by group Rally : US708115

+

+		log.trace("Method: " + method + " -- Type: " + resource.getType() + " -- Id: " + resource.getId() + 

+				" -- Subject: " + subject);

+		

+		// Choose authorization method based on the resource type

+		ResourceType resourceType = resource.getType();

+		if (resourceType != null) {

+

+			switch (resourceType) {

+

+			case FEEDS_COLLECTION:

+				decision = allowFeedsCollectionAccess(resource, method, subject, subjectgroup);

+				break;

+

+			case SUBS_COLLECTION:

+				decision = allowSubsCollectionAccess(resource, method, subject, subjectgroup);

+				break;

+

+			case FEED:

+				decision = allowFeedAccess(resource, method, subject, subjectgroup);

+				break;

+

+			case SUB:

+				decision = allowSubAccess(resource, method, subject, subjectgroup);

+				break;

+

+			default:

+				decision = false;

+				break;

+			}

+		}

+		log.debug("Exit decide(): "  + method + "|" + resourceType + "|" + resource.getId() + "|" + subject + " ==> " + decision);

+		

+		return new AuthRespImpl(decision);

+	}

+	

+	private boolean allowFeedsCollectionAccess(AuthzResource resource,	String method, String subject, String subjectgroup) {

+		

+		// Allow GET or POST unconditionally

+		return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));

+	}

+	

+	private boolean allowSubsCollectionAccess(AuthzResource resource, String method, String subject, String subjectgroup) {

+		

+		// Allow GET or POST unconditionally

+		return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));

+	}

+	

+	private boolean allowFeedAccess(AuthzResource resource, String method,	String subject, String subjectgroup) {

+		boolean decision = false;

+		

+		// Allow GET, PUT, or DELETE if requester (subject) is the owner (publisher) of the feed

+		if ( method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") ||

+				method.equalsIgnoreCase("DELETE"))) {

+			

+			String owner = provData.getFeedOwner(resource.getId());

+			decision = (owner != null) && owner.equals(subject);

+			

+			//Verifying by group Rally : US708115

+			if(subjectgroup != null) { 

+				String feedowner = provData.getGroupByFeedGroupId(subject, resource.getId());

+				decision = (feedowner != null) && feedowner.equals(subjectgroup);

+			}

+		}

+		

+		return decision;

+	}

+	

+	private boolean allowSubAccess(AuthzResource resource, String method, String subject, String subjectgroup) {

+		boolean decision = false;

+		

+		// Allow GET, PUT, or DELETE if requester (subject) is the owner of the subscription (subscriber)

+		if (method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") || 

+				method.equalsIgnoreCase("DELETE") || method.equalsIgnoreCase("POST"))) {

+			

+			String owner = provData.getSubscriptionOwner(resource.getId());

+			decision = (owner != null) && owner.equals(subject);

+			

+			//Verifying by group Rally : US708115

+			if(subjectgroup != null) {

+				String feedowner = provData.getGroupBySubGroupId(subject, resource.getId());

+				decision = (feedowner != null) && feedowner.equals(subjectgroup);

+			}

+		}

+		

+		return decision;

+	}

+

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvDataProvider.java b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvDataProvider.java
new file mode 100644
index 0000000..76ae034
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/ProvDataProvider.java
@@ -0,0 +1,66 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.authz.impl;

+

+/** Interface to access data about subscriptions and feeds.  A software component that 

+ * uses the <code>ProvAuthorizer</code> needs to supply an implementation of this interface.

+ * @author J. F. Lucas

+ *

+ */

+public interface ProvDataProvider {

+	

+	/** Get the identity of the owner of a feed.

+	 * 

+	 * @param feedId the feed ID of the feed whose owner is being looked up.

+	 * @return the feed owner's identity

+	 */

+	public String getFeedOwner(String feedId);

+	

+	/** Get the security classification of a feed.

+	 * 

+	 * @param feedId the ID of the feed whose classification is being looked up.

+	 * @return the classification of the feed.

+	 */

+	public String getFeedClassification(String feedId);

+	

+	/** Get the identity of the owner of a subscription.

+	 * 

+	 * @param subId the ID of the subscription whose owner is being looked up.

+	 * @return the subscription owner's identity.

+	 */

+	public String getSubscriptionOwner(String subId);

+

+	/** Get the authorized group id for a feed, based on the feed's group - Rally : US708115

+	 * 

+	 * @param owner the identity of the user to check for membership in the feed's group

+	 * @param feedId the ID of the feed whose group is being looked up

+	 * @return the group's authorization id if the user is a member of the feed's group; otherwise <code>null</code>.

+	 */

+	public String getGroupByFeedGroupId(String owner, String feedId);

+	

+	/** Get the authorized group id for a subscription, based on the subscription's group - Rally : US708115

+	 * 

+	 * @param owner the identity of the user to check for membership in the subscription's group

+	 * @param subId the ID of the subscription whose group is being looked up

+	 * @return the group's authorization id if the user is a member of the subscription's group; otherwise <code>null</code>.

+	 */

+	public String getGroupBySubGroupId(String owner, String subId);

+}
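
To make the contract concrete, a minimal hard-coded implementation wired into <code>ProvAuthorizer</code> might look like the sketch below; the class name and the returned values are assumptions for illustration only (in this change, <code>BaseServlet</code> is the real implementation, backed by the provisioning database).

import com.att.research.datarouter.authz.Authorizer;
import com.att.research.datarouter.authz.impl.ProvAuthorizer;
import com.att.research.datarouter.authz.impl.ProvDataProvider;

// Hypothetical stub: every feed is owned by "user1", every subscription by "user2",
// and no group-based authorization data is available.
public class StubProvDataProvider implements ProvDataProvider {
    @Override
    public String getFeedOwner(String feedId) { return "user1"; }

    @Override
    public String getFeedClassification(String feedId) { return "unclassified"; }

    @Override
    public String getSubscriptionOwner(String subId) { return "user2"; }

    @Override
    public String getGroupByFeedGroupId(String owner, String feedId) { return null; }

    @Override
    public String getGroupBySubGroupId(String owner, String subId) { return null; }

    public static void main(String[] args) {
        // The authorizer delegates its ownership lookups to the provider;
        // decide(request) is then called with the live HttpServletRequest at runtime.
        Authorizer authz = new ProvAuthorizer(new StubProvDataProvider());
        System.out.println("Authorizer constructed: " + authz);
    }
}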

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/package.html
new file mode 100644
index 0000000..fae27ee
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/impl/package.html
@@ -0,0 +1,68 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<head>

+</head>

+<body>

+<p>

+This package provides an implementation of the authorization-related interfaces 

+defined by the <code>com.att.research.datarouter.authz</code> package, intended for

+use with the provisioning server for Data Router Release 1.   In DR R1, we do not

+have an external policy engine, so this implementation performs the authorization

+locally.

+</p>

+<p>

+In order to perform the authorization, this package needs access to provisioning data 

+about feeds and subscriptions.  This package defines an interface

+(<code>com.att.research.datarouter.authz.impl.ProvDataProvider</code>) through which it

+expects to get this data.   The provisioning server code must provide an implementation

+of this interface.

+</p>

+<p>

+A software component that wishes to use this implementation must:

+<ul>

+<li>Provide an implementation of the 

+<code>com.att.research.datarouter.authz.impl.ProvDataProvider</code>

+interface.

+</li>

+<li>

+Create an instance of the <code>ProvDataProvider</code> implementation.

+</li>

+<li>

+Create an instance of the

+<code>com.att.research.datarouter.authz.impl.ProvAuthorizer</code>

+class defined in this package, passing it an instance of the <code>ProvDataProvider</code>

+implementation.

+</li>

+</ul>

+</p>

+<p>

+Example:

+<pre>

+<code>

+ProvDataProvider dataProv = new MyDataProvider();

+Authorizer authz = new ProvAuthorizer(dataProv);

+</code>

+</pre>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/authz/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/package.html
new file mode 100644
index 0000000..7628ae8
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/authz/package.html
@@ -0,0 +1,38 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<head>

+</head>

+<body>

+<p>

+This package defines an interface that can be used by servlet-based HTTP APIs to

+make authorization requests and receive authorization responses from an external

+authorization entity such as a XACML Policy Decision Point (PDP).

+</p>

+<p>

+In Data Router Release 1, there is no external authorization system.  The provisioning server

+will use an implementation of this interface for local authorization of provisioning requests.

+</p>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/BaseServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/BaseServlet.java
new file mode 100644
index 0000000..a478493
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/BaseServlet.java
@@ -0,0 +1,869 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;

+

+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;

+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;

+

+import java.io.IOException;

+import java.io.InputStream;

+import java.net.InetAddress;

+import java.net.UnknownHostException;

+import java.security.cert.X509Certificate;

+import java.sql.Connection;

+import java.sql.SQLException;

+import java.util.HashMap;

+import java.util.HashSet;

+import java.util.Map;

+import java.util.Set;

+import java.util.List;

+import java.util.ArrayList;

+

+import javax.servlet.ServletConfig;

+import javax.servlet.ServletException;

+import javax.servlet.http.HttpServlet;

+import javax.servlet.http.HttpServletRequest;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+import org.json.JSONTokener;

+import org.json.JSONException;	

+import org.slf4j.MDC;

+

+import com.att.research.datarouter.authz.Authorizer;

+import com.att.research.datarouter.authz.impl.ProvAuthorizer;

+import com.att.research.datarouter.authz.impl.ProvDataProvider;

+import com.att.research.datarouter.provisioning.beans.Deleteable;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.beans.Insertable;

+import com.att.research.datarouter.provisioning.beans.NodeClass;

+import com.att.research.datarouter.provisioning.beans.Parameters;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.beans.Updateable;

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.ThrottleFilter;

+import com.att.research.datarouter.provisioning.beans.Group; //Groups feature Rally:US708115 - 1610	

+

+import java.util.Properties;

+import java.util.regex.Pattern;

+import javax.mail.Message;

+import javax.mail.MessagingException;

+import javax.mail.Multipart;

+import javax.mail.Session;

+import javax.mail.Transport;

+import javax.mail.internet.AddressException;

+import javax.mail.internet.InternetAddress;

+import javax.mail.internet.MimeBodyPart;

+import javax.mail.internet.MimeMessage;

+import javax.mail.internet.MimeMultipart;

+/**

+ * This is the base class for all Servlets in the provisioning code.

+ * It provides standard constants and some common methods.

+ *

+ * @author Robert Eby

+ * @version $Id: BaseServlet.java,v 1.16 2014/03/12 19:45:40 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class BaseServlet extends HttpServlet implements ProvDataProvider {

+	public static final String BEHALF_HEADER         = "X-ATT-DR-ON-BEHALF-OF";

+	public static final String FEED_BASECONTENT_TYPE = "application/vnd.att-dr.feed";

+	public static final String FEED_CONTENT_TYPE     = "application/vnd.att-dr.feed; version=2.0";

+	public static final String FEEDFULL_CONTENT_TYPE = "application/vnd.att-dr.feed-full; version=2.0";

+	public static final String FEEDLIST_CONTENT_TYPE = "application/vnd.att-dr.feed-list; version=1.0";

+	public static final String SUB_BASECONTENT_TYPE  = "application/vnd.att-dr.subscription";

+	public static final String SUB_CONTENT_TYPE      = "application/vnd.att-dr.subscription; version=2.0";

+	public static final String SUBFULL_CONTENT_TYPE  = "application/vnd.att-dr.subscription-full; version=2.0";

+	public static final String SUBLIST_CONTENT_TYPE  = "application/vnd.att-dr.subscription-list; version=1.0";

+

+	

+	//Adding groups functionality, ...1610

+	public static final String GROUP_BASECONTENT_TYPE = "application/vnd.att-dr.group";

+	public static final String GROUP_CONTENT_TYPE     = "application/vnd.att-dr.group; version=2.0";

+	public static final String GROUPFULL_CONTENT_TYPE = "application/vnd.att-dr.group-full; version=2.0";

+	public static final String GROUPLIST_CONTENT_TYPE = "application/vnd.att-dr.fegrouped-list; version=1.0";

+

+

+	public static final String LOGLIST_CONTENT_TYPE  = "application/vnd.att-dr.log-list; version=1.0";

+	public static final String PROVFULL_CONTENT_TYPE1 = "application/vnd.att-dr.provfeed-full; version=1.0";

+	public static final String PROVFULL_CONTENT_TYPE2 = "application/vnd.att-dr.provfeed-full; version=2.0";

+	public static final String CERT_ATTRIBUTE        = "javax.servlet.request.X509Certificate";

+

+	public static final String DB_PROBLEM_MSG = "There has been a problem with the DB.  It is suggested you try the operation again.";

+

+	public static final int    DEFAULT_MAX_FEEDS     = 10000;

+	public static final int    DEFAULT_MAX_SUBS      = 100000;

+	public static final int    DEFAULT_POKETIMER1    = 5;

+	public static final int    DEFAULT_POKETIMER2    = 30;

+	public static final String DEFAULT_DOMAIN        = "web.att.com";

+	public static final String DEFAULT_PROVSRVR_NAME = "feeds-drtr.web.att.com";

+	public static final String RESEARCH_SUBNET       = "135.207.136.128/25";

+	public static final String STATIC_ROUTING_NODES       = ""; //Adding new param for static Routing - Rally:US664862-1610

+

+	/** A boolean to trigger one time "provisioning changed" event on startup */

+	private static boolean startmsg_flag  = true;

+	/** This POD should require SSL connections from clients; pulled from the DB (PROV_REQUIRE_SECURE) */

+	private static boolean require_secure = true;

+	/** This POD should require signed, recognized certificates from clients; pulled from the DB (PROV_REQUIRE_CERT) */

+	private static boolean require_cert   = true;

+	/** The set of authorized addresses and networks; pulled from the DB (PROV_AUTH_ADDRESSES) */

+	private static Set<String> authorizedAddressesAndNetworks = new HashSet<String>();

+	/** The set of authorized names; pulled from the DB (PROV_AUTH_SUBJECTS) */

+	private static Set<String> authorizedNames = new HashSet<String>();

+	/** The FQDN of the initially "active" provisioning server in this Data Router ecosystem */

+	private static String initial_active_pod;

+	/** The FQDN of the initially "standby" provisioning server in this Data Router ecosystem */

+	private static String initial_standby_pod;

+	/** The FQDN of this provisioning server in this Data Router ecosystem */

+	private static String this_pod;

+	/** "Timer 1" - used to determine when to notify nodes of provisioning changes */

+	private static long poke_timer1;

+	/** "Timer 2" - used to determine when to notify nodes of provisioning changes */

+	private static long poke_timer2;

+	/** Array of nodes names and/or FQDNs */

+	private static String[] nodes = new String[0];

+	/** Array of node IP addresses */

+	private static InetAddress[] nodeAddresses = new InetAddress[0];

+	/** Array of POD IP addresses */

+	private static InetAddress[] podAddresses = new InetAddress[0];

+	/** The maximum number of feeds allowed; pulled from the DB (PROV_MAXFEED_COUNT) */

+	protected static int max_feeds    = 0;

+	/** The maximum number of subscriptions allowed; pulled from the DB (PROV_MAXSUB_COUNT) */

+	protected static int max_subs     = 0;

+	/** The current number of feeds in the system */

+	protected static int active_feeds = 0;

+	/** The current number of subscriptions in the system */

+	protected static int active_subs  = 0;

+	/** The domain used to generate a FQDN from the "bare" node names */

+	public static String prov_domain = "web.att.com";

+	/** The standard FQDN of the provisioning server in this Data Router ecosystem */

+	public static String prov_name   = "feeds-drtr.web.att.com";

+	/** The standard FQDN of the ACTIVE provisioning server in this Data Router ecosystem */

+	public static String active_prov_name   = "feeds-drtr.web.att.com";

+	/** Special subnet that is allowed access to /internal */

+	protected static String special_subnet = RESEARCH_SUBNET;

+

+	/** Special subnet that is allowed access to /internal to Lab Machine */

+	protected static String special_subnet_secondary = RESEARCH_SUBNET;

+	protected static String static_routing_nodes = STATIC_ROUTING_NODES; //Adding new param for static Routing - Rally:US664862-1610

+

+	/** This logger is used to log provisioning events */

+	protected static Logger eventlogger;

+	/** This logger is used to log internal events (errors, etc.) */

+	protected static Logger intlogger;

+	/** Authorizer - interface to the Policy Engine */

+	protected static Authorizer authz;

+	/** The Synchronizer used to sync active DB to standby one */

+	protected static SynchronizerTask synctask = null;

+    

+	//Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+	private InetAddress thishost;

+	private InetAddress loopback;

+    private static Boolean mailSendFlag = false;

+

+	public static final String MAILCONFIG_FILE = "mail.properties";

+	private static Properties mailprops;

+	/**

+	 * Initialize data common to all the provisioning server servlets.

+	 */

+	protected BaseServlet() {

+		if (eventlogger == null)

+			eventlogger = Logger.getLogger("com.att.research.datarouter.provisioning.events");

+		if (intlogger == null)

+			intlogger   = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+		if (authz == null)

+			authz = new ProvAuthorizer(this);

+		if (startmsg_flag) {

+			startmsg_flag = false;

+			provisioningParametersChanged();

+		}

+		if (synctask == null) {

+			synctask = SynchronizerTask.getSynchronizer();

+		}

+		String name = this.getClass().getName();

+		intlogger.info("PROV0002 Servlet "+name+" started.");

+	}

+	@Override

+	public void init(ServletConfig config) throws ServletException {

+		super.init(config);

+		try {

+			thishost = InetAddress.getLocalHost();

+			loopback = InetAddress.getLoopbackAddress();

+			checkHttpsRelaxation(); //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+		} catch (UnknownHostException e) {

+			// ignore

+		}

+	}

+	protected int getIdFromPath(HttpServletRequest req) {

+		String path = req.getPathInfo();

+		if (path == null || path.length() < 2)

+			return -1;

+		try {

+			return Integer.parseInt(path.substring(1));

+		} catch (NumberFormatException e) {

+			return -1;

+		}

+	}

+	/**

+	 * Read the request's input stream and return a JSONObject from it

+	 * @param req the HTTP request

+	 * @return the JSONObject, or null if the stream cannot be parsed

+	 */

+	protected JSONObject getJSONfromInput(HttpServletRequest req) {

+		JSONObject jo = null;

+		try {

+			jo = new JSONObject(new JSONTokener(req.getInputStream()));

+			if (intlogger.isDebugEnabled())

+				intlogger.debug("JSON: "+jo.toString());

+		} catch (Exception e) {

+			intlogger.info("Error reading JSON: "+e);

+		}

+		return jo;

+	}

+	/**

+	 * Check if the remote host is authorized to perform provisioning.

+	 * Is the request secure?

+	 * Is it coming from an authorized IP address or network (configured via PROV_AUTH_ADDRESSES)?

+	 * Does it have a valid client certificate (configured via PROV_AUTH_SUBJECTS)?

+	 * @param request the request

+	 * @return an error string, or null if all is OK

+	 */

+	protected String isAuthorizedForProvisioning(HttpServletRequest request) {

+		// Is the request https?

+		if (require_secure && !request.isSecure()) {

+			return "Request must be made over an HTTPS connection.";

+		}

+

+		// Is remote IP authorized?

+		String remote = request.getRemoteAddr();

+		try {

+			boolean found = false;

+			InetAddress ip = InetAddress.getByName(remote);

+			for (String addrnet : authorizedAddressesAndNetworks) {

+				found |= addressMatchesNetwork(ip, addrnet);

+			}

+			if (!found) {

+				return "Unauthorized address: "+remote;

+			}

+		} catch (UnknownHostException e) {

+			return "Unauthorized address: "+remote;

+		}

+

+		// Does remote have a valid certificate?

+		if (require_cert) {

+			X509Certificate certs[] = (X509Certificate[]) request.getAttribute(CERT_ATTRIBUTE);

+			if (certs == null || certs.length == 0) {

+				return "Client certificate is missing.";

+			}

+			// cert[0] is the client cert

+			// see http://www.proto.research.att.com/java/java7/api/javax/net/ssl/SSLSession.html#getPeerCertificates()

+			String name = certs[0].getSubjectX500Principal().getName();

+			if (!authorizedNames.contains(name)) {

+				return "No authorized certificate found.";

+			}

+		}

+

+		// No problems!

+		return null;

+	}

+	/**

+	 * Check if the remote IP address is authorized to see the /internal URL tree.

+	 * @param request the HTTP request

+	 * @return true iff authorized

+	 */

+	protected boolean isAuthorizedForInternal(HttpServletRequest request) {

+		try {

+			InetAddress ip = InetAddress.getByName(request.getRemoteAddr());

+			for (InetAddress node : getNodeAddresses()) {

+				if (node != null && ip.equals(node))

+					return true;

+			}

+			for (InetAddress pod : getPodAddresses()) {

+				if (pod != null && ip.equals(pod))

+					return true;

+			}

+			if (thishost != null && ip.equals(thishost))

+				return true;

+			if (loopback != null && ip.equals(loopback))

+				return true;

+			// Also allow the "special subnet" access

+			if (addressMatchesNetwork(ip, special_subnet_secondary))

+				return true;

+			if (addressMatchesNetwork(ip, special_subnet))

+				return true;

+		} catch (UnknownHostException e) {

+			// ignore

+		}

+		return false;

+	}

+	/**

+	 * Check if an IP address matches a network address.

+	 * @param ip the IP address

+	 * @param s the network address; a bare IP address may be matched also

+	 * @return true if they intersect

+	 */

+	protected static boolean addressMatchesNetwork(InetAddress ip, String s) {

+		int mlen = -1;

+		int n = s.indexOf("/");

+		if (n >= 0) {

+			mlen = Integer.parseInt(s.substring(n+1));

+			s = s.substring(0, n);

+		}

+		try {

+			InetAddress i2 = InetAddress.getByName(s);

+			byte[] b1 = ip.getAddress();

+			byte[] b2 = i2.getAddress();

+			if (b1.length != b2.length)

+				return false;

+			if (mlen > 0) {

+				byte[] masks = {

+					(byte)0x00, (byte)0x80, (byte)0xC0, (byte)0xE0,

+					(byte)0xF0, (byte)0xF8, (byte)0xFC, (byte)0xFE

+				};

+				byte mask = masks[mlen%8];

+				for (n = mlen/8; n < b1.length; n++) {

+					b1[n] &= mask;

+					b2[n] &= mask;

+					mask = 0;

+				}

+			}

+			for (n = 0; n < b1.length; n++)

+				if (b1[n] != b2[n])

+					return false;

+		} catch (UnknownHostException e) {

+			return false;

+		}

+		return true;

+	}
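
+	// Note: the examples below are hypothetical values illustrating addressMatchesNetwork();

+	// a "/NN" suffix is treated as a prefix length, while a bare address must match exactly.

+	//   addressMatchesNetwork(InetAddress.getByName("135.207.136.200"), "135.207.136.128/25") -> true

+	//   addressMatchesNetwork(InetAddress.getByName("135.207.137.1"), "135.207.136.128/25")   -> false

+	//   addressMatchesNetwork(InetAddress.getByName("10.0.0.1"), "10.0.0.1")                  -> true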

+	/**

+	 * Something has changed in the provisioning data.

+	 * Start the timers that will cause the pre-packaged JSON string to be regenerated,

+	 * and cause nodes and the other provisioning server to be notified.

+	 */

+	public static void provisioningDataChanged() {

+		long now = System.currentTimeMillis();

+		Poker p = Poker.getPoker();

+		p.setTimers(now + (poke_timer1 * 1000L), now + (poke_timer2 * 1000L));

+	}

+	/**

+	 * Something in the parameters has changed, reload all parameters from the DB.

+	 */

+	public static void provisioningParametersChanged() {

+		Map<String,String> map         = Parameters.getParameters();

+		require_secure   = getBoolean(map, Parameters.PROV_REQUIRE_SECURE);

+		require_cert     = getBoolean(map, Parameters.PROV_REQUIRE_CERT);

+		authorizedAddressesAndNetworks = getSet(map, Parameters.PROV_AUTH_ADDRESSES);

+		authorizedNames  = getSet    (map, Parameters.PROV_AUTH_SUBJECTS);

+		nodes            = getSet    (map, Parameters.NODES).toArray(new String[0]);

+		max_feeds        = getInt    (map, Parameters.PROV_MAXFEED_COUNT, DEFAULT_MAX_FEEDS);

+		max_subs         = getInt    (map, Parameters.PROV_MAXSUB_COUNT, DEFAULT_MAX_SUBS);

+		poke_timer1      = getInt    (map, Parameters.PROV_POKETIMER1, DEFAULT_POKETIMER1);

+		poke_timer2      = getInt    (map, Parameters.PROV_POKETIMER2, DEFAULT_POKETIMER2);

+		prov_domain      = getString (map, Parameters.PROV_DOMAIN, DEFAULT_DOMAIN);

+		prov_name        = getString (map, Parameters.PROV_NAME, DEFAULT_PROVSRVR_NAME);

+		active_prov_name = getString (map, Parameters.PROV_ACTIVE_NAME, prov_name);

+		special_subnet   = getString (map, Parameters.PROV_SPECIAL_SUBNET, RESEARCH_SUBNET);

+		static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610

+		initial_active_pod  = getString (map, Parameters.ACTIVE_POD, "");

+		initial_standby_pod = getString (map, Parameters.STANDBY_POD, "");

+		static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610

+		active_feeds     = Feed.countActiveFeeds();

+		active_subs      = Subscription.countActiveSubscriptions();

+		try {

+			this_pod = InetAddress.getLocalHost().getHostName();

+		} catch (UnknownHostException e) {

+			this_pod = "";

+			intlogger.warn("PROV0014 Cannot determine the name of this provisioning server.");

+		}

+

+		// Normalize the nodes, and fill in nodeAddresses

+		InetAddress[] na = new InetAddress[nodes.length];

+		for (int i = 0; i < nodes.length; i++) {

+			if (nodes[i].indexOf('.') < 0)

+				nodes[i] += "." + prov_domain;

+			try {

+				na[i] = InetAddress.getByName(nodes[i]);

+				intlogger.debug("PROV0003 DNS lookup: "+nodes[i]+" => "+na[i].toString());

+			} catch (UnknownHostException e) {

+				na[i] = null;

+				intlogger.warn("PROV0004 Cannot lookup "+nodes[i]+": "+e);

+			}

+		}

+

+		//Reset Nodes arr after - removing static routing Nodes, Rally Userstory - US664862 .	

+		List<String> filterNodes = new ArrayList<>();		

+		for (int i = 0; i < nodes.length; i++) {		

+			if(!static_routing_nodes.contains(nodes[i])){		

+				filterNodes.add(nodes[i]);		

+			}		

+		}		

+		String [] filteredNodes = filterNodes.toArray(new String[filterNodes.size()]);		  		

+		nodes = filteredNodes;

+

+		nodeAddresses = na;

+		NodeClass.setNodes(nodes);		// update NODES table

+

+		// Normalize the PODs, and fill in podAddresses

+		String[] pods = getPods();

+		na = new InetAddress[pods.length];

+		for (int i = 0; i < pods.length; i++) {

+			if (pods[i].indexOf('.') < 0)

+				pods[i] += "." + prov_domain;

+			try {

+				na[i] = InetAddress.getByName(pods[i]);

+				intlogger.debug("PROV0003 DNS lookup: "+pods[i]+" => "+na[i].toString());

+			} catch (UnknownHostException e) {

+				na[i] = null;

+				intlogger.warn("PROV0004 Cannot lookup "+pods[i]+": "+e);

+			}

+		}

+		podAddresses = na;

+

+		// Update ThrottleFilter

+		ThrottleFilter.configure();

+

+		// Check if we are active or standby POD

+		if (!isInitialActivePOD() && !isInitialStandbyPOD())

+			intlogger.warn("PROV0015 This machine is neither the active nor the standby POD.");

+	}

+

+

+	/**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+	 * Load mail properties.

+	 * @author vs215k

+	 *  

+	**/

+	private void loadMailProperties() {

+		if (mailprops == null) {

+			mailprops = new Properties();

+			InputStream inStream = getClass().getClassLoader().getResourceAsStream(MAILCONFIG_FILE);

+			try {

+				mailprops.load(inStream);

+			} catch (IOException e) {

+				intlogger.fatal("PROV9003 Opening properties: "+e.getMessage());

+				e.printStackTrace();

+				System.exit(1);

+			}

+			finally {

+				try {

+					inStream.close();

+				} 

+				catch (IOException e) {

+				}

+			}

+		}

+	}

+	

+	/**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+	 * Check if HTTPS relaxation is enabled.

+	 * @author vs215k

+	 *  

+	**/

+	private void checkHttpsRelaxation() {

+		if(mailSendFlag == false) {

+			Properties p = (new DB()).getProperties();

+			intlogger.info("HTTPS relaxatio: "+p.get("com.att.research.datarouter.provserver.https.relaxation"));

+			

+			if(p.get("com.att.research.datarouter.provserver.https.relaxation").equals("true")) {

+			    try {

+			    	  notifyPSTeam(p.get("com.att.research.datarouter.provserver.https.relax.notify").toString());

+			    } 

+				catch (Exception e) {

+				    e.printStackTrace();

+			    }

+			 }

+			mailSendFlag = true;

+		}

+	}

+	

+	/**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+	 * @author vs215k

+	 * @param email - list of email ids to notify if HTTPS relaxation is enabled.

+	**/

+	private void notifyPSTeam(String email) throws Exception {

+		loadMailProperties(); //Load HTTPS Relex mail properties.

+		String[] emails = email.split(Pattern.quote("|"));

+    	

+    	Properties mailproperties = new Properties();

+		mailproperties.put("mail.smtp.host", mailprops.get("com.att.dmaap.datarouter.mail.server"));

+		mailproperties.put("mail.transport.protocol", mailprops.get("com.att.dmaap.datarouter.mail.protocol"));

+		

+    	Session session = Session.getDefaultInstance(mailproperties, null);

+    	Multipart mp = new MimeMultipart();

+    	MimeBodyPart htmlPart = new MimeBodyPart();

+    	

+    	try {

+    		

+    	  Message msg = new MimeMessage(session);

+    	  msg.setFrom(new InternetAddress(mailprops.get("com.att.dmaap.datarouter.mail.from").toString()));

+    	  

+    	  InternetAddress[] addressTo = new InternetAddress[emails.length];

+    	  for ( int x =0 ; x < emails.length; x++) {

+    	       addressTo[x] = new InternetAddress(emails[x]);

+    	  }

+    	  

+    	  msg.addRecipients(Message.RecipientType.TO, addressTo);

+    	  msg.setSubject(mailprops.get("com.att.dmaap.datarouter.mail.subject").toString());

+    	  htmlPart.setContent(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()), "text/html");

+    	  mp.addBodyPart(htmlPart);

+      	  msg.setContent(mp);

+      	  

+      	  System.out.println(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()));

+      	

+    	  Transport.send(msg);

+    	  intlogger.info("HTTPS relaxation mail is sent to - : "+email);

+    	  

+    	} catch (AddressException e) {

+    		  intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);

+    	} catch (MessagingException e) {

+    		intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);

+    	} 

+	}

+

+

+	/**

+	 * Get an array of all node names in the DR network.

+	 * @return an array of Strings

+	 */

+	public static String[] getNodes() {

+		return nodes;

+	}

+	/**

+	 * Get an array of all node InetAddresses in the DR network.

+	 * @return an array of InetAddresses

+	 */

+	public static InetAddress[] getNodeAddresses() {

+		return nodeAddresses;

+	}

+	/**

+	 * Get an array of all POD names in the DR network.

+	 * @return an array of Strings

+	 */

+	public static String[] getPods() {

+		return new String[] { initial_active_pod, initial_standby_pod };

+	}

+	/**

+	 * Get an array of all POD InetAddresses in the DR network.

+	 * @return an array of InetAddresses

+	 */

+	public static InetAddress[] getPodAddresses() {

+		return podAddresses;

+	}

+	/**

+	 * Determines whether this server is the initially ACTIVE provisioning server (POD).

+	 * Note: this used to be called isActivePOD(), however, that is a misnomer, as the active status

+	 * could shift to the standby POD without these parameters changing.  Hence, the function names

+	 * have been changed to more accurately reflect their purpose.

+	 * @return true if this server is the initially active POD, false otherwise

+	 */

+	public static boolean isInitialActivePOD() {

+		return this_pod.equals(initial_active_pod);

+	}

+	/**

+	 * Determines whether this server is the initially STANDBY provisioning server (POD).

+	 * Note: this used to be called isStandbyPOD(), however, that is a misnomer, as the standby status

+	 * could shift to the active POD without these parameters changing.  Hence, the function names

+	 * have been changed to more accurately reflect their purpose.

+	 * @return true if this server is the initially standby POD, false otherwise

+	 */

+	public static boolean isInitialStandbyPOD() {

+		return this_pod.equals(initial_standby_pod);

+	}

+	/**

+	 * INSERT an {@link Insertable} bean into the database.

+	 * @param bean the bean representing a row to insert

+	 * @return true if the INSERT was successful

+	 */

+	protected boolean doInsert(Insertable bean) {

+		boolean rv = false;

+		DB db = new DB();

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			rv = bean.doInsert(conn);

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+		return rv;

+	}

+	/**

+	 * UPDATE an {@link Updateable} bean in the database.

+	 * @param bean the bean representing a row to update

+	 * @return true if the UPDATE was successful

+	 */

+	protected boolean doUpdate(Updateable bean) {

+		boolean rv = false;

+		DB db = new DB();

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			rv = bean.doUpdate(conn);

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+		return rv;

+	}

+	/**

+	 * DELETE a {@link Deleteable} bean from the database.

+	 * @param bean the bean representing a row to delete

+	 * @return true if the DELETE was successful

+	 */

+	protected boolean doDelete(Deleteable bean) {

+		boolean rv = false;

+		DB db = new DB();

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			rv = bean.doDelete(conn);

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+		return rv;

+	}

+	private static boolean getBoolean(Map<String,String> map, String name) {

+		String s = map.get(name);

+		return (s != null) && s.equalsIgnoreCase("true");

+	}

+	private static String getString(Map<String,String> map, String name, String dflt) {

+		String s = map.get(name);

+		return (s != null) ? s : dflt;

+	}

+	private static int getInt(Map<String,String> map, String name, int dflt) {

+		try {

+			String s = map.get(name);

+			return Integer.parseInt(s);

+		} catch (NumberFormatException e) {

+			return dflt;

+		}

+	}

+	private static Set<String> getSet(Map<String,String> map, String name) {

+		Set<String> set = new HashSet<String>();

+		String s = map.get(name);

+		if (s != null) {

+			String[] pp = s.split("\\|");

+			if (pp != null) {

+				for (String t : pp) {

+					String t2 = t.trim();

+					if (t2.length() > 0)

+						set.add(t2);

+				}

+			}

+		}

+		return set;

+	}

+

+	/**

+	 * A class used to encapsulate a Content-type header, separating out the "version" attribute

+	 * (which defaults to "1.0" if missing).

+	 */

+	public class ContentHeader {

+		private String type = "";

+		private Map<String, String> map = new HashMap<String, String>();

+		public ContentHeader() {

+			this("", "1.0");

+		}

+		public ContentHeader(String t, String v) {

+			type = t.trim();

+			map.put("version", v);

+		}

+		public String getType() {

+			return type;

+		}

+		public String getAttribute(String key) {

+			String s = map.get(key);

+			if (s == null)

+				s = "";

+			return s;

+		}

+	}

+

+	/**

+	 * Get the ContentHeader from an HTTP request.

+	 * @param req the request

+	 * @return the header, encapsulated in a ContentHeader object

+	 */

+	public ContentHeader getContentHeader(HttpServletRequest req) {

+		ContentHeader ch = new ContentHeader();

+		String s = req.getHeader("Content-Type");

+		if (s != null) {

+			String[] pp = s.split(";");

+			ch.type = pp[0].trim();

+			for (int i = 1; i < pp.length; i++) {

+				int ix = pp[i].indexOf('=');

+				if (ix > 0) {

+					String k = pp[i].substring(0, ix).trim();

+					String v = pp[i].substring(ix+1).trim();

+					ch.map.put(k,  v);

+				} else {

+					ch.map.put(pp[i].trim(), "");

+				}

+			}

+		}

+		return ch;

+	}
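
Taken together, ContentHeader and getContentHeader() split a Content-Type value on ';', treat the first token as the media type, and record any key=value parameters, so the "version" attribute keeps its "1.0" default unless the header supplies one. A stand-alone illustration of the same parsing (the media type string is only an example; the real base types are defined by constants elsewhere in BaseServlet):

import java.util.HashMap;
import java.util.Map;

public class ContentTypeParseExample {
    public static void main(String[] args) {
        // Example header value only.
        String header = "application/vnd.att-dr.feed; version=2.0; charset=utf-8";

        Map<String, String> attrs = new HashMap<String, String>();
        attrs.put("version", "1.0");              // default, as in the ContentHeader() constructor

        String[] pp = header.split(";");
        String type = pp[0].trim();
        for (int i = 1; i < pp.length; i++) {
            int ix = pp[i].indexOf('=');
            if (ix > 0) {
                attrs.put(pp[i].substring(0, ix).trim(), pp[i].substring(ix + 1).trim());
            } else {
                attrs.put(pp[i].trim(), "");
            }
        }

        System.out.println(type);                 // application/vnd.att-dr.feed
        System.out.println(attrs.get("version")); // 2.0
    }
}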

+	// Methods for the Policy Engine classes - ProvDataProvider interface

+	@Override

+	public String getFeedOwner(String feedId) {

+		try {

+			int n = Integer.parseInt(feedId);

+			Feed f = Feed.getFeedById(n);

+			if (f != null)

+				return f.getPublisher();

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		return null;

+	}

+	@Override

+	public String getFeedClassification(String feedId) {

+		try {

+			int n = Integer.parseInt(feedId);

+			Feed f = Feed.getFeedById(n);

+			if (f != null)

+				return f.getAuthorization().getClassification();

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		return null;

+	}

+	@Override

+	public String getSubscriptionOwner(String subId) {

+		try {

+			int n = Integer.parseInt(subId);

+			Subscription s = Subscription.getSubscriptionById(n);

+			if (s != null)

+				return s.getSubscriber();

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		return null;

+	}

+

+	/*

+	 * @Method - isUserMemberOfGroup - Rally:US708115 

+	 * @Params - group object and the user to check for membership in the given group

+	 * @return - boolean value /true/false

+	 */

+	private boolean isUserMemberOfGroup(Group group, String user) {

+		// "members" is a bracketed list of JSON objects; strip the brackets, split on "},",
+		// and restore the trailing "}" before parsing each element.

+		String groupdetails = group.getMembers().replace("]", "").replace("[", "");

+		String[] members = groupdetails.split("},");

+		for (String member : members) {

+			try {

+				JSONObject jsonObj = new JSONObject(member + "}");

+				if (jsonObj.get("id").equals(user))

+					return true;

+			} catch (JSONException e) {

+				e.printStackTrace();

+			}

+		}

+		return false;

+	}
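
The "members" attribute is evidently a bracketed list of JSON objects, each carrying an "id" field; that shape is inferred from the replace/split logic above rather than from any documented format. A sketch of the same check against a hypothetical members value:

import org.json.JSONObject;

public class GroupMembersExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical members string, reverse-engineered from isUserMemberOfGroup().
        String members = "[{\"id\":\"user1\",\"name\":\"User One\"},{\"id\":\"user2\",\"name\":\"User Two\"}]";
        String user = "user2";

        String details = members.replace("]", "").replace("[", "");
        boolean found = false;
        for (String m : details.split("},")) {
            // Restore the "}" removed by the split; org.json ignores trailing characters.
            JSONObject jo = new JSONObject(m + "}");
            if (jo.get("id").equals(user)) {
                found = true;
                break;
            }
        }
        System.out.println(found);   // true
    }
}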

+	

+	/*

+	 * @Method - getGroupByFeedGroupId - Rally:US708115

+	 * @Params - owner to check for group membership, and the feedId whose assigned group is looked up.

+	 * @return - the group's authid if the owner is a member, otherwise null

+	 */

+	@Override

+	public String getGroupByFeedGroupId(String owner, String feedId) {

+		try {

+			int n = Integer.parseInt(feedId);

+			Feed f = Feed.getFeedById(n);

+			if (f != null) {

+				int groupid = f.getGroupid();

+				if(groupid > 0) {

+					Group group = Group.getGroupById(groupid);

+					if(isUserMemberOfGroup(group, owner)) {

+						return group.getAuthid();

+					}

+				}

+			}

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		return null;

+	}

+	

+	/*

+	 * @Method - getGroupBySubGroupId - Rally:US708115  

+	 * @Params - owner to check for group membership, and the subId whose assigned group is looked up.

+	 * @return - the group's authid if the owner is a member, otherwise null

+	 */

+	@Override

+	public String getGroupBySubGroupId(String owner, String subId) {

+		try {

+			int n = Integer.parseInt(subId);

+			Subscription s = Subscription.getSubscriptionById(n);

+			if (s != null) {

+				int groupid = s.getGroupid();

+				if(groupid > 0) {

+					Group group = Group.getGroupById(groupid);

+					if(isUserMemberOfGroup(group, owner)) {

+						return group.getAuthid();

+					}

+				}

+			}

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		return null;

+	}

+	

+	/*

+	 * @Method - setIpAndFqdnForEelf - Rally:US664892  

+	 * @Params - method name, recorded as the service name in the EELF MDC logging context.

+	 */	

+	protected void setIpAndFqdnForEelf(String method) {

+	 	MDC.clear();

+        MDC.put(MDC_SERVICE_NAME, method);

+        try {

+            MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());

+            MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());

+        } catch (Exception e) {

+            e.printStackTrace();

+        }

+

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/DRFeedsServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/DRFeedsServlet.java
new file mode 100644
index 0000000..df27042
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/DRFeedsServlet.java
@@ -0,0 +1,300 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InvalidObjectException;

+import java.util.List;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONObject;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+import com.att.research.datarouter.provisioning.utils.JSONUtilities;

+

+/**

+ * This servlet handles provisioning for the &lt;drFeedsURL&gt; which is the URL on the

+ * provisioning server used to create new feeds.  It supports POST to create new feeds,

+ * and GET to support the Feeds Collection Query function.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class DRFeedsServlet extends ProxyServlet {

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.DRFeedsServlet");

+    

+	/**

+	 * DELETE on the &lt;drFeedsURL&gt; -- not supported.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "DELETE not allowed for the drFeedsURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * GET on the &lt;drFeedsURL&gt; -- query the list of feeds already existing in the DB.

+	 * See the <i>Feeds Collection Queries</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)

+		if (path != null && !path.equals("/")) {

+			message = "Bad URL.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		String name = req.getParameter("name");

+		String vers = req.getParameter("version");

+		String publ = req.getParameter("publisher");

+		String subs = req.getParameter("subscriber");

+		if (name != null && vers != null) {

+			// Display a specific feed

+			Feed feed = Feed.getFeedByNameVersion(name, vers);

+			if (feed == null || feed.isDeleted()) {

+				message = "This feed does not exist in the database.";

+				elr.setMessage(message);

+				elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+				eventlogger.info(elr);

+				resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			} else {

+				// send response

+				elr.setResult(HttpServletResponse.SC_OK);

+				eventlogger.info(elr);

+				resp.setStatus(HttpServletResponse.SC_OK);

+				resp.setContentType(FEEDFULL_CONTENT_TYPE);

+				resp.getOutputStream().print(feed.asJSONObject(true).toString());

+			}

+		} else {

+			// Display a list of URLs

+			List<String> list = null;

+			if (name != null) {

+				list = Feed.getFilteredFeedUrlList("name", name);

+			} else if (publ != null) {

+				list = Feed.getFilteredFeedUrlList("publ", publ);

+			} else if (subs != null) {

+				list = Feed.getFilteredFeedUrlList("subs", subs);

+			} else {

+				list = Feed.getFilteredFeedUrlList("all", null);

+			}

+			String t = JSONUtilities.createJSONArray(list);

+			// send response

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType(FEEDLIST_CONTENT_TYPE);

+			resp.getOutputStream().print(t);

+		}

+	}

+	/**

+	 * PUT on the &lt;drFeedsURL&gt; -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "PUT not allowed for the drFeedsURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * POST on the &lt;drFeedsURL&gt; -- create a new feed.

+	 * See the <i>Creating a Feed</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPost(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)

+		if (path != null && !path.equals("/")) {

+			message = "Bad URL.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// check content type is FEED_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		if (++active_feeds > max_feeds) {

+			active_feeds--;

+			message = "Cannot create feed; the configured maximum number of feeds has been reached.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_CONFLICT);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_CONFLICT, message);

+			return;

+		}

+		Feed feed = null;

+		try {

+			feed = new Feed(jo);

+		} catch (InvalidObjectException e) {

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		feed.setPublisher(bhdr);	// set from X-ATT-DR-ON-BEHALF-OF header

+

+		// Check if this feed already exists

+		Feed feed2 = Feed.getFeedByNameVersion(feed.getName(), feed.getVersion());

+		if (feed2 != null) {

+			message = "This feed already exists in the database.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+

+		// Create FEED table entries

+		if (doInsert(feed)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_CREATED);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_CREATED);

+			resp.setContentType(FEEDFULL_CONTENT_TYPE);

+			resp.setHeader("Location", feed.getLinks().getSelf());

+			resp.getOutputStream().print(feed.asLimitedJSONObject().toString());

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the INSERT

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+}
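
For orientation, creating a feed through this servlet means POSTing a feed definition to the drFeedsURL with the X-ATT-DR-ON-BEHALF-OF header and a versioned content type, then reading the Location header of the created feed, as the doPost() flow above implies. The sketch below is illustrative only: the URL, the concrete media type string, and the JSON field names are assumptions, not values taken from the Provisioning API document.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class CreateFeedExample {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint and payload; real values come from the Provisioning API document.
        URL drFeedsURL = new URL("https://prov.example.com:8443/");
        String body = "{\"name\":\"demo-feed\",\"version\":\"1.0\",\"description\":\"example feed\"}";

        HttpURLConnection conn = (HttpURLConnection) drFeedsURL.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("X-ATT-DR-ON-BEHALF-OF", "demo-publisher");
        // The required base type is FEED_BASECONTENT_TYPE (value not shown in this file);
        // the string below is an assumed example, with version 1.0 or 2.0 as checked in doPost().
        conn.setRequestProperty("Content-Type", "application/vnd.att-dr.feed; version=2.0");

        OutputStream os = conn.getOutputStream();
        os.write(body.getBytes("UTF-8"));
        os.close();

        // doPost() answers 201 Created and sets Location to the new feed's URL.
        System.out.println(conn.getResponseCode() + " " + conn.getHeaderField("Location"));
    }
}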

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedLogServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedLogServlet.java
new file mode 100644
index 0000000..dd6b75d
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedLogServlet.java
@@ -0,0 +1,38 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning;

+

+/**

+ * This servlet handles requests to the &lt;feedLogURL&gt;

+ * which are generated by the provisioning server to handle the log query API.

+ *

+ * @author Robert Eby

+ * @version $Id: FeedLogServlet.java,v 1.1 2013/04/26 21:00:24 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class FeedLogServlet extends LogServlet {

+	public FeedLogServlet() {

+		super(true);

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedServlet.java
new file mode 100644
index 0000000..aff6853
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/FeedServlet.java
@@ -0,0 +1,362 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InvalidObjectException;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONObject;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+

+/**

+ * This servlet handles provisioning for the &lt;feedURL&gt; which is generated by the provisioning

+ * server to handle a particular feed. It supports DELETE to mark the feed as deleted,

+ * GET to retrieve information about the feed, and PUT to modify the feed.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class FeedServlet extends ProxyServlet {

+

+	//Adding EELF Logger Rally:US664892 

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.FeedServlet");

+

+	/**

+	 * Delete the Feed at the address /feed/&lt;feednumber&gt;.

+	 * See the <i>Deleting a Feed</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doDelete(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed feed = Feed.getFeedById(feedid);

+		if (feed == null || feed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// Delete FEED table entry (set DELETED flag)

+		feed.setDeleted(true);

+		if (doUpdate(feed)) {

+			active_feeds--;

+			// send response

+			elr.setResult(HttpServletResponse.SC_NO_CONTENT);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_NO_CONTENT);

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the UPDATE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * Get information on the feed at the address /feed/&lt;feednumber&gt;.

+	 * See the <i>Retrieving Information about a Feed</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed feed = Feed.getFeedById(feedid);

+		if (feed == null || feed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// send response

+		elr.setResult(HttpServletResponse.SC_OK);

+		eventlogger.info(elr);

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(FEEDFULL_CONTENT_TYPE);

+		resp.getOutputStream().print(feed.asJSONObject(true).toString());

+	}

+	/**

+	 * PUT on the &lt;feedURL&gt; for a feed.

+	 * See the <i>Modifying a Feed</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPut(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed oldFeed = Feed.getFeedById(feedid);

+		if (oldFeed == null || oldFeed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// check content type is FEED_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		Feed feed = null;

+		try {

+			feed = new Feed(jo);

+		} catch (InvalidObjectException e) {

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		feed.setFeedid(feedid);

+		feed.setPublisher(bhdr);	// set from X-ATT-DR-ON-BEHALF-OF header

+

+		String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP"));  //Adding for group feature:Rally US708115  

+		if (!oldFeed.getPublisher().equals(feed.getPublisher()) && subjectgroup == null) {

+			message = "This feed must be modified by the same publisher that created it.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (!oldFeed.getName().equals(feed.getName())) {

+			message = "The name of the feed may not be updated.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (!oldFeed.getVersion().equals(feed.getVersion())) {

+			message = "The version of the feed may not be updated.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// Update FEEDS table entries

+		if (doUpdate(feed)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType(FEEDFULL_CONTENT_TYPE);

+			resp.getOutputStream().print(feed.asLimitedJSONObject().toString());

+

+			

+			/** Change ownership of Feed - Adding for group feature:Rally US708115 */

+			if (jo.has("changeowner") && subjectgroup != null) {

+				Boolean changeowner = (Boolean) jo.get("changeowner");

+				if (changeowner != null && changeowner.equals(true)) {

+					feed.setPublisher(req.getHeader(BEHALF_HEADER));

+					feed.changeOwnerShip();

+				}

+			}

+			/***End of change ownership*/

+

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the UPDATE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * POST on the &lt;feedURL&gt; -- not supported.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		String message = "POST not allowed for the feedURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+}
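
The ownership-transfer branch in doPut() above keys off two inputs: a boolean "changeowner" field in the PUT body and the X-ATT-DR-ON-BEHALF-OF-GROUP header (both names appear in the code). A sketch of a request body that would exercise it follows; the descriptive field and its value are placeholders, and name/version must match the feed being updated, as the checks above require.

import org.json.JSONObject;

public class ChangeFeedOwnerExample {
    public static void main(String[] args) throws Exception {
        // Body for PUT <feedURL>.
        JSONObject body = new JSONObject();
        body.put("name", "demo-feed");        // must equal the existing feed's name
        body.put("version", "1.0");           // must equal the existing feed's version
        body.put("description", "example");   // placeholder field
        body.put("changeowner", true);        // triggers feed.changeOwnerShip() after a successful update

        // Headers needed alongside the body (names taken from the servlet code):
        //   X-ATT-DR-ON-BEHALF-OF        -> becomes the new publisher
        //   X-ATT-DR-ON-BEHALF-OF-GROUP  -> group authid; without it, a publisher change is rejected
        System.out.println(body.toString());
    }
}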

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/GroupServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/GroupServlet.java
new file mode 100644
index 0000000..84ec3d2
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/GroupServlet.java
@@ -0,0 +1,386 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InvalidObjectException;

+import java.util.Collection;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONObject;

+

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.provisioning.BaseServlet.ContentHeader;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Group;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.utils.JSONUtilities;

+

+/**

+ * This servlet handles provisioning for the &lt;groups&gt; which is generated by the provisioning

+ * server to handle the creation and inspection of groups for FEEDS and SUBSCRIPTIONS.

+ *

+ * @author Vikram Singh

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class GroupServlet extends ProxyServlet {

+	/**

+	 * DELETE on the &lt;GROUPS&gt; -- not supported.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		String message = "DELETE not allowed for the GROUPS.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * GET on the list of groups for a feed/sub.

+	 * See the <i>Groups Collection Query</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		

+		// Check with the Authorizer

+		/*AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}*/

+		

+		

+		/*ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(GROUPLIST_CONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}*/

+		

+		

+		int groupid = getIdFromPath(req);

+		if (groupid < 0) {

+			message = "Missing or bad group number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+			

+		Group gup = Group.getGroupById(groupid);

+		// send response

+		elr.setResult(HttpServletResponse.SC_OK);

+		eventlogger.info(elr);

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(GROUPFULL_CONTENT_TYPE);

+		resp.getOutputStream().print(gup.asJSONObject().toString());

+

+		// Display a list of Groups

+		/*Collection<Group> list = Group.getGroupById(groupid);

+		String t = JSONUtilities.createJSONArray(list);

+

+		// send response

+		elr.setResult(HttpServletResponse.SC_OK);

+		eventlogger.info(elr);

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(GROUPLIST_CONTENT_TYPE);

+		resp.getOutputStream().print(t);*/

+	}

+	/**

+	 * PUT on the &lt;GROUPS&gt; -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPut(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int groupid = getIdFromPath(req);

+		if (groupid < 0) {

+			message = "Missing or bad groupid.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Group oldgup = Group.getGroupById(groupid);

+		if (oldgup == null) {

+			message = "Missing or bad group number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		/*AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}*/

+		// check content type is SUB_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		Group gup = null;

+		try {

+			gup = new Group(jo);

+		} catch (InvalidObjectException e) {

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		gup.setGroupid(oldgup.getGroupid());

+	

+		

+		Group gb2 = Group.getGroupMatching(gup, oldgup.getGroupid());

+		if (gb2 != null) {

+			eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());

+			return;

+		}

+		

+		// Update Groups table entries

+		if (doUpdate(gup)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType(GROUPFULL_CONTENT_TYPE);

+			resp.getOutputStream().print(gup.asJSONObject().toString());

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the UPDATE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * POST on the &lt;groups&gt; -- create a new GROUP for a feed.

+	 * See the <i>Creating a GROUPS</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPost(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		/*int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed feed = Feed.getFeedById(feedid);

+		if (feed == null || feed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}*/

+		// Check with the Authorizer

+		/*AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}*/

+

+		// check content type is SUB_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		

+		Group gup = null;

+		try {

+			gup = new Group(jo);

+		} catch (InvalidObjectException e) {

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		//gup.setFeedid(feedid);

+		//sub.setSubscriber(bhdr);	// set from X-ATT-DR-ON-BEHALF-OF header

+

+		// Check if this group already exists; not an error (yet), just warn

+		Group gb2 = Group.getGroupMatching(gup);

+		if (gb2 != null) {

+			eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());

+			return;

+		}

+		

+		

+		// Create GROUPS table entries

+		if (doInsert(gup)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_CREATED);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_CREATED);

+			resp.setContentType(GROUPFULL_CONTENT_TYPE);

+			resp.getOutputStream().print(gup.asJSONObject().toString());

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the INSERT

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/InternalServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/InternalServlet.java
new file mode 100644
index 0000000..e50a478
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/InternalServlet.java
@@ -0,0 +1,506 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.ByteArrayOutputStream;

+import java.io.File;

+import java.io.IOException;

+import java.io.InputStream;

+import java.nio.file.FileStore;

+import java.nio.file.FileSystem;

+import java.nio.file.Files;

+import java.nio.file.Path;

+import java.nio.file.Paths;

+import java.nio.file.StandardCopyOption;

+import java.util.Properties;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONArray;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.LogRecord;

+import com.att.research.datarouter.provisioning.beans.Parameters;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.RLEBitSet;

+import com.att.research.datarouter.provisioning.utils.LogfileLoader;

+

+/**

+ * <p>

+ * This servlet handles requests to URLs under /internal on the provisioning server.

+ * These include:

+ * </p>

+ * <div class="contentContainer">

+ * <table class="packageSummary" border="0" cellpadding="3" cellspacing="0">

+ * <caption><span>URL Path Summary</span><span class="tabEnd">&nbsp;</span></caption>

+ * <tr>

+ *   <th class="colFirst" width="15%">URL Path</th>

+ *   <th class="colOne">Method</th>

+ *   <th class="colLast">Purpose</th>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/prov</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET a full JSON copy of the provisioning data.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst">/internal/fetchProv</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to signal to a standby POD that the provisioning data should be fetched from the active POD.</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst" rowspan="2">/internal/logs</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET an index of log files and individual logs for this provisioning server.</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to POST log files from the individual nodes to this provisioning server.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst" rowspan="4">/internal/api</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET an individual parameter value. The parameter name is specified by the path after /api/.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">PUT</td>

+ *   <td class="colLast">used to set an individual parameter value. The parameter name is specified by the path after /api/.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">DELETE</td>

+ *   <td class="colLast">used to remove an individual parameter value. The parameter name is specified by the path after /api/.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to create a new individual parameter value. The parameter name is specified by the path after /api/.</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/halt</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to halt the server (must be accessed from 127.0.0.1).</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst" rowspan="2">/internal/drlogs</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to get a list of DR log entries available for retrieval.

+ *   Note: these are the actual data router log entries sent to the provisioning server

+ *   by the nodes, not the provisioning server's internal logs (access via /internal/logs above).

+ *   The range is returned as a list of record sequence numbers.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to retrieve specific log entries.

+ *   The sequence numbers of the records to fetch are POST-ed; the records matching the sequence numbers are returned.</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/route/*</td>

+ *   <td class="colOne">*</td>

+ *   <td class="colLast">URLs under this path are handled via the {@link com.att.research.datarouter.provisioning.RouteServlet}</td>

+ * </tr>

+ * </table>

+ * </div>

+ * <p>

+ * Authorization to use these URLs is a little different than for other URLs on the provisioning server.

+ * For the most part, the IP address that the request comes from should be either:

+ * </p>

+ * <ol>

+ * <li>an IP address of a provisioning server, or</li>

+ * <li>the IP address of a node (to allow access to /internal/prov), or</li>

+ * <li>an IP address from the "<i>special subnet</i>" which is configured with

+ * the PROV_SPECIAL_SUBNET parameter.</li>

+ * </ol>

+ * <p>

+ * In addition, requests to /internal/halt can ONLY come from localhost (127.0.0.1) on the HTTP port.

+ * </p>

+ * <p>

+ * All DELETE/GET/PUT/POST requests made to /internal/api on this servlet on the standby server are

+ * proxied to the active server (using the {@link ProxyServlet}) if it is up and reachable.

+ * </p>

+ *

+ * @author Robert Eby

+ * @version $Id: InternalServlet.java,v 1.23 2014/03/24 18:47:10 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class InternalServlet extends ProxyServlet {

+	private static Integer logseq = new Integer(0); // another piece of info to make log spool file names unique

+	//Adding EELF Logger Rally:US664892 

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.InternalServlet");

+

+	/**

+	 * Delete a parameter at the address /internal/api/&lt;parameter&gt;.

+	 * See the <b>Internal API</b> document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+

+		String path = req.getPathInfo();

+		if (path.startsWith("/api/")) {

+			if (isProxyOK(req) && isProxyServer()) {

+				super.doDelete(req, resp);

+				return;

+			}

+			String key = path.substring(5);

+			if (key.length() > 0) {

+				Parameters param = Parameters.getParameter(key);

+				if (param != null) {

+					if (doDelete(param)) {

+						elr.setResult(HttpServletResponse.SC_OK);

+						eventlogger.info(elr);

+						resp.setStatus(HttpServletResponse.SC_OK);

+						provisioningDataChanged();

+						provisioningParametersChanged();

+					} else {

+						// Something went wrong with the DELETE

+						elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+						eventlogger.info(elr);

+						resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+					}

+					return;

+				}

+			}

+		}

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+	}

+	/**

+	 * Get some information (such as a parameter) underneath the /internal/ namespace.

+	 * See the <b>Internal API</b> document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String path = req.getPathInfo();

+		if (path.equals("/halt") && !req.isSecure()) {

+			// request to halt the server - can ONLY come from localhost

+			String remote = req.getRemoteAddr();

+			if (remote.equals("127.0.0.1")) {

+				intlogger.info("PROV0009 Request to HALT received.");

+				resp.setStatus(HttpServletResponse.SC_OK);

+				Main.shutdown();

+			} else {

+				intlogger.info("PROV0010 Disallowed request to HALT received from "+remote);

+				resp.setStatus(HttpServletResponse.SC_FORBIDDEN);

+			}

+			return;

+		}

+

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		if (path.equals("/fetchProv") && !req.isSecure()) {

+			// if request came from active_pod or standby_pod and it is not us, reload prov data

+			SynchronizerTask s = SynchronizerTask.getSynchronizer();

+			s.doFetch();

+			resp.setStatus(HttpServletResponse.SC_OK);

+			return;

+		}

+		if (path.equals("/prov")) {

+			if (isProxyOK(req) && isProxyServer()) {

+				if (super.doGetWithFallback(req, resp))

+					return;

+				// fall back to returning the local data if the remote is unreachable

+				intlogger.info("Active server unavailable; falling back to local copy.");

+			}

+			Poker p = Poker.getPoker();

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType(PROVFULL_CONTENT_TYPE2);

+			resp.getOutputStream().print(p.getProvisioningString());

+			return;

+		}

+		if (path.equals("/logs") || path.equals("/logs/")) {

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType("application/json");

+			resp.getOutputStream().print(generateLogfileList().toString());

+			return;

+		}

+		if (path.startsWith("/logs/")) {

+			Properties p = (new DB()).getProperties();

+			String logdir = p.getProperty("com.att.research.datarouter.provserver.accesslog.dir");

+			String logfile = path.substring(6);

+			if (logdir != null && logfile != null && logfile.indexOf('/') < 0) {

+				File log = new File(logdir + "/" + logfile);

+				if (log.exists() && log.isFile()) {

+					resp.setStatus(HttpServletResponse.SC_OK);

+					resp.setContentType("text/plain");

+					Path logpath = Paths.get(log.getAbsolutePath());

+					Files.copy(logpath, resp.getOutputStream());

+					return;

+				}

+			}

+			resp.sendError(HttpServletResponse.SC_NO_CONTENT, "No file.");

+			return;

+		}

+		if (path.startsWith("/api/")) {

+			if (isProxyOK(req) && isProxyServer()) {

+				super.doGet(req, resp);

+				return;

+			}

+			String key = path.substring(5);

+			if (key.length() > 0) {

+				Parameters param = Parameters.getParameter(key);

+				if (param != null) {

+					resp.setStatus(HttpServletResponse.SC_OK);

+					resp.setContentType("text/plain");

+					resp.getOutputStream().print(param.getValue() + "\n");

+					return;

+				}

+			}

+		}

+		if (path.equals("/drlogs") || path.equals("/drlogs/")) {

+			// Special POD <=> POD API to determine what log file records are loaded here

+			LogfileLoader lfl = LogfileLoader.getLoader();

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType("text/plain");

+			resp.getOutputStream().print(lfl.getBitSet().toString());

+			return;

+		}

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+	}

+	/**

+	 * Modify a parameter at the address /internal/api/&lt;parameter&gt;.

+	 * See the <b>Internal API</b> document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		String path = req.getPathInfo();

+		if (path.startsWith("/api/")) {

+			if (isProxyOK(req) && isProxyServer()) {

+				super.doPut(req, resp);

+				return;

+			}

+			String key = path.substring(5);

+			if (key.length() > 0) {

+				Parameters param = Parameters.getParameter(key);

+				if (param != null) {

+					String t = catValues(req.getParameterValues("val"));

+					param.setValue(t);

+					if (doUpdate(param)) {

+						elr.setResult(HttpServletResponse.SC_OK);

+						eventlogger.info(elr);

+						resp.setStatus(HttpServletResponse.SC_OK);

+						provisioningDataChanged();

+						provisioningParametersChanged();

+					} else {

+						// Something went wrong with the UPDATE

+						elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+						eventlogger.info(elr);

+						resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+					}

+					return;

+				}

+			}

+		}

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+	}

+	/**

+	 * Create some new information (such as a parameter or log entries) underneath the /internal/ namespace.

+	 * See the <b>Internal API</b> document for details on how this method should be invoked.
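+	 * <p>
+	 * A minimal sketch of pushing a log file (the host and file names are hypothetical; the body must be
+	 * <i>text/plain</i>, optionally gzipped, and a 201 is returned when the file is spooled):
+	 * </p>
+	 * <pre>{@code
+	 * HttpURLConnection conn = (HttpURLConnection)
+	 *     new URL("http://prov-host.example.com:8080/internal/logs").openConnection();
+	 * conn.setRequestMethod("POST");
+	 * conn.setDoOutput(true);
+	 * conn.setRequestProperty("Content-Type", "text/plain");
+	 * conn.setRequestProperty("Content-Encoding", "gzip");  // omit for an uncompressed file
+	 * Files.copy(Paths.get("/opt/app/datartr/logs/events.log.gz"), conn.getOutputStream());
+	 * int rc = conn.getResponseCode();  // 201 (Created) on success
+	 * }</pre>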

+	 */

+	@SuppressWarnings("resource")

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+

+		String path = req.getPathInfo();

+		if (path.startsWith("/api/")) {

+			if (isProxyOK(req) && isProxyServer()) {

+				super.doPost(req, resp);

+				return;

+			}

+			String key = path.substring(5);

+			if (key.length() > 0) {

+				Parameters param = Parameters.getParameter(key);

+				if (param == null) {

+					String t = catValues(req.getParameterValues("val"));

+					param = new Parameters(key, t);

+					if (doInsert(param)) {

+						elr.setResult(HttpServletResponse.SC_OK);

+						eventlogger.info(elr);

+						resp.setStatus(HttpServletResponse.SC_OK);

+						provisioningDataChanged();

+						provisioningParametersChanged();

+					} else {

+						// Something went wrong with the INSERT

+						elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+						eventlogger.info(elr);

+						resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+					}

+					return;

+				}

+			}

+		}

+

+		if (path.equals("/logs") || path.equals("/logs/")) {

+			String ctype = req.getHeader("Content-Type");

+			if (ctype == null || !ctype.equals("text/plain")) {

+				elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+				elr.setMessage("Bad media type: "+ctype);

+				resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+				eventlogger.info(elr);

+				return;

+			}

+			String spooldir = (new DB()).getProperties().getProperty("com.att.research.datarouter.provserver.spooldir");

+			String spoolname = String.format("%d-%d-", System.currentTimeMillis(), Thread.currentThread().getId());

+			// Lock on the class object: "logseq++" below rebinds logseq to a new Integer, so logseq itself is not a stable monitor.

+			synchronized (InternalServlet.class) {

+				// perhaps unnecessary, but it helps make the name unique

+				spoolname += logseq.toString();

+				logseq++;

+			}

+			String encoding = req.getHeader("Content-Encoding");

+			if (encoding != null) {

+				if (encoding.trim().equals("gzip")) {

+					spoolname += ".gz";

+				} else {

+					elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+					resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+					eventlogger.info(elr);

+					return;

+				}

+			}

+			// Determine space available -- available space must be at least 5%

+			FileSystem fs = (Paths.get(spooldir)).getFileSystem();

+			long total = 0;

+			long avail = 0;

+			for (FileStore store: fs.getFileStores()) {

+				total += store.getTotalSpace();

+				avail += store.getUsableSpace();

+			}

+			try { fs.close(); } catch (Exception e) { }

+			if (((avail * 100) / total) < 5) {

+				elr.setResult(HttpServletResponse.SC_SERVICE_UNAVAILABLE);

+				resp.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);

+				eventlogger.info(elr);

+				return;

+			}

+			Path tmppath = Paths.get(spooldir, spoolname);

+			Path donepath = Paths.get(spooldir, "IN."+spoolname);

+			Files.copy(req.getInputStream(), tmppath, StandardCopyOption.REPLACE_EXISTING);

+			Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);

+			elr.setResult(HttpServletResponse.SC_CREATED);

+			resp.setStatus(HttpServletResponse.SC_CREATED);

+			eventlogger.info(elr);

+			LogfileLoader.getLoader();	// This starts the logfile loader "task"

+			return;

+		}

+

+		if (path.equals("/drlogs") || path.equals("/drlogs/")) {

+			// Receive the POSTed set of record sequence numbers and return the matching log entries

+			String ctype = req.getHeader("Content-Type");

+			if (ctype == null || !ctype.equals("text/plain")) {

+				elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+				elr.setMessage("Bad media type: "+ctype);

+				resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+				eventlogger.info(elr);

+				return;

+			}

+			InputStream is = req.getInputStream();

+			ByteArrayOutputStream bos = new ByteArrayOutputStream();

+			int ch = 0;

+			while ((ch = is.read()) >= 0)

+				bos.write(ch);

+			RLEBitSet bs = new RLEBitSet(bos.toString());	// The set of records to retrieve

+			elr.setResult(HttpServletResponse.SC_OK);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType("text/plain");

+			LogRecord.printLogRecords(resp.getOutputStream(), bs);

+			eventlogger.info(elr);

+			return;

+		}

+

+		elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+		eventlogger.info(elr);

+	}

+

+	private String catValues(String[] v) {

+		StringBuilder sb = new StringBuilder();

+		if (v != null) {

+			String pfx = "";

+			for (String s : v) {

+				sb.append(pfx);

+				sb.append(s);

+				pfx = "|";

+			}

+		}

+		return sb.toString();

+	}

+	private JSONArray generateLogfileList() {

+		JSONArray ja = new JSONArray();

+		Properties p = (new DB()).getProperties();

+		String s = p.getProperty("com.att.research.datarouter.provserver.accesslog.dir");

+		if (s != null) {

+			String[] dirs = s.split(",");

+			for (String dir : dirs) {

+				File f = new File(dir);

+				String[] list = f.list();

+				if (list != null) {

+					for (String s2 : list) {

+						if (!s2.startsWith("."))

+							ja.put(s2);

+					}

+				}

+			}

+		}

+		return ja;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/LogServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/LogServlet.java
new file mode 100644
index 0000000..7ef74d1
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/LogServlet.java
@@ -0,0 +1,433 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.sql.Connection;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.text.ParseException;

+import java.text.SimpleDateFormat;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.Map;

+

+import javax.servlet.ServletOutputStream;

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.LOGJSONObject;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.provisioning.beans.DeliveryRecord;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.ExpiryRecord;

+import com.att.research.datarouter.provisioning.beans.LOGJSONable;

+import com.att.research.datarouter.provisioning.beans.PublishRecord;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * This servlet handles requests to the &lt;feedLogURL&gt; and  &lt;subLogURL&gt;,

+ * which are generated by the provisioning server to handle the log query API.
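+ * <p>
+ * The query parameters recognized here (see buildMapFromRequest below) are <i>type</i>
+ * (pub, del, or exp), <i>publishId</i>, <i>statusCode</i> (success, redirect, failure, or a specific code),
+ * <i>expiryReason</i> (notRetryable, retriesExhausted, diskFull, or other), and <i>start</i>/<i>end</i>
+ * (RFC 3339 timestamps, or epoch milliseconds for testing).  If no time range is given, the last
+ * 24 hours are used.  An illustrative request (the feed id and timestamp are made up):
+ * </p>
+ * <pre>
+ *   GET /feedlog/5?type=pub&amp;statusCode=success&amp;start=2017-06-01T00:00:00Z
+ * </pre>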

+ *

+ * @author Robert Eby

+ * @version $Id: LogServlet.java,v 1.11 2014/03/28 17:27:02 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class LogServlet extends BaseServlet {

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.LogServlet");

+

+	private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);

+	private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";

+	private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";

+

+	private boolean isfeedlog;

+

+	public abstract class RowHandler {

+		private final ServletOutputStream out;

+		private final String[] fields;

+		public boolean firstrow;

+

+		public RowHandler(ServletOutputStream out, String fieldparam, boolean b) {

+			this.out = out;

+			this.firstrow = b;

+			this.fields = (fieldparam != null) ? fieldparam.split(":") : null;

+		}

+		public void handleRow(ResultSet rs) {

+			try {

+				LOGJSONable js = buildJSONable(rs);

+				LOGJSONObject jo = js.asJSONObject();

+				if (fields != null) {

+					// filter out unwanted fields

+					LOGJSONObject j2 = new LOGJSONObject();

+					for (String key : fields) {

+						Object v = jo.opt(key);

+						if (v != null)

+							j2.put(key, v);

+					}

+					jo = j2;

+				}

+				String t = firstrow ? "\n" : ",\n";

+				t += jo.toString();

+				out.print(t);

+				firstrow = false;

+			} catch (Exception e) {

+				// ignore

+			}

+		}

+		public abstract LOGJSONable buildJSONable(ResultSet rs) throws SQLException;

+	}

+	public class PublishRecordRowHandler extends RowHandler {

+		public PublishRecordRowHandler(ServletOutputStream out, String fields, boolean b) {

+			super(out, fields, b);

+		}

+		@Override

+		public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {

+			return new PublishRecord(rs);

+		}

+	}

+	public class DeliveryRecordRowHandler extends RowHandler {

+		public DeliveryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {

+			super(out, fields, b);

+		}

+		@Override

+		public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {

+			return new DeliveryRecord(rs);

+		}

+	}

+	public class ExpiryRecordRowHandler extends RowHandler {

+		public ExpiryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {

+			super(out, fields, b);

+		}

+		@Override

+		public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {

+			return new ExpiryRecord(rs);

+		}

+	}

+

+	/**

+	 * This class must be created from either a {@link FeedLogServlet} or a {@link SubLogServlet}.

+	 * @param isFeedLog boolean to handle those places where a feedlog request is different from

+	 * a sublog request

+	 */

+	protected LogServlet(boolean isFeedLog) {

+		this.isfeedlog = isFeedLog;

+	}

+

+	/**

+	 * DELETE a logging URL -- not supported.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "DELETE not allowed for the logURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * GET a logging URL -- retrieve logging data for a feed or subscription.

+	 * See the <b>Logging API</b> document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		int id = getIdFromPath(req);

+		if (id < 0) {

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing or bad feed/subscription number.");

+			return;

+		}

+		Map<String, String> map = buildMapFromRequest(req);

+		if (map.get("err") != null) {

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));

+			return;

+		}

+		// check Accept: header??

+

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(LOGLIST_CONTENT_TYPE);

+		@SuppressWarnings("resource")

+		ServletOutputStream out = resp.getOutputStream();

+		final String fields = req.getParameter("fields");

+

+		out.print("[");

+		if (isfeedlog) {

+			// Handle /feedlog/feedid request

+			boolean firstrow = true;

+

+			// 1. Collect publish records for this feed

+			RowHandler rh = new PublishRecordRowHandler(out, fields, firstrow);

+			getPublishRecordsForFeed(id, rh, map);

+			firstrow = rh.firstrow;

+

+			// 2. Collect delivery records for subscriptions to this feed

+			rh = new DeliveryRecordRowHandler(out, fields, firstrow);

+			getDeliveryRecordsForFeed(id, rh, map);

+			firstrow = rh.firstrow;

+

+			// 3. Collect expiry records for subscriptions to this feed

+			rh = new ExpiryRecordRowHandler(out, fields, firstrow);

+			getExpiryRecordsForFeed(id, rh, map);

+		} else {

+			// Handle /sublog/subid request

+			Subscription sub = Subscription.getSubscriptionById(id);

+			if (sub != null) {

+				// 1. Collect publish records for the feed this subscription is attached to

+				RowHandler rh = new PublishRecordRowHandler(out, fields, true);

+				getPublishRecordsForFeed(sub.getFeedid(), rh, map);

+

+				// 2. Collect delivery records for this subscription

+				rh = new DeliveryRecordRowHandler(out, fields, rh.firstrow);

+				getDeliveryRecordsForSubscription(id, rh, map);

+

+				// 3. Collect expiry records for this subscription

+				rh = new ExpiryRecordRowHandler(out, fields, rh.firstrow);

+				getExpiryRecordsForSubscription(id, rh, map);

+			}

+		}

+		out.print("\n]");

+	}

+	/**

+	 * PUT a logging URL -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "PUT not allowed for the logURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * POST a logging URL -- not supported.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		String message = "POST not allowed for the logURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+

+	private Map<String, String> buildMapFromRequest(HttpServletRequest req) {

+		Map<String, String> map = new HashMap<String, String>();

+		String s = req.getParameter("type");

+		if (s != null) {

+			if (s.equals("pub") || s.equals("del") || s.equals("exp")) {

+				map.put("type", s);

+			} else {

+				map.put("err", "bad type");

+				return map;

+			}

+		} else

+			map.put("type", "all");

+		map.put("publishSQL", "");

+		map.put("statusSQL", "");

+		map.put("resultSQL", "");

+		map.put("reasonSQL", "");

+

+		s = req.getParameter("publishId");

+		if (s != null) {

+			if (s.indexOf("'") >= 0) {

+				map.put("err", "bad publishId");

+				return map;

+			}

+			map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");

+		}

+

+		s = req.getParameter("statusCode");

+		if (s != null) {

+			String sql = null;

+			if (s.equals("success")) {

+				sql = " AND STATUS >= 200 AND STATUS < 300";

+			} else if (s.equals("redirect")) {

+				sql = " AND STATUS >= 300 AND STATUS < 400";

+			} else if (s.equals("failure")) {

+				sql = " AND STATUS >= 400";

+			} else {

+				try {

+					Integer n = Integer.parseInt(s);

+					if ((n >= 100 && n < 600) || (n == -1))

+						sql = " AND STATUS = " + n;

+				} catch (NumberFormatException e) {

+				}

+			}

+			if (sql == null) {

+				map.put("err", "bad statusCode");

+				return map;

+			}

+			map.put("statusSQL", sql);

+			map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));

+		}

+

+		s = req.getParameter("expiryReason");

+		if (s != null) {

+			map.put("type", "exp");

+			if (s.equals("notRetryable")) {

+				map.put("reasonSQL", " AND REASON = 'notRetryable'");

+			} else if (s.equals("retriesExhausted")) {

+				map.put("reasonSQL", " AND REASON = 'retriesExhausted'");

+			} else if (s.equals("diskFull")) {

+				map.put("reasonSQL", " AND REASON = 'diskFull'");

+			} else if (s.equals("other")) {

+				map.put("reasonSQL", " AND REASON = 'other'");

+			} else {

+				map.put("err", "bad expiryReason");

+				return map;

+			}

+		}

+

+		long stime = getTimeFromParam(req.getParameter("start"));

+		if (stime < 0) {

+			map.put("err", "bad start");

+			return map;

+		}

+		long etime = getTimeFromParam(req.getParameter("end"));

+		if (etime < 0) {

+			map.put("err", "bad end");

+			return map;

+		}

+		if (stime == 0 && etime == 0) {

+			etime = System.currentTimeMillis();

+			stime = etime - TWENTYFOUR_HOURS;

+		} else if (stime == 0) {

+			stime = etime - TWENTYFOUR_HOURS;

+		} else if (etime == 0) {

+			etime = stime + TWENTYFOUR_HOURS;

+		}

+		map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));

+		return map;

+	}

+	private long getTimeFromParam(final String s) {

+		if (s == null)

+			return 0;

+		try {

+			// First, look for an RFC 3339 date

+			String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;

+			SimpleDateFormat sdf = new SimpleDateFormat(fmt);

+			Date d = sdf.parse(s);

+			return d.getTime();

+		} catch (ParseException e) {

+		}

+		try {

+			// Also allow a long (in ms); useful for testing

+			long n = Long.parseLong(s);

+			return n;

+		} catch (NumberFormatException e) {

+		}

+		intlogger.info("Error parsing time="+s);

+		return -1;

+	}

+

+	private void getPublishRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {

+		String type = map.get("type");

+		if (type.equals("all") || type.equals("pub")) {

+			String sql = "select * from LOG_RECORDS where FEEDID = "+feedid

+				+ " AND TYPE = 'pub'"

+				+ map.get("timeSQL") + map.get("publishSQL") + map.get("statusSQL");

+			getRecordsForSQL(sql, rh);

+		}

+	}

+	private void getDeliveryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {

+		String type = map.get("type");

+		if (type.equals("all") || type.equals("del")) {

+			String sql = "select * from LOG_RECORDS where FEEDID = "+feedid

+				+ " AND TYPE = 'del'"

+				+ map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");

+			getRecordsForSQL(sql, rh);

+		}

+	}

+	private void getDeliveryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {

+		String type = map.get("type");

+		if (type.equals("all") || type.equals("del")) {

+			String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid

+				+ " AND TYPE = 'del'"

+				+ map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");

+			getRecordsForSQL(sql, rh);

+		}

+	}

+	private void getExpiryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {

+		String type = map.get("type");

+		if (type.equals("all") || type.equals("exp")) {

+			String st = map.get("statusSQL");

+			if (st == null || st.length() == 0) {

+				String sql = "select * from LOG_RECORDS where FEEDID = "+feedid

+					+ " AND TYPE = 'exp'"

+					+ map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");

+				getRecordsForSQL(sql, rh);

+			}

+		}

+	}

+	private void getExpiryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {

+		String type = map.get("type");

+		if (type.equals("all") || type.equals("exp")) {

+			String st = map.get("statusSQL");

+			if (st == null || st.length() == 0) {

+				String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid

+					+ " AND TYPE = 'exp'"

+					+ map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");

+				getRecordsForSQL(sql, rh);

+			}

+		}

+	}

+	private void getRecordsForSQL(String sql, RowHandler rh) {

+		intlogger.debug(sql);

+		long start = System.currentTimeMillis();

+		DB db = new DB();

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				rh.handleRow(rs);

+			}

+			rs.close();

+			stmt.close();

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+		intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Main.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Main.java
new file mode 100644
index 0000000..5911ecd
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Main.java
@@ -0,0 +1,245 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.security.Security;

+import java.util.Properties;

+import java.util.Timer;

+

+import org.apache.log4j.Logger;

+import org.eclipse.jetty.server.Connector;

+import org.eclipse.jetty.server.Handler;

+import org.eclipse.jetty.server.NCSARequestLog;

+import org.eclipse.jetty.server.Server;

+import org.eclipse.jetty.server.handler.ContextHandlerCollection;

+import org.eclipse.jetty.server.handler.DefaultHandler;

+import org.eclipse.jetty.server.handler.HandlerCollection;

+import org.eclipse.jetty.server.handler.RequestLogHandler;

+import org.eclipse.jetty.server.nio.SelectChannelConnector;

+import org.eclipse.jetty.server.ssl.SslSelectChannelConnector;

+import org.eclipse.jetty.servlet.FilterHolder;

+import org.eclipse.jetty.servlet.FilterMapping;

+import org.eclipse.jetty.servlet.ServletContextHandler;

+import org.eclipse.jetty.servlet.ServletHolder;

+import org.eclipse.jetty.util.ssl.SslContextFactory;

+import org.eclipse.jetty.util.thread.QueuedThreadPool;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.LogfileLoader;

+import com.att.research.datarouter.provisioning.utils.PurgeLogDirTask;

+import com.att.research.datarouter.provisioning.utils.ThrottleFilter;

+

+/**

+ * <p>

+ * A main class which may be used to start the provisioning server with an "embedded" Jetty server.

+ * Configuration is done via the properties file <i>provserver.properties</i>, which should be in the CLASSPATH.

+ * The provisioning server may also be packaged with a web.xml and started as a traditional webapp.

+ * </p>
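+ * <p>
+ * The properties read at startup (see <code>main()</code> and the servlets) share the prefix
+ * <i>com.att.research.datarouter.provserver.</i> and include <i>http.port</i> (default 8080),
+ * <i>https.port</i> (default 8443), the keystore settings (<i>keystore.type</i>, <i>keystore.path</i>,
+ * <i>keystore.password</i>, <i>keymanager.password</i>), the optional truststore settings
+ * (<i>truststore.path</i>, <i>truststore.password</i>; a built-in default truststore is used when unset),
+ * the access log directory (<i>accesslog.dir</i>), and the log spool directory (<i>spooldir</i>).
+ * </p>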

+ * <p>

+ * Most of the work of the provisioning server is carried out within the servlets (configured below)

+ * that are used to handle the various types of requests the server may receive.

+ * In addition, there are background threads started to perform other tasks:

+ * </p>

+ * <ul>

+ * <li>One background Thread runs the {@link LogfileLoader} in order to process incoming logfiles.

+ *   This Thread is created as a side effect of the first successful POST to the /internal/logs/ servlet.</li>

+ * <li>One background Thread runs the {@link SynchronizerTask} which is used to periodically

+ *   synchronize the database between active and standby servers.</li>

+ * <li>One background Thread runs the {@link Poker} which is used to notify the nodes whenever

+ *   provisioning data changes.</li>

+ * <li>One task is run once a day to run {@link PurgeLogDirTask} which purges older logs from the

+ *   /opt/app/datartr/logs directory.</li>

+ * </ul>

+ * <p>

+ * The provisioning server is stopped by issuing a GET to the URL http://127.0.0.1/internal/halt

+ * using <i>curl</i> or some other such tool.

+ * </p>

+ *

+ * @author Robert Eby

+ * @version $Id: Main.java,v 1.12 2014/03/12 19:45:41 eby Exp $

+ */

+public class Main {

+	/** The truststore to use if none is specified */

+	public static final String DEFAULT_TRUSTSTORE           = "/opt/java/jdk/jdk180/jre/lib/security/cacerts";

+	public static final String KEYSTORE_TYPE_PROPERTY       = "com.att.research.datarouter.provserver.keystore.type";

+	public static final String KEYSTORE_PATH_PROPERTY       = "com.att.research.datarouter.provserver.keystore.path";

+	public static final String KEYSTORE_PASSWORD_PROPERTY   = "com.att.research.datarouter.provserver.keystore.password";

+	public static final String TRUSTSTORE_PATH_PROPERTY     = "com.att.research.datarouter.provserver.truststore.path";

+	public static final String TRUSTSTORE_PASSWORD_PROPERTY = "com.att.research.datarouter.provserver.truststore.password";

+

+	/** The one and only {@link Server} instance in this JVM */

+	private static Server server;

+

+	/**

+	 * Starts the Data Router Provisioning server.

+	 * @param args not used

+	 * @throws Exception if Jetty has a problem starting

+	 */

+	public static void main(String[] args) throws Exception {

+		Security.setProperty("networkaddress.cache.ttl", "4");

+		Logger logger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+

+		// Check DB is accessible and contains the expected tables

+		if (! checkDatabase(logger))

+			System.exit(1);

+

+		logger.info("PROV0000 **** AT&T Data Router Provisioning Server starting....");

+

+		// Get properties

+		Properties p = (new DB()).getProperties();

+		int http_port  = Integer.parseInt(p.getProperty("com.att.research.datarouter.provserver.http.port", "8080"));

+		int https_port = Integer.parseInt(p.getProperty("com.att.research.datarouter.provserver.https.port", "8443"));

+

+		// HTTP connector

+		SelectChannelConnector http = new SelectChannelConnector();

+		http.setPort(http_port);

+		http.setMaxIdleTime(300000);

+		http.setRequestHeaderSize(2048);

+		http.setAcceptors(2);

+		http.setConfidentialPort(https_port);

+		http.setLowResourcesConnections(20000);

+

+		// HTTPS connector

+		SslSelectChannelConnector https = new SslSelectChannelConnector();

+		https.setPort(https_port);

+		https.setMaxIdleTime(30000);

+		https.setRequestHeaderSize(8192);

+		https.setAcceptors(2);

+

+		// SSL stuff

+		SslContextFactory cf = https.getSslContextFactory();

+		

+		// Skip SSLv3 (fix)

+		cf.addExcludeProtocols("SSLv3");

+		logger.info("Excluded protocols prov-" + java.util.Arrays.toString(cf.getExcludeProtocols()));

+		// End of SSLv3 fix

+

+		cf.setKeyStoreType(p.getProperty(KEYSTORE_TYPE_PROPERTY, "jks"));

+		cf.setKeyStorePath(p.getProperty(KEYSTORE_PATH_PROPERTY));

+		cf.setKeyStorePassword(p.getProperty(KEYSTORE_PASSWORD_PROPERTY));

+		cf.setKeyManagerPassword(p.getProperty("com.att.research.datarouter.provserver.keymanager.password"));

+		String ts = p.getProperty(TRUSTSTORE_PATH_PROPERTY);

+		if (ts != null && ts.length() > 0) {

+			System.out.println("@@ TS -> "+ts);

+			cf.setTrustStore(ts);

+			cf.setTrustStorePassword(p.getProperty(TRUSTSTORE_PASSWORD_PROPERTY));

+		} else {

+			cf.setTrustStore(DEFAULT_TRUSTSTORE);

+			cf.setTrustStorePassword("changeit");

+		}
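+		// Note: the two calls below unconditionally replace the truststore selected above with a fixed self-signed store.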

+		cf.setTrustStore("/opt/app/datartr/self_signed/cacerts.jks");

+		cf.setTrustStorePassword("changeit");

+		cf.setWantClientAuth(true);

+

+		// Servlet and Filter configuration

+		ServletContextHandler ctxt = new ServletContextHandler(0);

+		ctxt.setContextPath("/");

+		ctxt.addServlet(new ServletHolder(new FeedServlet()),         "/feed/*");

+		ctxt.addServlet(new ServletHolder(new FeedLogServlet()),      "/feedlog/*");

+		ctxt.addServlet(new ServletHolder(new PublishServlet()),      "/publish/*");

+		ctxt.addServlet(new ServletHolder(new SubscribeServlet()),    "/subscribe/*");

+		ctxt.addServlet(new ServletHolder(new StatisticsServlet()), 	  "/statistics/*");

+		ctxt.addServlet(new ServletHolder(new SubLogServlet()),       "/sublog/*");

+		ctxt.addServlet(new ServletHolder(new GroupServlet()),    	  "/group/*"); //Provision groups - Rally US708115 -1610 

+		ctxt.addServlet(new ServletHolder(new SubscriptionServlet()), "/subs/*");

+		ctxt.addServlet(new ServletHolder(new InternalServlet()),     "/internal/*");

+		ctxt.addServlet(new ServletHolder(new RouteServlet()),        "/internal/route/*");

+		ctxt.addServlet(new ServletHolder(new DRFeedsServlet()),      "/");

+		ctxt.addFilter (new FilterHolder (new ThrottleFilter()),      "/publish/*", FilterMapping.REQUEST);

+

+		ContextHandlerCollection contexts = new ContextHandlerCollection();

+		contexts.addHandler(ctxt);

+

+		// Request log configuration

+		NCSARequestLog nrl = new NCSARequestLog();

+		nrl.setFilename(p.getProperty("com.att.research.datarouter.provserver.accesslog.dir") + "/request.log.yyyy_mm_dd");

+		nrl.setFilenameDateFormat("yyyyMMdd");

+		nrl.setRetainDays(90);

+		nrl.setAppend(true);

+		nrl.setExtended(false);

+		nrl.setLogCookies(false);

+		nrl.setLogTimeZone("GMT");

+

+		RequestLogHandler reqlog = new RequestLogHandler();

+		reqlog.setRequestLog(nrl);

+

+		// Server's Handler collection

+		HandlerCollection hc = new HandlerCollection();

+		hc.setHandlers(new Handler[] { contexts, new DefaultHandler() });

+		hc.addHandler(reqlog);

+

+		// Server's thread pool

+		QueuedThreadPool pool = new QueuedThreadPool();

+		pool.setMinThreads(10);

+		pool.setMaxThreads(200);

+		pool.setDetailedDump(false);

+

+		// Daemon to clean up the log directory on a daily basis

+		Timer rolex = new Timer();

+		rolex.scheduleAtFixedRate(new PurgeLogDirTask(), 0, 86400000L);	// run once per day

+

+		// Start LogfileLoader

+		LogfileLoader.getLoader();

+

+		// The server itself

+		server = new Server();

+		server.setThreadPool(pool);

+		server.setConnectors(new Connector[] { http, https });

+		server.setHandler(hc);

+		server.setStopAtShutdown(true);

+		server.setSendServerVersion(true);

+		server.setSendDateHeader(true);

+		server.setGracefulShutdown(5000);	// allow 5 seconds for servlets to wrap up

+		server.setDumpAfterStart(false);

+		server.setDumpBeforeStop(false);

+

+		server.start();

+		server.join();

+		logger.info("PROV0001 **** AT&T Data Router Provisioning Server halted.");

+	}

+

+	private static boolean checkDatabase(Logger logger) {

+		DB db = new DB();

+		return db.runRetroFits();

+	}

+

+	/**

+	 * Stop the Jetty server.

+	 */

+	public static void shutdown() {

+		new Thread() {

+			@Override

+			public void run() {

+				try {

+					server.stop();

+					Thread.sleep(5000L);

+					System.exit(0);

+				} catch (Exception e) {

+					// ignore

+				}

+			}

+		}.start();

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Poker.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Poker.java
new file mode 100644
index 0000000..13350df
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/Poker.java
@@ -0,0 +1,318 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.FileInputStream;

+import java.io.IOException;

+import java.io.InputStream;

+import java.net.HttpURLConnection;

+import java.net.InetAddress;

+import java.net.MalformedURLException;

+import java.net.URL;

+import java.net.UnknownHostException;

+import java.util.Arrays;

+import java.util.HashSet;

+import java.util.Map;

+import java.util.Properties;

+import java.util.Set;

+import java.util.Timer;

+import java.util.TimerTask;

+import java.util.TreeSet;

+

+import javax.servlet.ServletException;

+

+import org.apache.log4j.Logger;

+import org.json.JSONException;

+import org.json.JSONObject;

+import org.json.JSONTokener;

+

+import com.att.research.datarouter.provisioning.beans.EgressRoute;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.beans.IngressRoute;

+import com.att.research.datarouter.provisioning.beans.NetworkRoute;

+import com.att.research.datarouter.provisioning.beans.Parameters;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.beans.Group; //Groups feature Rally:US708115 - 1610	

+import com.att.research.datarouter.provisioning.utils.*;

+

+/**

+ * This class handles the two timers (described in the R1 Design Notes), and takes care of issuing

+ * the GET to the "poke" URL on each node.
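+ * <p>
+ * An illustrative use from provisioning code (the delays are made up; both arguments are absolute
+ * epoch-millisecond deadlines, as the comparison in <code>run()</code> shows):
+ * </p>
+ * <pre>{@code
+ * long now = System.currentTimeMillis();
+ * Poker.getPoker().setTimers(now + 5000L, now + 20000L);  // poke after ~5s of quiet, no later than ~20s
+ * }</pre>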

+ *

+ * @author Robert Eby

+ * @version $Id: Poker.java,v 1.11 2014/01/08 16:13:47 eby Exp $

+ */

+public class Poker extends TimerTask {

+	/** Template used to generate the URL to issue the GET against */

+	public static final String POKE_URL_TEMPLATE = "http://%s/internal/fetchProv";

+	

+	

+	

+

+	/** This is a singleton -- there is only one Poker object in the server */

+	private static Poker p;

+

+	/**

+	 * Get the singleton Poker object.

+	 * @return the Poker

+	 */

+	public static synchronized Poker getPoker() {

+		if (p == null)

+			p = new Poker();

+		return p;

+	}

+

+	private long timer1;

+	private long timer2;

+	private Timer rolex;

+	private String this_pod;		// DNS name of this machine

+	private Logger logger;

+	private String provstring;

+

+	private Poker() {

+		timer1 = timer2 = 0;

+		rolex = new Timer();

+		logger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+		try {

+			this_pod = InetAddress.getLocalHost().getHostName();

+		} catch (UnknownHostException e) {

+			this_pod = "*UNKNOWN*";	// not a major problem

+		}

+		provstring = buildProvisioningString();

+

+		rolex.scheduleAtFixedRate(this, 0L, 1000L);	// Run once a second to check the timers

+	}

+

+	/**

+	 * This method sets the two timers described in the design notes.

+	 * @param t1 the first timer; it controls how long to wait after a provisioning request before poking each node.

+	 *   This timer can be reset if it has not "gone off".

+	 * @param t2 the second timer; it sets the outer bound on how long to wait.  It cannot be reset.

+	 */

+	public void setTimers(long t1, long t2) {

+		synchronized (this_pod) {

+			if (timer1 == 0 || t1 > timer1)

+				timer1 = t1;

+			if (timer2 == 0)

+				timer2 = t2;

+		}

+		if (logger.isDebugEnabled())

+			logger.debug("Poker timers set to " + timer1 + " and " + timer2);

+	

+		

+	}

+

+	/**

+	 * Return the last provisioning string built.

+	 * @return the last provisioning string built.

+	 */

+	public String getProvisioningString() {

+		return provstring;

+	}

+

+	/**

+	 * The method to run at the predefined interval (once per second).  This method checks

+	 * whether either of the two timers has expired; if so, it rebuilds the provisioning

+	 * string and pokes all the nodes and the other PODs.  The timers are then reset to 0.

+	 */

+	@Override

+	public void run() {

+		try {

+			if (timer1 > 0) {

+				long now = System.currentTimeMillis();

+				boolean fire = false;

+				synchronized (this_pod) {

+					if (now > timer1 || now > timer2) {

+						timer1 = timer2 = 0;

+						fire = true;

+					}

+				}

+				if (fire) {

+					// Rebuild the prov string

+					provstring = buildProvisioningString();

+

+					// Only the active POD should poke nodes, etc.

+					boolean active = SynchronizerTask.getSynchronizer().isActive();

+					if (active) {

+						// Poke all the DR nodes

+						for (String n : BaseServlet.getNodes()) {

+							pokeNode(n);

+						}

+						// Poke the pod that is not us

+						for (String n : BaseServlet.getPods()) {

+							if (n.length() > 0 && !n.equals(this_pod))

+								pokeNode(n);

+						}

+					}

+				}

+			}

+		} catch (Exception e) {

+			logger.warn("PROV0020: Caught exception in Poker: "+e);

+			e.printStackTrace();

+		}

+	}

+	private void pokeNode(final String nodename) {

+		logger.debug("PROV0012 Poking node " + nodename + " ...");

+		Runnable r = new Runnable() {

+			@Override

+			public void run() {

+			

+				try {

+					String u = String.format(POKE_URL_TEMPLATE, nodename+":"+DB.HTTP_PORT);

+					URL url = new URL(u);

+					HttpURLConnection conn = (HttpURLConnection) url.openConnection();

+					conn.setConnectTimeout(60000);	//Fixes for Itrack DATARTR-3, poke timeout

+					conn.connect();

+					conn.getContentLength();	// Force the GET through

+					conn.disconnect();

+				} catch (MalformedURLException e) {

+					logger.warn("PROV0013 MalformedURLException Error poking node "+nodename+": " + e.getMessage());

+				} catch (IOException e) {

+					logger.warn("PROV0013 IOException Error poking node "+nodename+": " + e.getMessage());

+				}

+			}

+		};

+//		Thread t = new Thread(r);

+//		t.start();

+		r.run();

+	}

+	@SuppressWarnings("unused")

+	private String buildProvisioningString() {

+		StringBuilder sb = new StringBuilder("{\n");

+

+		// Append Feeds to the string

+		String pfx = "\n";

+		sb.append("\"feeds\": [");

+		for (Feed f : Feed.getAllFeeds()) {

+			sb.append(pfx);

+			sb.append(f.asJSONObject().toString());

+			pfx = ",\n";

+		}

+		sb.append("\n],\n");

+		

+		//Append groups to the string - Rally:US708115  - 1610		

+		pfx = "\n";		

+		sb.append("\"groups\": [");		

+		for (Group s : Group.getAllgroups()) {		

+			sb.append(pfx);		

+			sb.append(s.asJSONObject().toString());		

+			pfx = ",\n";		

+		}		

+		sb.append("\n],\n");		

+				

+

+		// Append Subscriptions to the string

+		pfx = "\n";

+		sb.append("\"subscriptions\": [");

+		for (Subscription s : Subscription.getAllSubscriptions()) {

+			sb.append(pfx);

+			if(s!=null)

+			sb.append(s.asJSONObject().toString());

+			pfx = ",\n";

+		}

+		sb.append("\n],\n");

+

+		// Append Parameters to the string

+		pfx = "\n";

+		sb.append("\"parameters\": {");

+		Map<String,String> props = Parameters.getParameters();

+		Set<String> ivals = new HashSet<String>();

+		String intv = props.get("_INT_VALUES");

+		if (intv != null)

+			ivals.addAll(Arrays.asList(intv.split("\\|")));

+		for (String key : new TreeSet<String>(props.keySet())) {

+			String v = props.get(key);

+			sb.append(pfx);

+			sb.append("  \"").append(key).append("\": ");

+			if (ivals.contains(key)) {

+				// integer value

+				sb.append(v);

+			} else if (key.endsWith("S")) {

+				// Split and append array of strings

+				String[] pp = v.split("\\|");

+				String p2 = "";

+				sb.append("[");

+				for (String t : pp) {

+					sb.append(p2).append("\"").append(quote(t)).append("\"");

+					p2 = ",";

+				}

+				sb.append("]");

+			} else {

+				sb.append("\"").append(quote(v)).append("\"");

+			}

+			pfx = ",\n";

+		}

+		sb.append("\n},\n");

+

+		// Append Routes to the string

+		pfx = "\n";

+		sb.append("\"ingress\": [");

+		for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {

+			sb.append(pfx);

+			sb.append(in.asJSONObject().toString());

+			pfx = ",\n";

+		}

+		sb.append("\n],\n");

+

+		pfx = "\n";

+		sb.append("\"egress\": {");

+		for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {

+			sb.append(pfx);

+			String t = eg.asJSONObject().toString();

+			t = t.substring(1, t.length()-1);

+			sb.append(t);

+			pfx = ",\n";

+		}

+		sb.append("\n},\n");

+

+		pfx = "\n";

+		sb.append("\"routing\": [");

+		for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {

+			sb.append(pfx);

+			sb.append(ne.asJSONObject().toString());

+			pfx = ",\n";

+		}

+		sb.append("\n]");

+		sb.append("\n}");

+

+		// Convert to string and verify it is valid JSON

+		String provstring = sb.toString();

+		try {

+			new JSONObject(new JSONTokener(provstring));

+		} catch (JSONException e) {

+			logger.warn("PROV0016: Possible invalid prov string: "+e);

+		}

+		return provstring;

+	}

+	private String quote(String s) {

+		StringBuilder sb = new StringBuilder();

+		for (char ch : s.toCharArray()) {

+			if (ch == '\\' || ch == '"') {

+				sb.append('\\');

+			}

+			sb.append(ch);

+		}

+		return sb.toString();

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/ProxyServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/ProxyServlet.java
new file mode 100644
index 0000000..b22b018
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/ProxyServlet.java
@@ -0,0 +1,304 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.File;

+import java.io.FileInputStream;

+import java.io.FileNotFoundException;

+import java.io.IOException;

+import java.io.InputStream;

+import java.net.URI;

+import java.security.KeyStore;

+import java.security.KeyStoreException;

+import java.util.Collections;

+import java.util.List;

+import java.util.Properties;

+

+import javax.servlet.ServletConfig;

+import javax.servlet.ServletException;

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.apache.commons.io.IOUtils;

+import org.apache.http.Header;

+import org.apache.http.HttpEntity;

+import org.apache.http.HttpResponse;

+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;

+import org.apache.http.client.methods.HttpGet;

+import org.apache.http.client.methods.HttpRequestBase;

+import org.apache.http.conn.scheme.Scheme;

+import org.apache.http.conn.ssl.SSLSocketFactory;

+import org.apache.http.entity.BasicHttpEntity;

+import org.apache.http.impl.client.AbstractHttpClient;

+import org.apache.http.impl.client.DefaultHttpClient;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.URLUtilities;

+

+/**

+ * This class is the base class for those servlets that need to proxy their requests from the

+ * standby to the active server.  Its methods perform the proxy function to the active server. If the

+ * active server is not reachable, a 503 (SC_SERVICE_UNAVAILABLE) is returned.  Only

+ * DELETE/GET/PUT/POST are supported.
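+ * <p>
+ * A sketch of the pattern subclasses use (this is the same pattern that appears in
+ * <code>InternalServlet</code> above):
+ * </p>
+ * <pre>{@code
+ * if (isProxyOK(req) && isProxyServer()) {
+ *     super.doGet(req, resp);   // forward the request to the active server
+ *     return;
+ * }
+ * // ...otherwise handle the request locally...
+ * }</pre>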

+ *

+ * @author Robert Eby

+ * @version $Id: ProxyServlet.java,v 1.3 2014/03/24 18:47:10 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class ProxyServlet extends BaseServlet {

+	private boolean inited = false;

+	private Scheme sch;

+

+	/**

+	 * Initialize this servlet, by setting up SSL.

+	 */

+	@SuppressWarnings("deprecation")

+	@Override

+	public void init(ServletConfig config) throws ServletException {

+		super.init(config);

+		try {

+			// Set up keystore

+			Properties props = (new DB()).getProperties();

+			String type  = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");

+			String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);

+			String pass  = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);

+			KeyStore keyStore = readStore(store, pass, type);

+

+			store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);

+			pass  = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);

+			if (store == null || store.length() == 0) {

+				store = Main.DEFAULT_TRUSTSTORE;

+				pass = "changeit";

+			}

+			KeyStore trustStore = readStore(store, pass, KeyStore.getDefaultType());

+

+			// We are connecting with the node name, but the certificate will have the CNAME

+			// So we need to accept a non-matching certificate name
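+			// NOTE: the key password passed to SSLSocketFactory below is the literal "changeit", not the configured keystore password.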

+			SSLSocketFactory socketFactory = new SSLSocketFactory(keyStore, "changeit", trustStore);

+			socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);

+			sch = new Scheme("https", 443, socketFactory);

+			inited = true;

+		} catch (Exception e) {

+			e.printStackTrace();

+		}

+		intlogger.info("ProxyServlet: inited = "+inited);

+	}

+	private KeyStore readStore(String store, String pass, String type) throws KeyStoreException, FileNotFoundException {

+		KeyStore ks = KeyStore.getInstance(type);

+		FileInputStream instream = new FileInputStream(new File(store));

+		try {

+		    ks.load(instream, pass.toCharArray());

+		} catch (Exception x) {

+			System.err.println("READING TRUSTSTORE: "+x);

+		} finally {

+		    try { instream.close(); } catch (Exception ignore) {}

+		}

+		return ks;

+	}

+	/**

+	 * Return <i>true</i> if the requester has NOT set the <i>noproxy</i> CGI variable.

+	 * If they have, this indicates they want to forcibly turn the proxy off.

+	 * @param req the HTTP request

+	 * @return true or false

+	 */

+	protected boolean isProxyOK(final HttpServletRequest req) {

+		String t = req.getQueryString();

+		if (t != null) {

+			t = t.replaceAll("&amp;", "&");

+			for (String s : t.split("&")) {

+				if (s.equals("noproxy") || s.startsWith("noproxy="))

+					return false;

+			}

+		}

+		return true;

+	}

+	/**

+	 * Is this the standby server?  If it is, the proxy functions can be used.

+	 * If not, the proxy functions should not be called; if they are, they will send a response of 500

+	 * (Internal Server Error).

+	 * @return true if this server is the standby (and hence a proxy server).

+	 */

+	public boolean isProxyServer() {

+		SynchronizerTask st = SynchronizerTask.getSynchronizer();

+		return st.getState() == SynchronizerTask.STANDBY;

+	}

+	/**

+	 * Issue a proxy DELETE to the active provisioning server.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		doProxy(req, resp, "DELETE");

+	}

+	/**

+	 * Issue a proxy GET to the active provisioning server.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		doProxy(req, resp, "GET");

+	}

+	/**

+	 * Issue a proxy PUT to the active provisioning server.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		doProxy(req, resp, "PUT");

+	}

+	/**

+	 * Issue a proxy POST to the active provisioning server.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		doProxy(req, resp, "POST");

+	}

+	/**

+	 * Issue a proxy GET to the active provisioning server.  Unlike doGet() above,

+	 * this method will allow the caller to fall back to other code if the remote server is unreachable.

+	 * @return true if the proxy succeeded

+	 */

+	public boolean doGetWithFallback(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		boolean rv = false;

+		if (inited) {

+			String url = buildUrl(req);

+			intlogger.info("ProxyServlet: proxying with fallback GET "+url);

+			AbstractHttpClient httpclient = new DefaultHttpClient();

+			HttpRequestBase proxy = new HttpGet(url);

+			try {

+				httpclient.getConnectionManager().getSchemeRegistry().register(sch);

+

+				// Copy request headers and request body

+				copyRequestHeaders(req, proxy);

+

+				// Execute the request

+				HttpResponse pxy_response = httpclient.execute(proxy);

+

+				// Get response headers and body

+				int code = pxy_response.getStatusLine().getStatusCode();

+				resp.setStatus(code);

+				copyResponseHeaders(pxy_response, resp);

+

+				HttpEntity entity = pxy_response.getEntity();

+				if (entity != null) {

+					InputStream in = entity.getContent();

+					IOUtils.copy(in, resp.getOutputStream());

+					in.close();

+				}

+				rv = true;

+			} catch (IOException e) {

+				System.err.println("ProxyServlet: "+e);

+				e.printStackTrace();

+			} finally {

+				proxy.releaseConnection();

+				httpclient.getConnectionManager().shutdown();

+			}

+		} else {

+			intlogger.warn("ProxyServlet: proxy disabled");

+		}

+		return rv;

+	}
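+	// Usage sketch (hypothetical caller code) for doGetWithFallback():
+	//
+	//   if (isProxyOK(req) && isProxyServer() && doGetWithFallback(req, resp))
+	//       return;                          // served by the active server
+	//   serveFromLocalTables(req, resp);     // hypothetical local fallback handler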

+	private void doProxy(HttpServletRequest req, HttpServletResponse resp, final String method) throws IOException {

+		if (inited && isProxyServer()) {

+			String url = buildUrl(req);

+			intlogger.info("ProxyServlet: proxying "+method + " "+url);

+			AbstractHttpClient httpclient = new DefaultHttpClient();

+			ProxyHttpRequest proxy = new ProxyHttpRequest(method, url);

+			try {

+				httpclient.getConnectionManager().getSchemeRegistry().register(sch);

+

+				// Copy request headers and request body

+				copyRequestHeaders(req, proxy);

+				if (method.equals("POST") || method.equals("PUT")){

+					BasicHttpEntity body = new BasicHttpEntity();

+					body.setContent(req.getInputStream());

+					body.setContentLength(-1);	// -1 = unknown

+					proxy.setEntity(body);

+				}

+

+				// Execute the request

+				HttpResponse pxy_response = httpclient.execute(proxy);

+

+				// Get response headers and body

+				int code = pxy_response.getStatusLine().getStatusCode();

+				resp.setStatus(code);

+				copyResponseHeaders(pxy_response, resp);

+

+				HttpEntity entity = pxy_response.getEntity();

+				if (entity != null) {

+					InputStream in = entity.getContent();

+					IOUtils.copy(in, resp.getOutputStream());

+					in.close();

+				}

+			} catch (IOException e) {

+				intlogger.warn("ProxyServlet: "+e);

+				resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);

+				e.printStackTrace();

+			} finally {

+				proxy.releaseConnection();

+				httpclient.getConnectionManager().shutdown();

+			}

+		} else {

+			intlogger.warn("ProxyServlet: proxy disabled");

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+		}

+	}

+	private String buildUrl(HttpServletRequest req) {
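+		// e.g. (illustrative) a request for /subs/5?count=10 arriving at the standby server
+		// is rebuilt as https://<peer-pod-name>/subs/5?count=10 and sent to the active server.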

+		StringBuilder sb = new StringBuilder("https://");

+		sb.append(URLUtilities.getPeerPodName());

+		sb.append(req.getRequestURI());

+		String q = req.getQueryString();

+		if (q != null)

+			sb.append("?").append(q);

+		return sb.toString();

+	}

+	private void copyRequestHeaders(HttpServletRequest from, HttpRequestBase to) {

+		@SuppressWarnings("unchecked")

+		List<String> list = Collections.list(from.getHeaderNames());

+		for (String name : list) {

+			// Proxy code will add this one

+			if (!name.equalsIgnoreCase("Content-Length"))

+				to.addHeader(name, from.getHeader(name));

+		}

+	}

+	private void copyResponseHeaders(HttpResponse from, HttpServletResponse to) {

+		for (Header hdr : from.getAllHeaders()) {

+			// Don't copy Date: our Jetty will add another Date header

+			if (!hdr.getName().equals("Date"))

+				to.addHeader(hdr.getName(), hdr.getValue());

+		}

+	}

+

+	public class ProxyHttpRequest extends HttpEntityEnclosingRequestBase {

+		private final String method;

+

+		public ProxyHttpRequest(final String method, final String uri) {

+			super();

+			this.method = method;

+	        setURI(URI.create(uri));

+		}

+		@Override

+		public String getMethod() {

+			return method;

+		}

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/PublishServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/PublishServlet.java
new file mode 100644
index 0000000..2a8e2e3
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/PublishServlet.java
@@ -0,0 +1,192 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InputStream;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.List;

+import java.util.Properties;

+

+import javax.servlet.ServletConfig;

+import javax.servlet.ServletException;

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONArray;

+import org.json.JSONObject;

+import org.json.JSONTokener;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.beans.IngressRoute;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+import com.att.research.datarouter.provisioning.utils.*;

+

+/**

+ * This servlet handles redirects for the &lt;publishURL&gt; on the provisioning server,

+ * which is generated by the provisioning server to handle publishing to a particular feed.

+ * See the <b>File Publishing and Delivery API</b> document for details on how these methods

+ * should be invoked.

+ *

+ * @author Robert Eby

+ * @version $Id: PublishServlet.java,v 1.8 2014/03/12 19:45:41 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class PublishServlet extends BaseServlet {

+	private int next_node;

+	private String provstring;

+	private List<IngressRoute> irt;

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.PublishServlet");

+    

+

+	@Override

+	public void init(ServletConfig config) throws ServletException {

+		super.init(config);

+		next_node = 0;

+		provstring = "";

+		irt = new ArrayList<IngressRoute>();

+	

+	}

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		redirect(req, resp);

+	}

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		redirect(req, resp);

+	}

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		redirect(req, resp);

+	}

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		redirect(req, resp);

+	}

+	private void redirect(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		String[] nodes = getNodes();

+		if (nodes == null || nodes.length == 0) {

+			resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "There are no nodes defined in the DR network.");

+		} else {

+			EventLogRecord elr = new EventLogRecord(req);

+			int feedid = checkPath(req);

+			if (feedid < 0) {

+				String message = (feedid == -1)

+					? "Invalid request - Missing or bad feed number."

+					: "Invalid request - Missing file ID.";

+				elr.setMessage(message);

+				elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+				eventlogger.info(elr);

+

+				resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			} else {

+				// Generate new URL

+				String nextnode = getRedirectNode(feedid, req);

+				nextnode = nextnode+":"+DB.HTTPS_PORT;

+				String newurl = "https://" + nextnode + "/publish" + req.getPathInfo();

+				String qs = req.getQueryString();

+				if (qs != null)

+					newurl += "?" + qs;

+

+				// Log redirect in event log

+				String message = "Redirected to: "+newurl;

+				elr.setMessage(message);

+				elr.setResult(HttpServletResponse.SC_MOVED_PERMANENTLY);

+				eventlogger.info(elr);

+

+				resp.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);

+				resp.setHeader("Location", newurl);

+			}

+		}

+	}

+	private String getRedirectNode(int feedid, HttpServletRequest req) {

+		// Check to see if the IRT needs to be updated

+		Poker p = Poker.getPoker();

+		String s = p.getProvisioningString();

+		synchronized (provstring) {

+			if (irt == null || (s.length() != provstring.length()) || !s.equals(provstring)) {

+				// Provisioning string has changed -- update the IRT

+				provstring = s;

+				JSONObject jo = new JSONObject(new JSONTokener(provstring));

+				JSONArray ja = jo.getJSONArray("ingress");

+				List<IngressRoute> newlist = new ArrayList<IngressRoute>();

+				for (int i = 0; i < ja.length(); i++) {

+					IngressRoute iroute = new IngressRoute(ja.getJSONObject(i));

+					newlist.add(iroute);

+				}

+				irt = newlist;

+			}

+		}

+

+		// Look in IRT for next node

+		for (IngressRoute route : irt) {

+			if (route.matches(feedid, req)) {

+				// pick a node at random from the list

+				Collection<String> nodes = route.getNodes();

+				String[] arr = nodes.toArray(new String[0]);

+				long id = System.currentTimeMillis() % arr.length;

+				String node = arr[(int) id];

+				intlogger.info("Redirecting to "+node+" because of route "+route);

+				return node;

+			}

+		}

+

+		// No IRT rule matches, do round robin of all active nodes

+		String[] nodes = getNodes();

+		if (next_node >= nodes.length)	// The list of nodes may have grown/shrunk

+			next_node = 0;

+		return nodes[next_node++];

+	}
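+	// checkPath() below expects a path of the form /<feedid>/<fileid>; it returns the feed ID on
+	// success, -1 for a missing or invalid feed number, and -2 for a missing file ID.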

+	private int checkPath(HttpServletRequest req) {

+		String path = req.getPathInfo();

+		if (path == null || path.length() < 2)

+			return -1;

+		path = path.substring(1);

+		int ix = path.indexOf('/');

+		if (ix < 0 || ix == path.length()-1)

+			return -2;

+		try {

+			int feedid = Integer.parseInt(path.substring(0, ix));

+			if (!Feed.isFeedValid(feedid))

+				return -1;

+			return feedid;

+		} catch (NumberFormatException e) {

+			return -1;

+		}

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/RouteServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/RouteServlet.java
new file mode 100644
index 0000000..68fd4c7
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/RouteServlet.java
@@ -0,0 +1,429 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.util.Set;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.beans.Deleteable;

+import com.att.research.datarouter.provisioning.beans.EgressRoute;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.IngressRoute;

+import com.att.research.datarouter.provisioning.beans.Insertable;

+import com.att.research.datarouter.provisioning.beans.NetworkRoute;

+import com.att.research.datarouter.provisioning.beans.NodeClass;

+

+/**

+ * <p>

+ * This servlet handles requests to URLs under /internal/route/ on the provisioning server.

+ * This part of the URL tree is used to manipulate the Data Router routing tables.

+ * These include:

+ * </p>

+ * <div class="contentContainer">

+ * <table class="packageSummary" border="0" cellpadding="3" cellspacing="0">

+ * <caption><span>URL Path Summary</span><span class="tabEnd">&nbsp;</span></caption>

+ * <tr>

+ *   <th class="colFirst" width="35%">URL Path</th>

+ *   <th class="colOne">Method</th>

+ *   <th class="colLast">Purpose</th>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/route/</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET a full JSON copy of all three routing tables.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst" rowspan="2">/internal/route/ingress/</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET a full JSON copy of the ingress routing table (IRT).</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to create a new entry in the ingress routing table (IRT).</td></tr>

+ * <tr class="altColor">

+ *   <td class="colFirst" rowspan="2">/internal/route/egress/</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET a full JSON copy of the egress routing table (ERT).</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to create a new entry in the egress routing table (ERT).</td></tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst" rowspan="2">/internal/route/network/</td>

+ *   <td class="colOne">GET</td>

+ *   <td class="colLast">used to GET a full JSON copy of the network routing table (NRT).</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colOne">POST</td>

+ *   <td class="colLast">used to create a new entry in the network routing table (NRT).</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/route/ingress/&lt;feed&gt;/&lt;user&gt;/&lt;subnet&gt;</td>

+ *   <td class="colOne">DELETE</td>

+ *   <td class="colLast">used to DELETE the ingress route corresponding to <i>feed</i>, <i>user</i> and <i>subnet</i>.

+ *   The / in the subnet specified should be replaced with a !, since / cannot be used in a URL.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst">/internal/route/ingress/&lt;seq&gt;</td>

+ *   <td class="colOne">DELETE</td>

+ *   <td class="colLast">used to DELETE all ingress routes with the matching <i>seq</i> sequence number.</td>

+ * </tr>

+ * <tr class="altColor">

+ *   <td class="colFirst">/internal/route/egress/&lt;sub&gt;</td>

+ *   <td class="colOne">DELETE</td>

+ *   <td class="colLast">used to DELETE the egress route the matching <i>sub</i> subscriber number.</td>

+ * </tr>

+ * <tr class="rowColor">

+ *   <td class="colFirst">/internal/route/network/&lt;fromnode&gt;/&lt;tonode&gt;</td>

+ *   <td class="colOne">DELETE</td>

+ *   <td class="colLast">used to DELETE the network route corresponding to <i>fromnode</i>

+ *   and <i>tonode</i>.</td>

+ * </tr>

+ * </table>

+ * <p>

+ * Authorization to use these URLs is a little different from that for other URLs on the provisioning server.

+ * For the most part, the IP address that the request comes from should be one of:

+ * </p>

+ * <ol>

+ * <li>an IP address of a provisioning server, or</li>

+ * <li>the IP address of a node, or</li>

+ * <li>an IP address from the "<i>special subnet</i>" which is configured with

+ * the PROV_SPECIAL_SUBNET parameter.</li>

+ * </ol>

+ * <p>

+ * All DELETE/GET/POST requests made to this servlet on the standby server are proxied to the

+ * active server (using the {@link ProxyServlet}) if it is up and reachable.

+ * </p>

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class RouteServlet extends ProxyServlet {

+	/**

+	 * DELETE route table entries by deleting part of the route table tree.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		if (isProxyOK(req) && isProxyServer()) {

+			super.doDelete(req, resp);

+			return;

+		}

+

+		String path = req.getPathInfo();

+		String[] parts = path.substring(1).split("/");

+		Deleteable[] d = null;

+		if (parts[0].equals("ingress")) {

+			if (parts.length == 4) {

+				// /internal/route/ingress/<feed>/<user>/<subnet>

+				try {

+					int feedid = Integer.parseInt(parts[1]);

+					IngressRoute er = IngressRoute.getIngressRoute(feedid, parts[2], parts[3].replaceAll("!", "/"));

+					if (er == null) {

+						resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified ingress route does not exist.");

+						return;

+					}

+					d = new Deleteable[] { er };

+				} catch (NumberFormatException e) {

+					resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid feed ID in 'delete ingress' command.");

+					return;

+				}

+			} else if (parts.length == 2) {

+				// /internal/route/ingress/<seq>

+				try {

+					int seq = Integer.parseInt(parts[1]);

+					Set<IngressRoute> set = IngressRoute.getIngressRoutesForSeq(seq);

+					d = set.toArray(new Deleteable[0]);

+				} catch (NumberFormatException e) {

+					resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sequence number in 'delete ingress' command.");

+					return;

+				}

+			} else {

+				resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete ingress' command.");

+				return;

+			}

+		} else if (parts[0].equals("egress")) {

+			if (parts.length == 2) {

+				// /internal/route/egress/<sub>

+				try {

+					int subid = Integer.parseInt(parts[1]);

+					EgressRoute er = EgressRoute.getEgressRoute(subid);

+					if (er == null) {

+						resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified egress route does not exist.");

+						return;

+					}

+					d = new Deleteable[] { er };

+				} catch (NumberFormatException e) {

+					resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sub ID in 'delete egress' command.");

+					return;

+				}

+			} else {

+				resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete egress' command.");

+				return;

+			}

+		} else if (parts[0].equals("network")) {

+			if (parts.length == 3) {

+				// /internal/route/network/<from>/<to>

+				try {

+					NetworkRoute nr = new NetworkRoute(

+						NodeClass.normalizeNodename(parts[1]),

+						NodeClass.normalizeNodename(parts[2])

+					);

+					d = new Deleteable[] { nr };

+				} catch (IllegalArgumentException e) {

+					resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified network route does not exist.");

+					return;

+				}

+			} else {

+				resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete network' command.");

+				return;

+			}

+		}

+		if (d == null) {

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+			return;

+		}

+		boolean rv = true;

+		for (Deleteable dd : d) {

+			rv &= doDelete(dd);

+		}

+		if (rv) {

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			provisioningDataChanged();

+			provisioningParametersChanged();

+		} else {

+			// Something went wrong with the DELETE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * GET route table entries from the route table tree specified by the URL path.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		if (isProxyOK(req) && isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+

+		String path = req.getPathInfo();

+		if (!path.endsWith("/"))

+			path += "/";

+		if (!path.equals("/") && !path.equals("/ingress/") && !path.equals("/egress/") && !path.equals("/network/")) {

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+			return;

+		}

+

+		StringBuilder sb = new StringBuilder("{\n");

+		String px2 = "";

+		if (path.equals("/") || path.equals("/ingress/")) {

+			String pfx = "\n";

+			sb.append("\"ingress\": [");

+			for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {

+				sb.append(pfx);

+				sb.append(in.asJSONObject().toString());

+				pfx = ",\n";

+			}

+			sb.append("\n]");

+			px2 = ",\n";

+		}

+

+		if (path.equals("/") || path.equals("/egress/")) {

+			String pfx = "\n";

+			sb.append(px2);

+			sb.append("\"egress\": {");

+			for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {

+				JSONObject jx = eg.asJSONObject();

+				for (String key : jx.keySet()) {

+					sb.append(pfx);

+					sb.append("  \"").append(key).append("\": ");

+					sb.append("\"").append(jx.getString(key)).append("\"");

+					pfx = ",\n";

+				}

+			}

+			sb.append("\n}");

+			px2 = ",\n";

+		}

+

+		if (path.equals("/") || path.equals("/network/")) {

+			String pfx = "\n";

+			sb.append(px2);

+			sb.append("\"routing\": [");

+			for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {

+				sb.append(pfx);

+				sb.append(ne.asJSONObject().toString());

+				pfx = ",\n";

+			}

+			sb.append("\n]");

+		}

+		sb.append("}\n");

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType("application/json");

+		resp.getOutputStream().print(sb.toString());

+	}

+	/**

+	 * PUT on &lt;/internal/route/*&gt; -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+	}

+	/**

+	 * POST - create new route table entries in the route table tree specified by the URL path.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		EventLogRecord elr = new EventLogRecord(req);

+		if (!isAuthorizedForInternal(req)) {

+			elr.setMessage("Unauthorized.");

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");

+			return;

+		}

+		if (isProxyOK(req) && isProxyServer()) {

+			super.doPost(req, resp);

+			return;

+		}

+		String path = req.getPathInfo();

+		Insertable[] ins = null;

+		if (path.startsWith("/ingress/")) {

+			// /internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s
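+			// e.g. (illustrative) POST /internal/route/ingress/?feed=12&user=-&subnet=-&nodepatt=node*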

+			try {

+				// Although it probably doesn't make sense, you can install two identical routes in the IRT

+				int feedid = Integer.parseInt(req.getParameter("feed"));

+				String user = req.getParameter("user");

+				if (user == null)

+					user = "-";

+				String subnet = req.getParameter("subnet");

+				if (subnet == null)

+					subnet = "-";

+				String nodepatt = req.getParameter("nodepatt");

+				String t = req.getParameter("seq");

+				int seq = (t != null) ? Integer.parseInt(t) : (IngressRoute.getMaxSequence() + 100);

+				ins = new Insertable[] { new IngressRoute(seq, feedid, user, subnet, NodeClass.lookupNodeNames(nodepatt)) };

+			} catch (Exception e) {

+				intlogger.info(e);

+				resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add ingress' command.");

+				return;

+			}

+		} else if (path.startsWith("/egress/")) {

+			// /internal/route/egress/?sub=%s&node=%s

+			try {

+				int subid = Integer.parseInt(req.getParameter("sub"));

+				EgressRoute er = EgressRoute.getEgressRoute(subid);

+				if (er != null) {

+					resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "An egress route already exists for that subscriber.");

+					return;

+				}

+				String node = NodeClass.normalizeNodename(req.getParameter("node"));

+				ins = new Insertable[] { new EgressRoute(subid, node) };

+			} catch (Exception e) {

+				intlogger.info(e);

+				resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add egress' command.");

+				return;

+			}

+		} else if (path.startsWith("/network/")) {

+			// /internal/route/network/?from=%s&to=%s&via=%s

+			try {

+				String nfrom = req.getParameter("from");

+				String nto   = req.getParameter("to");

+				String nvia  = req.getParameter("via");

+				if (nfrom == null || nto == null || nvia == null) {

+					resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing arguments in 'add network' command.");

+					return;

+				}

+				nfrom = NodeClass.normalizeNodename(nfrom);

+				nto   = NodeClass.normalizeNodename(nto);

+				nvia  = NodeClass.normalizeNodename(nvia);

+				NetworkRoute nr = new NetworkRoute(nfrom, nto, nvia);

+				for (NetworkRoute route : NetworkRoute.getAllNetworkRoutes()) {

+					if (route.getFromnode() == nr.getFromnode() && route.getTonode() == nr.getTonode()) {

+						resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Network route table already contains a route for "+nfrom+" and "+nto);

+						return;

+					}

+				}

+				ins = new Insertable[] { nr };

+			} catch (IllegalArgumentException e) {

+				intlogger.info(e);

+				resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add network' command.");

+				return;

+			}

+		}

+		if (ins == null) {

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");

+			return;

+		}

+		boolean rv = true;

+		for (Insertable dd : ins) {

+			rv &= doInsert(dd);

+		}

+		if (rv) {

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			provisioningDataChanged();

+			provisioningParametersChanged();

+		} else {

+			// Something went wrong with the INSERT

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/StatisticsServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/StatisticsServlet.java
new file mode 100644
index 0000000..1c508b7
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/StatisticsServlet.java
@@ -0,0 +1,588 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.text.ParseException;

+import java.text.SimpleDateFormat;

+import java.util.Calendar;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.Map;

+import java.util.TimeZone;

+import javax.servlet.ServletOutputStream;

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+import org.json.JSONException;

+import org.json.LOGJSONObject;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * This Servlet handles requests to the &lt;Statistics API&gt; and &lt;Statistics consolidated resultset&gt;.

+ * @author Manish Singh 

+ * @version $Id: StatisticsServlet.java,v 1.11 2016/08/10 17:27:02 Manish Exp $

+ */

+@SuppressWarnings("serial")

+

+public class StatisticsServlet extends BaseServlet {

+

+	private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);

+	private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";

+	private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";

+

+	

+	/**

+	 * DELETE a logging URL -- not supported.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		String message = "DELETE not allowed for the logURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * GET a Statistics URL -- retrieve Statistics data for a feed or subscription.

+	 * See the <b>Statistics API</b> document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
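+		// Example request (illustrative; the actual servlet path depends on the web.xml mapping):
+		//   GET /statistics?feedid=4|7&type=all&output_type=csv&time=60
+		// returns a consolidated CSV report for feeds 4 and 7 covering the last 60 minutes.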

+		

+		Map<String, String> map = buildMapFromRequest(req);

+		if (map.get("err") != null) {

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));

+			return;

+		}

+		// check Accept: header??

+		

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(LOGLIST_CONTENT_TYPE);

+		ServletOutputStream out = resp.getOutputStream();

+		

+		

+		String outputType = "json";

+		String feedids = null;

+		

+		if(req.getParameter("feedid") ==null && req.getParameter("groupid") ==null)

+		{

+			out.print("Invalid request, Feedid or Group ID is required.");

+		}

+		

+	    if(req.getParameter("feedid")!=null && req.getParameter("groupid") == null) {

+			map.put("feedids", req.getParameter("feedid").replace("|", ",").toString());

+		}

+

+		if(req.getParameter("groupid") != null && req.getParameter("feedid") ==null) {

+			  // String groupid1 = null;

+			StringBuffer groupid1 = new  StringBuffer();  

+			   

+				 try {

+					 System.out.println("feeedidsssssssss");

+					 groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));

+					  System.out.println("feeedids"+req.getParameter("groupid"));

+					  

+						map.put("feedids", groupid1.toString());

+						System.out.println("groupid1" +groupid1.toString());

+					  

+					  					   

+				  } catch (NumberFormatException e) {

+					 e.printStackTrace();

+				  } catch (SQLException e) {

+				    e.printStackTrace();

+				 }

+			}

+		if(req.getParameter("groupid") != null && req.getParameter("feedid") !=null) {

+			   StringBuffer groupid1 = new  StringBuffer();

+			     

+				   

+				 try {

+					 System.out.println("both r not null");

+					 groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));

+					  System.out.println("feeedids"+req.getParameter("groupid"));

+					  groupid1.append(",");

+					   groupid1.append(req.getParameter("feedid").replace("|", ",").toString());

+					   

+						map.put("feedids", groupid1.toString());

+						

+					

+						System.out.println("groupid1" +groupid1.toString());

+					  

+					  					   

+				  } catch (NumberFormatException e) {

+					 e.printStackTrace();

+				  } catch (SQLException e) {

+				    e.printStackTrace();

+				 }

+			}

+		

+		

+				

+		if(req.getParameter("subid")!=null && req.getParameter("feedid") !=null) {

+			 StringBuffer subidstr = new  StringBuffer();

+//			 subidstr.append(" and e.DELIVERY_SUBID in(subid)");

+//			  subidstr.append(req.getParameter("subid").replace("|", ",").toString());

+			 subidstr.append("and e.DELIVERY_SUBID in(");

+			

+			 subidstr.append(req.getParameter("subid").replace("|", ",").toString());

+			 subidstr.append(")");

+			 map.put("subid", subidstr.toString());

+		}

+		if(req.getParameter("subid")!=null && req.getParameter("groupid") !=null) {

+			 StringBuffer subidstr = new  StringBuffer();

+//			 subidstr.append(" and e.DELIVERY_SUBID in(subid)");

+//			  subidstr.append(req.getParameter("subid").replace("|", ",").toString());

+			 subidstr.append("and e.DELIVERY_SUBID in(");

+			

+			 subidstr.append(req.getParameter("subid").replace("|", ",").toString());

+			 subidstr.append(")");

+			 map.put("subid", subidstr.toString());

+		}

+		if(req.getParameter("type")!=null) {

+			map.put("eventType", req.getParameter("type").replace("|", ",").toString());

+		}

+			if(req.getParameter("output_type")!=null) {

+			map.put("output_type", req.getParameter("output_type").toString());

+		}

+		if(req.getParameter("start_time")!=null) {

+			map.put("start_time", req.getParameter("start_time").toString());

+		}

+		if(req.getParameter("end_time")!=null) {

+			map.put("end_time", req.getParameter("end_time").toString());

+		}

+		

+		if(req.getParameter("time")!=null) {

+			map.put("start_time", req.getParameter("time").toString());

+			map.put("end_time", null);

+			}

+		

+		

+				

+		if(req.getParameter("output_type") !=null)

+		{

+			outputType = req.getParameter("output_type");

+		}

+		

+	

+		try {

+			

+			String filterQuery = this.queryGeneretor(map);

+			eventlogger.debug("SQL Query for Statistics resultset. "+filterQuery);

+			

+			ResultSet rs=this.getRecordsForSQL(filterQuery);

+			

+			if(outputType.equals("csv")) {

+				resp.setContentType("application/octet-stream");

+				Date date = new Date() ;

+				SimpleDateFormat dateFormat = new SimpleDateFormat("dd-MM-YYYY HH:mm:ss") ;

+				resp.setHeader("Content-Disposition", "attachment; filename=\"result:"+dateFormat.format(date)+".csv\"");

+				eventlogger.info("Generating CSV file from Statistics resultset");

+				

+				rsToCSV(rs, out);

+			}

+			else {

+				eventlogger.info("Generating JSON for Statistics resultset");

+				this.rsToJson(rs, out);	

+			}

+		} 

+		catch (IOException e) {

+			eventlogger.error("IOException - Generating JSON/CSV:"+e);

+			e.printStackTrace();

+		 } 

+		catch (JSONException e) {

+			eventlogger.error("JSONException - executing SQL query:"+e);

+			e.printStackTrace();

+		} catch (SQLException e) {

+			eventlogger.error("SQLException - executing SQL query:"+e);

+			e.printStackTrace();

+		} catch (ParseException e) {

+			eventlogger.error("ParseException - executing SQL query:"+e);

+			e.printStackTrace();

+		}

+	}

+	

+	

+	/**

+	 * rsToCSV - Convert a statistics ResultSet to CSV output.
+	 * @param rs the ResultSet to convert
+	 * @param out the ServletOutputStream to write the CSV to
+	 * @throws IOException if writing to the output stream fails
+	 * @throws SQLException if reading the ResultSet fails

+	 */

+	public void rsToCSV(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {

+		String header = "FEEDNAME,FEEDID,FILES_PUBLISHED,PUBLISH_LENGTH, FILES_DELIVERED, DELIVERED_LENGTH, SUBSCRIBER_URL, SUBID, PUBLISH_TIME,DELIVERY_TIME, AverageDelay\n";

+

+		// String header = "FEEDNAME,FEEDID,TYPE,REMOTE_ADDR,DELIVERY_SUBID,REQURI,TOTAL CONTENT LENGTH,NO OF FILE,AVERAGE DELAY\n";

+		 

+         out.write(header.getBytes());

+         			            

+         while(rs.next()) {

+         	StringBuffer line = new StringBuffer();

+	            line.append(rs.getString("FEEDNAME"));

+	            line.append(",");

+	            line.append(rs.getString("FEEDID"));

+	            line.append(",");

+	            line.append(rs.getString("FILES_PUBLISHED"));

+	            line.append(",");

+	            line.append(rs.getString("PUBLISH_LENGTH"));

+	            line.append(",");

+	            line.append(rs.getString("FILES_DELIVERED"));

+	            line.append(",");

+	            line.append(rs.getString("DELIVERED_LENGTH"));

+	            line.append(",");

+	            line.append(rs.getString("SUBSCRIBER_URL"));

+	            line.append(",");

+	            line.append(rs.getString("SUBID"));

+	            line.append(",");

+	            line.append(rs.getString("PUBLISH_TIME"));

+	            line.append(",");

+	            line.append(rs.getString("DELIVERY_TIME"));

+	            line.append(",");

+	            line.append(rs.getString("AverageDelay"));

+	            line.append(",");

+	        

+	            line.append("\n");

+	            out.write(line.toString().getBytes());

+	            out.flush();

+         }

+	}

+	

+	/**

+	 * rsToJson - Convert a statistics ResultSet to a JSON array.
+	 * @param rs the ResultSet to convert
+	 * @param out the ServletOutputStream to write the JSON to
+	 * @throws IOException if writing to the output stream fails
+	 * @throws SQLException if reading the ResultSet fails

+	 */

+	public void rsToJson(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {

+		

+		String fields[] = {"FEEDNAME","FEEDID","FILES_PUBLISHED","PUBLISH_LENGTH", "FILES_DELIVERED", "DELIVERED_LENGTH", "SUBSCRIBER_URL", "SUBID", "PUBLISH_TIME","DELIVERY_TIME", "AverageDelay"};

+		StringBuffer line = new StringBuffer();

+       	

+		 line.append("[\n");

+		

+		 String pfx = "";
+		 while(rs.next()) {
+			 LOGJSONObject j2 = new LOGJSONObject();
+			 for (String key : fields) {
+				Object v = rs.getString(key);
+				if (v != null)
+					j2.put(key.toLowerCase(), v);
+				else
+					j2.put(key.toLowerCase(), "");
+			}
+			// separate records with a comma, but do not leave a trailing comma before the closing bracket
+			line.append(pfx);
+			line.append(j2.toString());
+			pfx = ",\n";
+		 }
+		 line.append("\n]");

+		out.print(line.toString());

+	}

+	

+	/**

+	 * getFeedIdsByGroupId - Get the FEEDIDs belonging to a group as a comma-separated list.
+	 * @param groupIds the group ID to look up
+	 * @throws SQLException if the lookup query fails

+	 */

+	public StringBuffer getFeedIdsByGroupId(int groupIds) throws SQLException{ 

+		 

+		DB db = null; 

+		Connection conn = null; 

+		PreparedStatement prepareStatement = null; 

+		ResultSet resultSet=null; 

+		String sqlGoupid = null; 

+		StringBuffer feedIds = new StringBuffer(); 

+	 

+		try { 

+			db = new DB(); 

+			conn = db.getConnection(); 

+			sqlGoupid= " SELECT FEEDID from FEEDS  WHERE GROUPID = ?"; 

+			prepareStatement =conn.prepareStatement(sqlGoupid); 

+			prepareStatement.setInt(1, groupIds); 

+			resultSet=prepareStatement.executeQuery(); 

+			while(resultSet.next()){ 		

+				feedIds.append(resultSet.getInt("FEEDID"));

+				feedIds.append(",");

+			} 

+			if (feedIds.length() > 0)
+				feedIds.deleteCharAt(feedIds.length()-1);	// drop the trailing comma

+			

+		} catch (SQLException e) { 

+			e.printStackTrace(); 

+		} finally { 

+			try { 

+					if(resultSet != null) { 

+						resultSet.close(); 

+						resultSet = null; 

+					} 

+	 

+					if(prepareStatement != null) { 

+						prepareStatement.close(); 

+						prepareStatement = null; 

+					} 

+	 

+					if(conn != null){ 

+						db.release(conn); 

+					} 

+				} catch(Exception e) { 

+					e.printStackTrace(); 

+				} 

+		} 

+		return feedIds; 

+	}

+

+	

+	/**

+	 * queryGeneretor - Generate the SQL query for the statistics resultset.
+	 * @param map key/value pairs of all user input fields
+	 * @throws ParseException if the supplied start/end times cannot be parsed

+	 */

+	public String queryGeneretor(Map<String, String> map) throws ParseException{
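+		// Three query variants are produced below: no time arguments (all matching records),
+		// start_time only (records newer than now minus start_time minutes), or both
+		// start_time and end_time (records between the two supplied timestamps).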

+		 

+		String sql = null;

+		String eventType = null;

+		String feedids = null;

+		String start_time = null;

+		String end_time = null;

+		String subid=" ";

+		if(map.get("eventType") != null){

+			eventType=(String) map.get("eventType");

+		}

+		if(map.get("feedids") != null){

+			feedids=(String) map.get("feedids");

+		}

+		if(map.get("start_time") != null){

+			start_time=(String) map.get("start_time");

+		}

+		if(map.get("end_time") != null){

+			end_time=(String) map.get("end_time");

+		}

+		if("all".equalsIgnoreCase(eventType)){

+			eventType="PUB','DEL, EXP, PBF";

+		}

+		if(map.get("subid") != null){

+			subid=(String) map.get("subid");

+		}

+		

+		eventlogger.info("Generating sql query to get Statistics resultset. ");

+		

+		if(end_time==null && start_time==null ){

+

+				

+				sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+")  and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED,  sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME,  AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204  group by SUBID";

+				

+			return sql;

+		}else if(start_time!=null && end_time==null ){

+

+			long inputTimeInMilli=60000*Long.parseLong(start_time);

+			Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));

+			long currentTimeInMilli=cal.getTimeInMillis();

+			long compareTime=currentTimeInMilli-inputTimeInMilli;

+			

+			  sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+")  and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED,  sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME,  AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time>="+compareTime+" group by SUBID";

+			 

+    		 return sql;

+		

+		}else{

+			SimpleDateFormat inFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");

+			Date startDate=inFormat.parse(start_time);

+			Date endDate=inFormat.parse(end_time);

+

+			long startInMillis=startDate.getTime();

+			long endInMillis=endDate.getTime();

+			

+			 {

+				

+				sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+")  and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED,  sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME,  AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time between "+startInMillis+" and "+endInMillis+" group by SUBID";

+				

+			}

+			return sql;

+		}

+	}

+	

+	

+	/**

+	 * PUT a Statistics URL -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		String message = "PUT not allowed for the StatisticsURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * POST a Statistics URL -- not supported.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		String message = "POST not allowed for the StatisticsURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+

+	private Map<String, String> buildMapFromRequest(HttpServletRequest req) {

+		Map<String, String> map = new HashMap<String, String>();

+		String s = req.getParameter("type");

+		if (s != null) {

+			if (s.equals("pub") || s.equals("del") || s.equals("exp")) {

+				map.put("type", s);

+			} else {

+				map.put("err", "bad type");

+				return map;

+			}

+		} else {
+			map.put("type", "all");
+		}

+		map.put("publishSQL", "");

+		map.put("statusSQL", "");

+		map.put("resultSQL", "");

+		map.put("reasonSQL", "");

+

+		s = req.getParameter("publishId");

+		if (s != null) {

+			if (s.indexOf("'") >= 0) {

+				map.put("err", "bad publishId");

+				return map;

+			}

+			map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");

+		}

+

+		s = req.getParameter("statusCode");

+		if (s != null) {

+			String sql = null;

+			if (s.equals("success")) {

+				sql = " AND STATUS >= 200 AND STATUS < 300";

+			} else if (s.equals("redirect")) {

+				sql = " AND STATUS >= 300 AND STATUS < 400";

+			} else if (s.equals("failure")) {

+				sql = " AND STATUS >= 400";

+			} else {

+				try {

+					Integer n = Integer.parseInt(s);

+					if ((n >= 100 && n < 600) || (n == -1))

+						sql = " AND STATUS = " + n;

+				} catch (NumberFormatException e) {

+				}

+			}

+			if (sql == null) {

+				map.put("err", "bad statusCode");

+				return map;

+			}

+			map.put("statusSQL", sql);

+			map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));

+		}

+

+		s = req.getParameter("expiryReason");

+		if (s != null) {

+			map.put("type", "exp");

+			if (s.equals("notRetryable")) {

+				map.put("reasonSQL", " AND REASON = 'notRetryable'");

+			} else if (s.equals("retriesExhausted")) {

+				map.put("reasonSQL", " AND REASON = 'retriesExhausted'");

+			} else if (s.equals("diskFull")) {

+				map.put("reasonSQL", " AND REASON = 'diskFull'");

+			} else if (s.equals("other")) {

+				map.put("reasonSQL", " AND REASON = 'other'");

+			} else {

+				map.put("err", "bad expiryReason");

+				return map;

+			}

+		}

+

+		long stime = getTimeFromParam(req.getParameter("start"));

+		if (stime < 0) {

+			map.put("err", "bad start");

+			return map;

+		}

+		long etime = getTimeFromParam(req.getParameter("end"));

+		if (etime < 0) {

+			map.put("err", "bad end");

+			return map;

+		}

+		if (stime == 0 && etime == 0) {

+			etime = System.currentTimeMillis();

+			stime = etime - TWENTYFOUR_HOURS;

+		} else if (stime == 0) {

+			stime = etime - TWENTYFOUR_HOURS;

+		} else if (etime == 0) {

+			etime = stime + TWENTYFOUR_HOURS;

+		}

+		map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));

+		return map;

+	}

+	private long getTimeFromParam(final String s) {

+		if (s == null)

+			return 0;

+		try {

+			// First, look for an RFC 3339 date
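+			// e.g. "2017-06-01T12:00:00Z" or "2017-06-01T12:00:00.000Z"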

+			String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;

+			SimpleDateFormat sdf = new SimpleDateFormat(fmt);

+			Date d = sdf.parse(s);

+			return d.getTime();

+		} catch (ParseException e) {
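+			// not an RFC 3339 timestamp; fall through and try an epoch value in milliseconds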

+		}

+		try {

+			// Also allow a long (in ms); useful for testing

+			long n = Long.parseLong(s);

+			return n;

+		} catch (NumberFormatException e) {

+		}

+		intlogger.info("Error parsing time="+s);

+		return -1;

+	}

+

+	

+	private ResultSet getRecordsForSQL(String sql) {

+		intlogger.debug(sql);

+		long start = System.currentTimeMillis();

+		DB db = new DB();

+		Connection conn = null;

+		ResultSet rs=null;

+		

+		try {

+			conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			PreparedStatement pst=conn.prepareStatement(sql);

+			rs=pst.executeQuery();

+			//this.rsToJson(rs)

+			//rs.close();

+			stmt.close();

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+		

+		intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");

+		

+		return rs;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubLogServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubLogServlet.java
new file mode 100644
index 0000000..0f196b2
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubLogServlet.java
@@ -0,0 +1,39 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+/**

+ * This servlet handles requests to the &lt;subLogURL&gt;,

+ * which are generated by the provisioning server to handle the log query API.

+ *

+ * @author Robert Eby

+ * @version $Id: SubLogServlet.java,v 1.1 2013/04/26 21:00:25 eby Exp $

+ */

+@SuppressWarnings("serial")

+public class SubLogServlet extends LogServlet {

+	public SubLogServlet() {

+		super(false);

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscribeServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscribeServlet.java
new file mode 100644
index 0000000..ea79e9f
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscribeServlet.java
@@ -0,0 +1,288 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InvalidObjectException;

+import java.util.Collection;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONObject;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+import com.att.research.datarouter.provisioning.utils.JSONUtilities;

+

+/**

+ * This servlet handles provisioning for the &lt;subscribeURL&gt; which is generated by the provisioning

+ * server to handle the creation and inspection of subscriptions to a specific feed.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class SubscribeServlet extends ProxyServlet {

+	

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.SubscribeServlet");

+

+	/**

+	 * DELETE on the &lt;subscribeUrl&gt; -- not supported.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "DELETE not allowed for the subscribeURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * GET on the &lt;subscribeUrl&gt; -- get the list of subscriptions to a feed.

+	 * See the <i>Subscription Collection Query</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed feed = Feed.getFeedById(feedid);

+		if (feed == null || feed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// Display a list of URLs

+		Collection<String> list = Subscription.getSubscriptionUrlList(feedid);

+		String t = JSONUtilities.createJSONArray(list);

+

+		// send response

+		elr.setResult(HttpServletResponse.SC_OK);

+		eventlogger.info(elr);

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(SUBLIST_CONTENT_TYPE);

+		resp.getOutputStream().print(t);

+	}

+	/**

+	 * PUT on the &lt;subscribeUrl&gt; -- not supported.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		String message = "PUT not allowed for the subscribeURL.";

+		EventLogRecord elr = new EventLogRecord(req);

+		elr.setMessage(message);

+		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+		eventlogger.info(elr);

+		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+	}

+	/**

+	 * POST on the &lt;subscribeUrl&gt; -- create a new subscription to a feed.

+	 * See the <i>Creating a Subscription</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPost(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int feedid = getIdFromPath(req);

+		if (feedid < 0) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Feed feed = Feed.getFeedById(feedid);

+		if (feed == null || feed.isDeleted()) {

+			message = "Missing or bad feed number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// check content type is SUB_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		if (++active_subs > max_subs) {

+			active_subs--;

+			message = "Cannot create subscription; the maximum number of subscriptions has been configured.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_CONFLICT);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_CONFLICT, message);

+			return;

+		}

+		Subscription sub = null;

+		try {

+			sub = new Subscription(jo);

+		} catch (InvalidObjectException e) {

+			active_subs--;

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		sub.setFeedid(feedid);

+		sub.setSubscriber(bhdr);	// set from X-ATT-DR-ON-BEHALF-OF header

+

+		// Check if this subscription already exists; not an error (yet), just warn

+		Subscription sub2 = Subscription.getSubscriptionMatching(sub);

+		if (sub2 != null)

+			intlogger.warn("PROV0011 Creating a duplicate subscription: new subid="+sub.getSubid()+", old subid="+sub2.getSubid());

+

+		// Create SUBSCRIPTIONS table entries

+		if (doInsert(sub)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_CREATED);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_CREATED);

+			resp.setContentType(SUBFULL_CONTENT_TYPE);

+			resp.setHeader("Location", sub.getLinks().getSelf());

+			resp.getOutputStream().print(sub.asLimitedJSONObject().toString());

+

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the INSERT

+			active_subs--;

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+}
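
A hedged client-side sketch of the POST handled by doPost() above. The X-ATT-DR-ON-BEHALF-OF header, the version check (1.0 or 2.0), and the 201 Created / Location-header response come from the servlet code; the host name, feed id, media-type string, and JSON body fields are illustrative assumptions only (the exact values are defined by BaseServlet, the Subscription bean, and the Provisioning API document).

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class CreateSubscriptionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder <subscribeURL> for feed 42 on a placeholder provisioning host.
        URL url = new URL("https://prov.example.com/subscribe/42");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        // doPost() requires the subscription base content type at version 1.0 or 2.0;
        // the media-type string below is an assumption for illustration.
        conn.setRequestProperty("Content-Type", "application/vnd.att-dr.subscription; version=2.0");
        // Header the servlet uses to record the subscriber identity.
        conn.setRequestProperty("X-ATT-DR-ON-BEHALF-OF", "example-subscriber");
        // Illustrative subscription JSON; the real field set is defined by the Subscription bean.
        String body = "{\"delivery\":{\"url\":\"https://subscriber.example.com/delivery\"}}";
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        if (conn.getResponseCode() == HttpURLConnection.HTTP_CREATED) {
            // On success the servlet returns 201 with the new <subscriptionURL> in the Location header.
            System.out.println("Created: " + conn.getHeaderField("Location"));
        } else {
            System.out.println("Request failed: " + conn.getResponseCode());
        }
        conn.disconnect();
    }
}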

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscriptionServlet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscriptionServlet.java
new file mode 100644
index 0000000..0bb4717
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SubscriptionServlet.java
@@ -0,0 +1,476 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.IOException;

+import java.io.InvalidObjectException;

+import java.net.HttpURLConnection;

+import java.net.URL;

+import java.util.List;

+import java.util.Vector;

+

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import org.json.JSONException;

+import org.json.JSONObject;

+

+import com.att.eelf.configuration.EELFLogger;

+import com.att.eelf.configuration.EELFManager;

+import com.att.research.datarouter.authz.AuthorizationResponse;

+import com.att.research.datarouter.provisioning.beans.EventLogRecord;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

+

+/**

+ * This servlet handles provisioning for the &lt;subscriptionURL&gt; which is generated by the provisioning

+ * server to handle the inspection, modification, and deletion of a particular subscription to a feed.

+ * It supports DELETE to delete a subscription, GET to retrieve information about the subscription,

+ * and PUT to modify the subscription.  In DR 3.0, POST is also supported in order to reset the subscription

+ * timers for individual subscriptions.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+@SuppressWarnings("serial")

+public class SubscriptionServlet extends ProxyServlet {

+	public static final String SUBCNTRL_CONTENT_TYPE = "application/vnd.att-dr.subscription-control";

+	//Adding EELF Logger Rally:US664892  

+    private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("com.att.research.datarouter.provisioning.SubscriptionServlet");

+

+	/**

+	 * DELETE on the &lt;subscriptionUrl&gt; -- delete a subscription.

+	 * See the <i>Deleting a Subscription</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doDelete");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doDelete(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int subid = getIdFromPath(req);

+		if (subid < 0) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Subscription sub = Subscription.getSubscriptionById(subid);

+		if (sub == null) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// Delete Subscription

+		if (doDelete(sub)) {

+			active_subs--;

+			// send response

+			elr.setResult(HttpServletResponse.SC_NO_CONTENT);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_NO_CONTENT);

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the DELETE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * GET on the &lt;subscriptionUrl&gt; -- get information about a subscription.

+	 * See the <i>Retrieving Information about a Subscription</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doGet");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doGet(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int subid = getIdFromPath(req);

+		if (subid < 0) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Subscription sub = Subscription.getSubscriptionById(subid);

+		if (sub == null) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+

+		// send response

+		elr.setResult(HttpServletResponse.SC_OK);

+		eventlogger.info(elr);

+		resp.setStatus(HttpServletResponse.SC_OK);

+		resp.setContentType(SUBFULL_CONTENT_TYPE);

+		resp.getOutputStream().print(sub.asJSONObject(true).toString());

+	}

+	/**

+	 * PUT on the &lt;subscriptionUrl&gt; -- modify a subscription.

+	 * See the <i>Modifying a Subscription</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+		setIpAndFqdnForEelf("doPut");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPut(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		int subid = getIdFromPath(req);

+		if (subid < 0) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		Subscription oldsub = Subscription.getSubscriptionById(subid);

+		if (oldsub == null) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_NOT_FOUND);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		// check content type is SUB_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		if (intlogger.isDebugEnabled())

+			intlogger.debug(jo.toString());

+		Subscription sub = null;

+		try {

+			sub = new Subscription(jo);

+		} catch (InvalidObjectException e) {

+			message = e.getMessage();

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		sub.setSubid(oldsub.getSubid());

+		sub.setFeedid(oldsub.getFeedid());

+		sub.setSubscriber(bhdr);	// set from X-ATT-DR-ON-BEHALF-OF header

+

+		String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP")); //Adding for group feature:Rally US708115  

+		if (!oldsub.getSubscriber().equals(sub.getSubscriber()) && subjectgroup == null) {

+			message = "This subscriber must be modified by the same subscriber that created it.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+

+		// Update SUBSCRIPTIONS table entries

+		if (doUpdate(sub)) {

+			// send response

+			elr.setResult(HttpServletResponse.SC_OK);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_OK);

+			resp.setContentType(SUBFULL_CONTENT_TYPE);

+			resp.getOutputStream().print(sub.asLimitedJSONObject().toString());

+

+			/** Change ownership of subscriber. Adding for group feature: Rally US708115 */

+			if (jo.has("changeowner") && subjectgroup != null) {

+				Boolean changeowner = (Boolean) jo.get("changeowner");

+				if (changeowner != null && changeowner.equals(true)) {

+					sub.setSubscriber(req.getHeader(BEHALF_HEADER));

+					sub.changeOwnerShip();

+				}

+			}

+			/***End of change ownership*/

+

+			provisioningDataChanged();

+		} else {

+			// Something went wrong with the UPDATE

+			elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);

+		}

+	}

+	/**

+	 * POST on the &lt;subscriptionUrl&gt; -- control a subscription.

+	 * See the <i>Resetting a Subscription's Retry Schedule</i> section in the <b>Provisioning API</b>

+	 * document for details on how this method should be invoked.

+	 */

+	@Override

+	public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {

+// OLD pre-3.0 code

+//		String message = "POST not allowed for the subscriptionURL.";

+//		EventLogRecord elr = new EventLogRecord(req);

+//		elr.setMessage(message);

+//		elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);

+//		eventlogger.info(elr);

+//		resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);

+

+		setIpAndFqdnForEelf("doPost");

+		eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));

+		EventLogRecord elr = new EventLogRecord(req);

+		String message = isAuthorizedForProvisioning(req);

+		if (message != null) {

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		if (isProxyServer()) {

+			super.doPost(req, resp);

+			return;

+		}

+		String bhdr = req.getHeader(BEHALF_HEADER);

+		if (bhdr == null) {

+			message = "Missing "+BEHALF_HEADER+" header.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		final int subid = getIdFromPath(req);

+		if (subid < 0 || Subscription.getSubscriptionById(subid) == null) {

+			message = "Missing or bad subscription number.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		// check content type is SUBCNTRL_CONTENT_TYPE, version 1.0

+		ContentHeader ch = getContentHeader(req);

+		String ver = ch.getAttribute("version");

+		if (!ch.getType().equals(SUBCNTRL_CONTENT_TYPE) || !ver.equals("1.0")) {

+			message = "Incorrect content-type";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);

+			return;

+		}

+		// Check with the Authorizer

+		AuthorizationResponse aresp = authz.decide(req);

+		if (! aresp.isAuthorized()) {

+			message = "Policy Engine disallows access.";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_FORBIDDEN);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);

+			return;

+		}

+		JSONObject jo = getJSONfromInput(req);

+		if (jo == null) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+			return;

+		}

+		try {

+			// Only the active POD sends notifications

+			boolean active = SynchronizerTask.getSynchronizer().isActive();

+			boolean b = jo.getBoolean("failed");

+			if (active && !b) {

+				// Notify all nodes to reset the subscription

+				SubscriberNotifyThread t = new SubscriberNotifyThread();

+				t.resetSubscription(subid);

+				t.start();

+			}

+			// send response

+			elr.setResult(HttpServletResponse.SC_ACCEPTED);

+			eventlogger.info(elr);

+			resp.setStatus(HttpServletResponse.SC_ACCEPTED);

+		} catch (JSONException e) {

+			message = "Badly formed JSON";

+			elr.setMessage(message);

+			elr.setResult(HttpServletResponse.SC_BAD_REQUEST);

+			eventlogger.info(elr);

+			resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);

+		}

+	}

+

+	/**

+	 * A Thread class used to serially send reset notifications to all nodes in the DR network,

+	 * when a POST is received for a subscription.

+	 */

+	public class SubscriberNotifyThread extends Thread {

+		public static final String URL_TEMPLATE = "http://%s/internal/resetSubscription/%d";

+		private List<String> urls = new Vector<String>();

+

+		public SubscriberNotifyThread() {

+			setName("SubscriberNotifyThread");

+		}

+		public void resetSubscription(int subid) {

+			for (String nodename : BaseServlet.getNodes()) {

+				String u = String.format(URL_TEMPLATE, nodename, subid);

+				urls.add(u);

+			}

+		}

+		public void run() {

+			try {

+				while (!urls.isEmpty()) {

+					String u = urls.remove(0);

+					try {

+						URL url = new URL(u);

+						HttpURLConnection conn = (HttpURLConnection) url.openConnection();

+						conn.connect();

+						conn.getContentLength();	// Force the GET through

+						conn.disconnect();

+					} catch (IOException e) {

+						intlogger.info("IOException Error accessing URL: "+u+": " + e.getMessage());

+					}

+				}

+			} catch (Exception e) {

+				intlogger.warn("Caught exception in SubscriberNotifyThread: "+e);

+				e.printStackTrace();

+			}

+		}

+	}

+}
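
The doPost() above accepts a small control document rather than a full subscription. A sketch of the corresponding client request follows; the content type, the "failed" flag, the behalf header, and the 202 Accepted reply are taken from the servlet code, while the host and subscription id are placeholders.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ResetSubscriptionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder <subscriptionUrl> for subscription 7 on a placeholder provisioning host.
        URL url = new URL("https://prov.example.com/subs/7");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        // SubscriptionServlet.doPost() requires exactly this content type at version 1.0.
        conn.setRequestProperty("Content-Type", "application/vnd.att-dr.subscription-control; version=1.0");
        conn.setRequestProperty("X-ATT-DR-ON-BEHALF-OF", "example-subscriber");
        // "failed": false asks the active POD to notify every node via
        // /internal/resetSubscription/<subid>, resetting the subscription's retry timers.
        String body = "{\"failed\": false}";
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        // The servlet replies 202 Accepted once the reset has been queued.
        System.out.println("Response code: " + conn.getResponseCode());
        conn.disconnect();
    }
}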

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SynchronizerTask.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SynchronizerTask.java
new file mode 100644
index 0000000..9cb9b7c
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/SynchronizerTask.java
@@ -0,0 +1,614 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning;

+

+import java.io.ByteArrayOutputStream;

+import java.io.File;

+import java.io.FileInputStream;

+import java.io.InputStream;

+import java.net.InetAddress;

+import java.net.UnknownHostException;

+import java.nio.file.Files;

+import java.nio.file.Path;

+import java.nio.file.Paths;

+import java.nio.file.StandardCopyOption;

+import java.security.KeyStore;

+import java.sql.Connection;

+import java.sql.SQLException;

+import java.util.ArrayList;

+import java.util.Arrays;

+import java.util.Collection;

+import java.util.HashMap;

+import java.util.Map;

+import java.util.Properties;

+import java.util.Set;

+import java.util.Timer;

+import java.util.TimerTask;

+import java.util.TreeSet;

+

+import javax.servlet.http.HttpServletResponse;

+

+import org.apache.http.HttpEntity;

+import org.apache.http.HttpResponse;

+import org.apache.http.client.methods.HttpGet;

+import org.apache.http.client.methods.HttpPost;

+import org.apache.http.conn.scheme.Scheme;

+import org.apache.http.conn.ssl.SSLSocketFactory;

+import org.apache.http.entity.ByteArrayEntity;

+import org.apache.http.entity.ContentType;

+import org.apache.http.impl.client.AbstractHttpClient;

+import org.apache.http.impl.client.DefaultHttpClient;

+import org.apache.log4j.Logger;

+import org.json.JSONArray;

+import org.json.JSONException;

+import org.json.JSONObject;

+import org.json.JSONTokener;

+

+import com.att.research.datarouter.provisioning.beans.EgressRoute;

+import com.att.research.datarouter.provisioning.beans.Feed;

+import com.att.research.datarouter.provisioning.beans.IngressRoute;

+import com.att.research.datarouter.provisioning.beans.NetworkRoute;

+import com.att.research.datarouter.provisioning.beans.Parameters;

+import com.att.research.datarouter.provisioning.beans.Subscription;

+import com.att.research.datarouter.provisioning.beans.Syncable;

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.RLEBitSet;

+import com.att.research.datarouter.provisioning.utils.LogfileLoader;

+import com.att.research.datarouter.provisioning.utils.URLUtilities;

+import com.att.research.datarouter.provisioning.beans.Group; //Groups feature Rally:US708115 - 1610	

+

+/**

+ * This class handles synchronization between provisioning servers (PODs).  It has three primary functions:

+ * <ol>

+ * <li>Checking DNS periodically (every 5 seconds by default, per the configured sync interval) to see which POD the DNS CNAME points to. The CNAME will point to

+ * the active (master) POD.</li>

+ * <li>On non-master (standby) PODs, fetches provisioning data and logs in order to keep MySQL in sync.</li>

+ * <li>Providing information to other parts of the system as to the current role (ACTIVE, STANDBY, UNKNOWN)

+ * of this POD.</li>

+ * </ol>

+ * <p>For this to work correctly, the following code needs to be placed at the beginning of main().</p>

+ * <code>

+ * 		Security.setProperty("networkaddress.cache.ttl", "10");

+ * </code>

+ *

+ * @author Robert Eby

+ * @version $Id: SynchronizerTask.java,v 1.10 2014/03/21 13:50:10 eby Exp $

+ */

+public class SynchronizerTask extends TimerTask {

+	/** This is a singleton -- there is only one SynchronizerTask object in the server */

+	private static SynchronizerTask synctask;

+

+	/** This POD is unknown -- not on the list of PODs */

+	public static final int UNKNOWN = 0;

+	/** This POD is active -- on the list of PODs, and the DNS CNAME points to us */

+	public static final int ACTIVE = 1;

+	/** This POD is standby -- on the list of PODs, and the DNS CNAME does not point to us */

+	public static final int STANDBY = 2;

+	private static final String[] stnames = { "UNKNOWN", "ACTIVE", "STANDBY" };

+	private static final long ONE_HOUR = 60 * 60 * 1000L;

+

+	private final Logger logger;

+	private final Timer rolex;

+	private final String spooldir;

+	private int state;

+	private boolean doFetch;

+	private long nextsynctime;

+	private AbstractHttpClient httpclient = null;

+

+	/**

+	 * Get the singleton SynchronizerTask object.

+	 * @return the SynchronizerTask

+	 */

+	public static synchronized SynchronizerTask getSynchronizer() {

+		if (synctask == null)

+			synctask = new SynchronizerTask();

+		return synctask;

+	}

+

+	@SuppressWarnings("deprecation")

+	private SynchronizerTask() {

+		logger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+		rolex = new Timer();

+		spooldir = (new DB()).getProperties().getProperty("com.att.research.datarouter.provserver.spooldir");

+		state = UNKNOWN;

+		doFetch = true;		// start off with a fetch

+		nextsynctime = 0;

+

+		logger.info("PROV5000: Sync task starting, server state is UNKNOWN");

+		try {

+			Properties props = (new DB()).getProperties();

+			String type  = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");

+			String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);

+			String pass  = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);

+			KeyStore keyStore = KeyStore.getInstance(type);

+			FileInputStream instream = new FileInputStream(new File(store));

+			keyStore.load(instream, pass.toCharArray());

+			instream.close();

+

+			store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);

+			pass  = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);

+			KeyStore trustStore = null;

+			if (store != null && store.length() > 0) {

+				trustStore = KeyStore.getInstance(KeyStore.getDefaultType());

+				instream = new FileInputStream(new File(store));

+				trustStore.load(instream, pass.toCharArray());

+				instream.close();

+			}

+

+			// We are connecting with the node name, but the certificate will have the CNAME

+			// So we need to accept a non-matching certificate name

+			String keystorepass  = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY); //itrack.web.att.com/browse/DATARTR-6 for changing hard-coded passphrase ref

+			AbstractHttpClient hc = new DefaultHttpClient();

+			SSLSocketFactory socketFactory =

+				(trustStore == null)

+				? new SSLSocketFactory(keyStore, keystorepass)

+				: new SSLSocketFactory(keyStore, keystorepass, trustStore);

+			socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);

+			Scheme sch = new Scheme("https", 443, socketFactory);

+			hc.getConnectionManager().getSchemeRegistry().register(sch);

+			httpclient = hc;

+

+			// Run once every 5 seconds to check DNS, etc.

+			long interval = 0;

+			try {

+				String s = props.getProperty("com.att.research.datarouter.provserver.sync_interval", "5000");

+				interval = Long.parseLong(s);

+			} catch (NumberFormatException e) {

+				interval = 5000L;

+			}

+			rolex.scheduleAtFixedRate(this, 0L, interval);

+		} catch (Exception e) {

+			logger.warn("PROV5005: Problem starting the synchronizer: "+e);

+		}

+	}

+

+	/**

+	 * What is the state of this POD?

+	 * @return one of ACTIVE, STANDBY, UNKNOWN

+	 */

+	public int getState() {

+		return state;

+	}

+

+	/**

+	 * Is this the active POD?

+	 * @return true if we are active (the master), false otherwise

+	 */

+	public boolean isActive() {

+		return state == ACTIVE;

+	}

+

+	/**

+	 * This method is used to signal that another POD (the active POD) has sent us a /fetchProv request,

+	 * and that we should re-synchronize with the master.

+	 */

+	public void doFetch() {

+		doFetch = true;

+	}

+

+	/**

+	 * Runs periodically (every 5 seconds by default, per the configured sync interval) in order to <ol>

+	 * <li>look up DNS names,</li>

+	 * <li>determine the state of this POD,</li>

+	 * <li>if this is a standby POD, and the fetch flag is set, perform a fetch of state from the active POD.</li>

+	 * <li>if this is a standby POD, check if there are any new log records to be replicated.</li>

+	 * </ol>

+	 */

+	@Override

+	public void run() {

+		try {

+			state = lookupState();

+			if (state == STANDBY) {

+				// Only copy provisioning data FROM the active server TO the standby

+				if (doFetch || (System.currentTimeMillis() >= nextsynctime)) {

+					logger.debug("Initiating a sync...");

+					JSONObject jo = readProvisioningJSON();

+					if (jo != null) {

+						doFetch = false;

+						syncFeeds( jo.getJSONArray("feeds"));

+						syncSubs(  jo.getJSONArray("subscriptions"));

+						syncGroups(  jo.getJSONArray("groups")); //Rally:US708115 - 1610

+						syncParams(jo.getJSONObject("parameters"));

+						// The following will not be present in a version=1.0 provfeed

+						JSONArray ja = jo.optJSONArray("ingress");

+						if (ja != null)

+							syncIngressRoutes(ja);

+						JSONObject j2 = jo.optJSONObject("egress");

+						if (j2 != null)

+							syncEgressRoutes( j2);

+						ja = jo.optJSONArray("routing");

+						if (ja != null)

+							syncNetworkRoutes(ja);

+					}

+					logger.info("PROV5013: Sync completed.");

+					nextsynctime = System.currentTimeMillis() + ONE_HOUR;

+				}

+			} else {

+				// Don't do fetches on non-standby PODs

+				doFetch = false;

+			}

+

+			// Fetch DR logs as needed - server to server

+			LogfileLoader lfl = LogfileLoader.getLoader();

+			if (lfl.isIdle()) {

+				// Only fetch new logs if the loader is waiting for them.

+				logger.trace("Checking for logs to replicate...");

+				RLEBitSet local  = lfl.getBitSet();

+				RLEBitSet remote = readRemoteLoglist();

+				remote.andNot(local);

+				if (!remote.isEmpty()) {

+					logger.debug(" Replicating logs: "+remote);

+					replicateDRLogs(remote);

+				}

+			}

+		} catch (Exception e) {

+			logger.warn("PROV0020: Caught exception in SynchronizerTask: "+e);

+			e.printStackTrace();

+		}

+	}

+

+	/**

+	 * This method is used to look up the CNAME that points to the active server.

+	 * It returns 0 (UNKNOWN), 1(ACTIVE), or 2 (STANDBY) to indicate the state of this server.

+	 * @return the current state

+	 */

+	private int lookupState() {

+		int newstate = UNKNOWN;

+		try {

+			InetAddress myaddr = InetAddress.getLocalHost();

+			if (logger.isTraceEnabled())

+				logger.trace("My address: "+myaddr);

+			String this_pod = myaddr.getHostName();

+			Set<String> pods = new TreeSet<String>(Arrays.asList(BaseServlet.getPods()));

+			if (pods.contains(this_pod)) {

+				InetAddress pserver = InetAddress.getByName(BaseServlet.active_prov_name);

+				newstate = myaddr.equals(pserver) ? ACTIVE : STANDBY;

+				if (logger.isDebugEnabled() && System.currentTimeMillis() >= next_msg) {

+					logger.debug("Active POD = "+pserver+", Current state is "+stnames[newstate]);

+					next_msg = System.currentTimeMillis() + (5 * 60 * 1000L);

+				}

+			} else {

+				logger.warn("PROV5003: My name ("+this_pod+") is missing from the list of provisioning servers.");

+			}

+		} catch (UnknownHostException e) {

+			logger.warn("PROV5002: Cannot determine the name of this provisioning server.");

+		}

+

+		if (newstate != state)

+			logger.info(String.format("PROV5001: Server state changed from %s to %s", stnames[state], stnames[newstate]));

+		return newstate;

+	}

+	private static long next_msg = 0;	// only display the "Current state" msg every 5 mins.

+	/** Synchronize the Feeds in the JSONArray, with the Feeds in the DB. */

+	private void syncFeeds(JSONArray ja) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (int n = 0; n < ja.length(); n++) {

+			try {

+				Feed f = new Feed(ja.getJSONObject(n));

+				coll.add(f);

+			} catch (Exception e) {

+				logger.warn("PROV5004: Invalid object in feed: "+ja.optJSONObject(n));

+			}

+		}

+		if (sync(coll, Feed.getAllFeeds()))

+			BaseServlet.provisioningDataChanged();

+	}

+	/** Synchronize the Subscriptions in the JSONArray, with the Subscriptions in the DB. */

+	private void syncSubs(JSONArray ja) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (int n = 0; n < ja.length(); n++) {

+			try {

+				//Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+				JSONObject j = ja.getJSONObject(n);	 

+				j.put("sync", "true");

+				Subscription s = new Subscription(j);

+				coll.add(s);

+			} catch (Exception e) {

+				logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n));

+			}

+		}

+		if (sync(coll, Subscription.getAllSubscriptions()))

+			BaseServlet.provisioningDataChanged();

+	}

+

+	/**  Rally:US708115  - Synchronize the Groups in the JSONArray, with the Groups in the DB. */		

+	private void syncGroups(JSONArray ja) {		

+		Collection<Syncable> coll = new ArrayList<Syncable>();		

+		for (int n = 0; n < ja.length(); n++) {		

+			try {		

+				Group g = new Group(ja.getJSONObject(n));		

+				coll.add(g);		

+			} catch (Exception e) {		

+				logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n));		

+			}		

+		}		

+		if (sync(coll, Group.getAllgroups()))		

+			BaseServlet.provisioningDataChanged();		

+	}

+

+

+	/** Synchronize the Parameters in the JSONObject, with the Parameters in the DB. */

+	private void syncParams(JSONObject jo) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (String k : jo.keySet()) {

+			String v = "";

+			try {

+				v = jo.getString(k);

+			} catch (JSONException e) {

+				try {

+					v = ""+jo.getInt(k);

+				} catch (JSONException e1) {

+					JSONArray ja = jo.getJSONArray(k);

+					for (int i = 0; i < ja.length(); i++) {

+						if (i > 0)

+							v += "|";

+						v += ja.getString(i);

+					}

+				}

+			}

+			coll.add(new Parameters(k, v));

+		}

+		if (sync(coll, Parameters.getParameterCollection())) {

+			BaseServlet.provisioningDataChanged();

+			BaseServlet.provisioningParametersChanged();

+		}

+	}

+	private void syncIngressRoutes(JSONArray ja) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (int n = 0; n < ja.length(); n++) {

+			try {

+				IngressRoute in = new IngressRoute(ja.getJSONObject(n));

+				coll.add(in);

+			} catch (NumberFormatException e) {

+				logger.warn("PROV5004: Invalid object in ingress routes: "+ja.optJSONObject(n));

+			}

+		}

+		if (sync(coll, IngressRoute.getAllIngressRoutes()))

+			BaseServlet.provisioningDataChanged();

+	}

+	private void syncEgressRoutes(JSONObject jo) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (String key : jo.keySet()) {

+			try {

+				int sub = Integer.parseInt(key);

+				String node = jo.getString(key);

+				EgressRoute er = new EgressRoute(sub, node);

+				coll.add(er);

+			} catch (NumberFormatException e) {

+				logger.warn("PROV5004: Invalid subid in egress routes: "+key);

+			} catch (IllegalArgumentException e) {

+				logger.warn("PROV5004: Invalid node name in egress routes: "+key);

+			}

+		}

+		if (sync(coll, EgressRoute.getAllEgressRoutes()))

+			BaseServlet.provisioningDataChanged();

+	}

+	private void syncNetworkRoutes(JSONArray ja) {

+		Collection<Syncable> coll = new ArrayList<Syncable>();

+		for (int n = 0; n < ja.length(); n++) {

+			try {

+				NetworkRoute nr = new NetworkRoute(ja.getJSONObject(n));

+				coll.add(nr);

+			} catch (JSONException e) {

+				logger.warn("PROV5004: Invalid object in network routes: "+ja.optJSONObject(n));

+			}

+		}

+		if (sync(coll, NetworkRoute.getAllNetworkRoutes()))

+			BaseServlet.provisioningDataChanged();

+	}

+	private boolean sync(Collection<? extends Syncable> newc, Collection<? extends Syncable> oldc) {

+		boolean changes = false;

+		try {

+			Map<String, Syncable> newmap = getMap(newc);

+			Map<String, Syncable> oldmap = getMap(oldc);

+			Set<String> union = new TreeSet<String>(newmap.keySet());

+			union.addAll(oldmap.keySet());

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			for (String n : union) {

+				Syncable newobj = newmap.get(n);

+				Syncable oldobj = oldmap.get(n);

+				if (oldobj == null) {

+					if (logger.isDebugEnabled())

+						logger.debug("  Inserting record: "+newobj);

+					newobj.doInsert(conn);

+					changes = true;

+				} else if (newobj == null) {

+					if (logger.isDebugEnabled())

+						logger.debug("  Deleting record: "+oldobj);

+					oldobj.doDelete(conn);

+					changes = true;

+				} else if (!newobj.equals(oldobj)) {

+					if (logger.isDebugEnabled())

+						logger.debug("  Updating record: "+newobj);

+					newobj.doUpdate(conn);

+

+					/** Rally US708115: Change ownership of FEED - 1610, synchronised with the secondary DB. */

+					checkChnageOwner(newobj, oldobj);

+

+					changes = true;

+				}

+			}

+			db.release(conn);

+		} catch (SQLException e) {

+			logger.warn("PROV5009: problem during sync, exception: "+e);

+			e.printStackTrace();

+		}

+		return changes;

+	}

+	private Map<String, Syncable> getMap(Collection<? extends Syncable> c) {

+		Map<String, Syncable> map = new HashMap<String, Syncable>();

+		for (Syncable v : c) {

+			map.put(v.getKey(), v);

+		}

+		return map;

+	}

+	

+

+	/** Rally US708115: Change owner of a FEED or SUBSCRIPTION - 1610. */

+	private void checkChnageOwner(Syncable newobj, Syncable oldobj) {

+		if(newobj instanceof Feed) {

+			Feed oldfeed = (Feed) oldobj;

+			Feed newfeed = (Feed) newobj;

+			

+			if(!oldfeed.getPublisher().equals(newfeed.getPublisher())){

+				logger.info("PROV5013 -  Previous publisher: "+oldfeed.getPublisher() +": New publisher-"+newfeed.getPublisher());

+				oldfeed.setPublisher(newfeed.getPublisher());

+				oldfeed.changeOwnerShip();

+			}

+		}

+		else if(newobj instanceof Subscription) {

+			Subscription oldsub = (Subscription) oldobj;

+			Subscription newsub = (Subscription) newobj;

+			

+			if(!oldsub.getSubscriber().equals(newsub.getSubscriber())){

+				logger.info("PROV5013 -  Previous subscriber: "+oldsub.getSubscriber() +": New subscriber-"+newsub.getSubscriber());

+				oldsub.setSubscriber(newsub.getSubscriber());

+				oldsub.changeOwnerShip();

+			}

+		}

+		

+	}

+

+	/**

+	 * Issue a GET on the peer POD's /internal/prov/ URL to get a copy of its provisioning data.

+	 * @return the provisioning data (as a JSONObject)

+	 */

+	private synchronized JSONObject readProvisioningJSON() {

+		String url  = URLUtilities.generatePeerProvURL();

+		HttpGet get = new HttpGet(url);

+		try {

+			HttpResponse response = httpclient.execute(get);

+			int code = response.getStatusLine().getStatusCode();

+			if (code != HttpServletResponse.SC_OK) {

+				logger.warn("PROV5010: readProvisioningJSON failed, bad error code: "+code);

+				return null;

+			}

+			HttpEntity entity = response.getEntity();

+			String ctype = entity.getContentType().getValue().trim();

+			if (!ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE1) && !ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE2)) {

+				logger.warn("PROV5011: readProvisioningJSON failed, bad content type: "+ctype);

+				return null;

+			}

+			return new JSONObject(new JSONTokener(entity.getContent()));

+		} catch (Exception e) {

+			logger.warn("PROV5012: readProvisioningJSON failed, exception: "+e);

+			return null;

+		} finally {

+			get.releaseConnection();

+		}

+	}

+	/**

+	 * Issue a GET on the peer POD's /internal/drlogs/ URL to get an RLEBitSet representing the

+	 * log records available in the remote database.

+	 * @return the bitset

+	 */

+	private RLEBitSet readRemoteLoglist() {

+		RLEBitSet bs = new RLEBitSet();

+		String url  = URLUtilities.generatePeerLogsURL();

+

+		// Fix: if only one Prov server is configured, return an empty bitset rather than throwing an exception while filling logs.

+		if(url.equals("")) {

+			return bs;

+		}

+		//End of fix.

+

+		HttpGet get = new HttpGet(url);

+		try {

+			HttpResponse response = httpclient.execute(get);

+			int code = response.getStatusLine().getStatusCode();

+			if (code != HttpServletResponse.SC_OK) {

+				logger.warn("PROV5010: readRemoteLoglist failed, bad error code: "+code);

+				return bs;

+			}

+			HttpEntity entity = response.getEntity();

+			String ctype = entity.getContentType().getValue().trim();

+			if (!ctype.equals("text/plain")) {

+				logger.warn("PROV5011: readRemoteLoglist failed, bad content type: "+ctype);

+				return bs;

+			}

+			InputStream is = entity.getContent();

+			ByteArrayOutputStream bos = new ByteArrayOutputStream();

+			int ch = 0;

+			while ((ch = is.read()) >= 0)

+				bos.write(ch);

+			bs.set(bos.toString());

+			is.close();

+		} catch (Exception e) {

+			logger.warn("PROV5012: readRemoteLoglist failed, exception: "+e);

+			return bs;

+		} finally {

+			get.releaseConnection();

+		}

+		return bs;

+	}

+	/**

+	 * Issue a POST on the peer POD's /internal/drlogs/ URL to fetch log records available

+	 * in the remote database that we wish to copy to the local database.

+	 * @param bs the bitset (an RLEBitSet) of log records to fetch

+	 */

+	private void replicateDRLogs(RLEBitSet bs) {

+		String url  = URLUtilities.generatePeerLogsURL();

+		HttpPost post = new HttpPost(url);

+		try {

+			String t = bs.toString();

+			HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create("text/plain"));

+			post.setEntity(body);

+			if (logger.isDebugEnabled())

+				logger.debug("Requesting records: "+t);

+

+			HttpResponse response = httpclient.execute(post);

+			int code = response.getStatusLine().getStatusCode();

+			if (code != HttpServletResponse.SC_OK) {

+				logger.warn("PROV5010: replicateDRLogs failed, bad error code: "+code);

+				return;

+			}

+			HttpEntity entity = response.getEntity();

+			String ctype = entity.getContentType().getValue().trim();

+			if (!ctype.equals("text/plain")) {

+				logger.warn("PROV5011: replicateDRLogs failed, bad content type: "+ctype);

+				return;

+			}

+

+			String spoolname = "" + System.currentTimeMillis();

+			Path tmppath = Paths.get(spooldir, spoolname);

+			Path donepath = Paths.get(spooldir, "IN."+spoolname);

+			Files.copy(entity.getContent(), Paths.get(spooldir, spoolname), StandardCopyOption.REPLACE_EXISTING);

+			Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);

+			logger.info("Approximately "+bs.cardinality()+" records replicated.");

+		} catch (Exception e) {

+			logger.warn("PROV5012: replicateDRLogs failed, exception: "+e);

+		} finally {

+			post.releaseConnection();

+		}

+	}

+}
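
A self-contained sketch of the reconciliation strategy used by sync() above: key both collections, walk the union of keys, and insert, delete, or update accordingly. Plain String values stand in for the Syncable beans and the database writes; it is only meant to illustrate the control flow, not the JDBC details.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

public class SyncSketch {
    // Reconcile a local map with a remote map the way SynchronizerTask.sync() does:
    // keys only in "remote" are inserted, keys only in "local" are deleted,
    // keys in both with different values are updated.
    public static boolean reconcile(Map<String, String> local, Map<String, String> remote) {
        boolean changed = false;
        Set<String> union = new TreeSet<>(remote.keySet());
        union.addAll(local.keySet());
        for (String key : union) {
            String newval = remote.get(key);
            String oldval = local.get(key);
            if (oldval == null) {
                local.put(key, newval);          // insert
                changed = true;
            } else if (newval == null) {
                local.remove(key);               // delete
                changed = true;
            } else if (!newval.equals(oldval)) {
                local.put(key, newval);          // update
                changed = true;
            }
        }
        return changed;
    }

    public static void main(String[] args) {
        Map<String, String> local = new HashMap<>();
        local.put("feed-1", "v1");
        local.put("feed-2", "v1");
        Map<String, String> remote = new HashMap<>();
        remote.put("feed-1", "v2");
        remote.put("feed-3", "v1");
        // Prints changed=true with local holding feed-1=v2 and feed-3=v1 (HashMap order may vary).
        System.out.println("changed=" + reconcile(local, remote) + " local=" + local);
    }
}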

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/BaseLogRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/BaseLogRecord.java
new file mode 100644
index 0000000..327f95f
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/BaseLogRecord.java
@@ -0,0 +1,184 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.text.ParseException;

+import java.text.SimpleDateFormat;

+import java.util.Calendar;

+import java.util.Date;

+import java.util.GregorianCalendar;

+import org.json.LOGJSONObject;

+

+/**

+ * Define the common fields used by the three types of records generated by DR nodes.

+ *

+ * @author Robert Eby

+ * @version $Id: BaseLogRecord.java,v 1.10 2013/10/29 16:57:57 eby Exp $

+ */

+public class BaseLogRecord implements LOGJSONable, Loadable {

+	protected static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");

+

+	private long eventTime;

+	private String publishId;

+	private int feedid;

+	private String requestUri;

+	private String method;

+	private String contentType;

+	private long contentLength;

+

+	protected BaseLogRecord(String[] pp) throws ParseException {

+//		This throws exceptions occasionally - don't know why.

+//		Date d = null;

+//		synchronized (sdf) {

+//			d = sdf.parse(pp[0]);

+//		}

+		Date d = parseDate(pp[0]);

+		this.eventTime     = d.getTime();

+		this.publishId     = pp[2];

+		this.feedid        = Integer.parseInt(pp[3]);

+		if (pp[1].equals("DLX")) {

+			this.requestUri    = "";

+			this.method        = "GET";	// Note: we need a valid value in this field, even though unused

+			this.contentType   = "";

+			this.contentLength = Long.parseLong(pp[5]);

+		} else  if (pp[1].equals("PUB") || pp[1].equals("LOG") || pp[1].equals("PBF")) {

+			this.requestUri    = pp[4];

+			this.method        = pp[5];

+			this.contentType   = pp[6];

+			this.contentLength = Long.parseLong(pp[7]);

+		} else {

+			this.requestUri    = pp[5];

+			this.method        = pp[6];

+			this.contentType   = pp[7];

+			this.contentLength = Long.parseLong(pp[8]);

+		}

+	}

+	protected BaseLogRecord(ResultSet rs) throws SQLException {

+		this.eventTime     = rs.getLong("EVENT_TIME");

+		this.publishId     = rs.getString("PUBLISH_ID");

+		this.feedid        = rs.getInt("FEEDID");

+		this.requestUri    = rs.getString("REQURI");

+		this.method        = rs.getString("METHOD");

+		this.contentType   = rs.getString("CONTENT_TYPE");

+		this.contentLength = rs.getLong("CONTENT_LENGTH");

+	}

+	protected Date parseDate(final String s) throws ParseException {

+		int[] n = new int[7];

+		int p = 0;

+		for (int i = 0; i < s.length(); i++) {

+			char c = s.charAt(i);

+			if (c < '0' || c > '9') {

+				p++;

+			} else {

+				if (p >= n.length)	// guard must trip before n[p] is indexed

+					throw new ParseException("parseDate()", 0);

+				n[p] = (n[p] * 10) + (c - '0');

+			}

+		}

+		if (p != 7)

+			throw new ParseException("parseDate()", 1);

+		Calendar cal = new GregorianCalendar();

+		cal.set(Calendar.YEAR, n[0]);

+		cal.set(Calendar.MONTH, n[1]-1);

+		cal.set(Calendar.DAY_OF_MONTH, n[2]);

+		cal.set(Calendar.HOUR_OF_DAY, n[3]);

+		cal.set(Calendar.MINUTE, n[4]);

+		cal.set(Calendar.SECOND, n[5]);

+		cal.set(Calendar.MILLISECOND, n[6]);

+		return cal.getTime();

+	}

+	public long getEventTime() {

+		return eventTime;

+	}

+	public void setEventTime(long eventTime) {

+		this.eventTime = eventTime;

+	}

+	public String getPublishId() {

+		return publishId;

+	}

+	public void setPublishId(String publishId) {

+		this.publishId = publishId;

+	}

+	public int getFeedid() {

+		return feedid;

+	}

+	public void setFeedid(int feedid) {

+		this.feedid = feedid;

+	}

+	public String getRequestUri() {

+		return requestUri;

+	}

+	public void setRequestUri(String requestUri) {

+		this.requestUri = requestUri;

+	}

+	public String getMethod() {

+		return method;

+	}

+	public void setMethod(String method) {

+		this.method = method;

+	}

+	public String getContentType() {

+		return contentType;

+	}

+	public void setContentType(String contentType) {

+		this.contentType = contentType;

+	}

+	public long getContentLength() {

+		return contentLength;

+	}

+	public void setContentLength(long contentLength) {

+		this.contentLength = contentLength;

+	}

+	@Override

+	public LOGJSONObject asJSONObject() {

+		LOGJSONObject jo = new LOGJSONObject();

+		String t = "";

+		synchronized (sdf) {

+			t = sdf.format(eventTime);

+		}

+		jo.put("date", t);

+		jo.put("publishId", publishId);

+		jo.put("requestURI", requestUri);

+		jo.put("method", method);

+		if (method.equals("PUT")) {

+			jo.put("contentType", contentType);

+			jo.put("contentLength", contentLength);

+		}

+		return jo;

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setLong  (2, getEventTime());

+		ps.setString(3, getPublishId());

+		ps.setInt   (4, getFeedid());

+		ps.setString(5, getRequestUri());

+		ps.setString(6, getMethod());

+		ps.setString(7, getContentType());

+		ps.setLong  (8, getContentLength());

+	}

+}
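For reference, a minimal sketch of how a node-log line flows through this base class (illustrative only, not part of the imported source). The subclass name and the field values are made up; the array layout follows the PUB branch of the constructor above.

package com.att.research.datarouter.provisioning.beans;

import java.text.ParseException;

// Hypothetical subclass, used only to exercise the protected constructor and the shared JSON logic.
public class ExampleLogRecord extends BaseLogRecord {
	public ExampleLogRecord(String[] pp) throws ParseException {
		super(pp);	// parses eventTime, publishId, feedid, requestUri, method, contentType, contentLength
	}

	public static void main(String[] args) throws ParseException {
		// date, type, publishId, feedid, requestUri, method, contentType, contentLength (values made up)
		String[] pp = { "2013-10-29T16:57:57.123Z", "PUB", "12345.node1.example", "7",
			"https://prov.example/publish/7/file1", "PUT", "application/octet-stream", "1024" };
		ExampleLogRecord rec = new ExampleLogRecord(pp);
		System.out.println(rec.asJSONObject());	// date, publishId, requestURI, method, contentType, contentLength
	}
}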

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Deleteable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Deleteable.java
new file mode 100644
index 0000000..c16bdbc
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Deleteable.java
@@ -0,0 +1,41 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+

+/**

+ * An object that can be DELETE-ed from the database.

+ * @author Robert Eby

+ * @version $Id: Deleteable.java,v 1.2 2013/05/29 14:44:36 eby Exp $

+ */

+public interface Deleteable {

+	/**

+	 * Delete this object in the DB.

+	 * @param c the JDBC Connection to use

+	 * @return true if the DELETE succeeded, false otherwise

+	 */

+	public boolean doDelete(Connection c);

+}
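A minimal sketch of an implementation (illustrative only; the bean and its WIDGETS table are hypothetical). The contract is simply that doDelete() reports success or failure rather than throwing.

package com.att.research.datarouter.provisioning.beans;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

// Hypothetical bean, used only to illustrate the Deleteable contract.
public class Widget implements Deleteable {
	private final int id;

	public Widget(int id) {
		this.id = id;
	}

	@Override
	public boolean doDelete(Connection c) {
		try (PreparedStatement ps = c.prepareStatement("delete from WIDGETS where ID = ?")) {
			ps.setInt(1, id);
			ps.execute();
			return true;
		} catch (SQLException e) {
			return false;	// per the interface: false when the DELETE fails
		}
	}
}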

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryExtraRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryExtraRecord.java
new file mode 100644
index 0000000..1a1cb56
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryExtraRecord.java
@@ -0,0 +1,68 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Types;

+import java.text.ParseException;

+

+/**

+ * The representation of a Delivery Extra (DLX) Record, as retrieved from the DB.

+ * @author Robert Eby

+ * @version $Id: DeliveryExtraRecord.java,v 1.1 2013/10/28 18:06:52 eby Exp $

+ */

+public class DeliveryExtraRecord extends BaseLogRecord {

+	private int  subid;

+	private long contentLength2;

+

+	public DeliveryExtraRecord(String[] pp) throws ParseException {

+		super(pp);

+		this.subid = Integer.parseInt(pp[4]);

+		this.contentLength2 = Long.parseLong(pp[6]);

+	}

+	public DeliveryExtraRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		// Note: because this record type should be "rare", these fields are mapped to unconventional columns in the DB

+		this.subid  = rs.getInt("DELIVERY_SUBID");

+		this.contentLength2 = rs.getInt("CONTENT_LENGTH_2");

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, "dlx");		// field 1: type

+		super.load(ps);				// loads fields 2-8

+		ps.setNull( 9, Types.VARCHAR);

+		ps.setNull(10, Types.VARCHAR);

+		ps.setNull(11, Types.VARCHAR);

+		ps.setNull(12, Types.INTEGER);

+		ps.setInt (13, subid);

+		ps.setNull(14, Types.VARCHAR);

+		ps.setNull(15, Types.INTEGER);

+		ps.setNull(16, Types.INTEGER);

+		ps.setNull(17, Types.VARCHAR);

+		ps.setLong(19, contentLength2);

+	}

+}
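A rough sketch of how a DLX line maps onto this class (illustrative only; the field layout is inferred from the two constructors above and the values are made up).

package com.att.research.datarouter.provisioning.beans;

import java.text.ParseException;

public class DeliveryExtraExample {
	public static void main(String[] args) throws ParseException {
		// date, type, publishId, feedid, subid, contentLength, contentLength2
		String[] pp = { "2013-10-29T16:57:57.123Z", "DLX", "12345.node1.example", "7", "42", "512", "1024" };
		DeliveryExtraRecord dlx = new DeliveryExtraRecord(pp);
		System.out.println(dlx.getContentLength());	// 512, taken from the DLX branch of BaseLogRecord
	}
}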

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryRecord.java
new file mode 100644
index 0000000..b4791d4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/DeliveryRecord.java
@@ -0,0 +1,137 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Types;

+import java.text.ParseException;

+import java.util.LinkedHashMap;

+

+import org.json.LOGJSONObject;

+

+/**

+ * The representation of a Delivery Record, as retrieved from the DB.

+ * @author Robert Eby

+ * @version $Id: DeliveryRecord.java,v 1.9 2014/03/12 19:45:41 eby Exp $

+ */

+public class DeliveryRecord extends BaseLogRecord {

+	private int subid;

+	private String fileid;

+	private int result;

+	private String user;

+

+	public DeliveryRecord(String[] pp) throws ParseException {

+		super(pp);

+		String fileid = pp[5];

+		if (fileid.lastIndexOf('/') >= 0)

+			fileid = fileid.substring(fileid.lastIndexOf('/')+1);

+		this.subid  = Integer.parseInt(pp[4]);

+		this.fileid = fileid;

+		this.result = Integer.parseInt(pp[10]);

+		this.user   = pp[9];

+		if (this.user != null && this.user.length() > 50)

+			this.user = this.user.substring(0, 50);

+	}

+	public DeliveryRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		this.subid  = rs.getInt("DELIVERY_SUBID");

+		this.fileid = rs.getString("DELIVERY_FILEID");

+		this.result = rs.getInt("RESULT");

+		this.user   = rs.getString("USER");

+	}

+	public int getSubid() {

+		return subid;

+	}

+	public void setSubid(int subid) {

+		this.subid = subid;

+	}

+	public String getFileid() {

+		return fileid;

+	}

+	public void setFileid(String fileid) {

+		this.fileid = fileid;

+	}

+	public int getResult() {

+		return result;

+	}

+	public void setResult(int result) {

+		this.result = result;

+	}

+	public String getUser() {

+		return user;

+	}

+	public void setUser(String user) {

+		this.user = user;

+	}

+	

+	

+	public LOGJSONObject reOrderObject(LOGJSONObject jo) {

+		LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();

+		

+		logrecordObj.put("statusCode", jo.get("statusCode"));

+		logrecordObj.put("deliveryId", jo.get("deliveryId"));

+		logrecordObj.put("publishId", jo.get("publishId"));

+		logrecordObj.put("requestURI", jo.get("requestURI"));

+		//logrecordObj.put("sourceIP", jo.get("sourceIP"));

+		logrecordObj.put("method", jo.get("method"));

+		logrecordObj.put("contentType", jo.get("contentType"));

+		//logrecordObj.put("endpointId", jo.get("endpointId"));

+		logrecordObj.put("type", jo.get("type"));

+		logrecordObj.put("date", jo.get("date"));

+		logrecordObj.put("contentLength", jo.get("contentLength"));

+

+

+		LOGJSONObject newjo = new LOGJSONObject(logrecordObj);

+		return newjo;

+	}

+	

+	@Override

+	public LOGJSONObject asJSONObject() {

+		LOGJSONObject jo = super.asJSONObject();

+		jo.put("type", "del");

+		jo.put("deliveryId", user);

+		jo.put("statusCode", result);

+		

+		LOGJSONObject newjo = this.reOrderObject(jo);

+		return newjo;

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, "del");		// field 1: type

+		super.load(ps);				// loads fields 2-8

+		ps.setNull  (9,  Types.VARCHAR);

+		ps.setNull  (10, Types.VARCHAR);

+		ps.setString(11, getUser());

+		ps.setNull  (12, Types.INTEGER);

+		ps.setInt   (13, getSubid());

+		ps.setString(14, getFileid());

+		ps.setInt   (15, getResult());

+		ps.setNull  (16, Types.INTEGER);

+		ps.setNull  (17, Types.VARCHAR);

+		ps.setNull  (19, Types.BIGINT);

+	}

+}
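A similar sketch for a delivery (DEL) line (illustrative only; indices follow the default branch of the BaseLogRecord constructor plus the fields read here, and all values are made up).

package com.att.research.datarouter.provisioning.beans;

import java.text.ParseException;

public class DeliveryRecordExample {
	public static void main(String[] args) throws ParseException {
		// date, type, publishId, feedid, subid, requestUri, method, contentType, contentLength, user, result
		String[] pp = { "2013-10-29T16:57:57.123Z", "DEL", "12345.node1.example", "7", "42",
			"https://sub.example/deliver/file1", "PUT", "application/octet-stream", "1024", "subuser", "204" };
		DeliveryRecord del = new DeliveryRecord(pp);
		System.out.println(del.asJSONObject());	// keys come out in the fixed order imposed by reOrderObject()
	}
}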

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EgressRoute.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EgressRoute.java
new file mode 100644
index 0000000..94b59ce
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EgressRoute.java
@@ -0,0 +1,227 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.SortedSet;

+import java.util.TreeSet;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * The representation of one route in the Egress Route Table.

+ *

+ * @author Robert P. Eby

+ * @version $Id: EgressRoute.java,v 1.3 2013/12/16 20:30:23 eby Exp $

+ */

+public class EgressRoute extends NodeClass implements Comparable<EgressRoute> {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private final int subid;

+	private final int nodeid;

+

+	/**

+	 * Get a set of all Egress Routes in the DB.  The set is sorted according to the natural sorting order

+	 * of the routes (based on the subscription ID in each route).

+	 * @return the sorted set

+	 */

+	public static SortedSet<EgressRoute> getAllEgressRoutes() {

+		SortedSet<EgressRoute> set = new TreeSet<EgressRoute>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet    rs = stmt.executeQuery("select SUBID, NODEID from EGRESS_ROUTES");

+			while (rs.next()) {

+				int subid  = rs.getInt("SUBID");

+				int nodeid = rs.getInt("NODEID");

+				set.add(new EgressRoute(subid, nodeid));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return set;

+	}

+	/**

+	 * Get a single Egress Route for the subscription <i>sub</i>.

+	 * @param sub the subscription to look up

+	 * @return an EgressRoute, or null if there is no route for this subscription

+	 */

+	public static EgressRoute getEgressRoute(int sub) {

+		EgressRoute v = null;

+		PreparedStatement ps = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			String sql = "select NODEID from EGRESS_ROUTES where SUBID = ?";

+			ps = conn.prepareStatement(sql);

+			ps.setInt(1, sub);

+			ResultSet rs = ps.executeQuery();

+			if (rs.next()) {

+				int node = rs.getInt("NODEID");

+				v = new EgressRoute(sub, node);

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return v;

+	}

+

+	public EgressRoute(int subid, int nodeid) throws IllegalArgumentException {

+		this.subid = subid;

+		this.nodeid = nodeid;

+// Note: unlike Feeds, subscriptions can be removed from the tables, so it is

+// possible that an orphan ERT entry can exist if a sub is removed.

+//		if (Subscription.getSubscriptionById(subid) == null)

+//			throw new IllegalArgumentException("No such subscription: "+subid);

+	}

+

+	public EgressRoute(int subid, String node) throws IllegalArgumentException {

+		this(subid, lookupNodeName(node));

+	}

+

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "delete from EGRESS_ROUTES where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, subid);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = false;

+		PreparedStatement ps = null;

+		try {

+			// Create the EGRESS_ROUTES row

+			String sql = "insert into EGRESS_ROUTES (SUBID, NODEID) values (?, ?)";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, this.subid);

+			ps.setInt(2, this.nodeid);

+			ps.execute();

+			ps.close();

+			rv = true;

+		} catch (SQLException e) {

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "update EGRESS_ROUTES set NODEID = ? where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, nodeid);

+			ps.setInt(2, subid);

+			ps.executeUpdate();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put(""+subid, lookupNodeID(nodeid));

+		return jo;

+	}

+

+	@Override

+	public String getKey() {

+		return ""+subid;

+	}

+

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof EgressRoute))

+			return false;

+		EgressRoute on = (EgressRoute)obj;

+		return (subid == on.subid) && (nodeid == on.nodeid);

+	}

+

+	@Override

+	public int compareTo(EgressRoute o) {

+		return this.subid - o.subid;

+	}

+

+	@Override

+	public String toString() {

+		return String.format("EGRESS: sub=%d, node=%d", subid, nodeid);

+	}

+}
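A sketch of typical use (illustrative only), assuming the DB helper hands out JDBC connections the same way it does elsewhere in this commit and that subscription 42 and node 3 exist; both IDs are made up.

package com.att.research.datarouter.provisioning.beans;

import java.sql.Connection;

import com.att.research.datarouter.provisioning.utils.DB;

public class EgressRouteExample {
	public static void main(String[] args) throws Exception {
		DB db = new DB();
		Connection conn = db.getConnection();
		EgressRoute route = new EgressRoute(42, 3);	// send subscription 42's deliveries through node 3
		boolean ok = (EgressRoute.getEgressRoute(42) == null)
			? route.doInsert(conn)		// no route yet: create one
			: route.doUpdate(conn);		// route exists: repoint it
		System.out.println(ok ? route.toString() : "egress route change failed");
		db.release(conn);
	}
}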

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EventLogRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EventLogRecord.java
new file mode 100644
index 0000000..adf45d4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/EventLogRecord.java
@@ -0,0 +1,84 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.security.cert.X509Certificate;

+

+import javax.servlet.http.HttpServletRequest;

+

+import com.att.research.datarouter.provisioning.BaseServlet;

+

+/**

+ * This class is used to log provisioning server events.  Each event consists of a who

+ * (who made the provisioning request, including the IP address, the X-ATT-DR-ON-BEHALF-OF

+ * header value, and the client certificate), a what (what request was made; the method

+ * and servlet involved), and a how (how the request was handled; the result code and

+ * message returned to the client).  EventLogRecords are logged using log4j at the INFO level.

+ *

+ * @author Robert Eby

+ * @version $Id: EventLogRecord.java,v 1.1 2013/04/26 21:00:25 eby Exp $

+ */

+public class EventLogRecord {

+	private final String ipaddr;		// Who

+	private final String behalfof;

+	private final String clientSubject;

+	private final String method;		// What

+	private final String servlet;

+	private int result;					// How

+	private String message;

+

+	public EventLogRecord(HttpServletRequest request) {

+		// Who is making the request

+		this.ipaddr = request.getRemoteAddr();

+		String s = request.getHeader(BaseServlet.BEHALF_HEADER);

+		this.behalfof = (s != null) ? s : "";

+		X509Certificate certs[] = (X509Certificate[]) request.getAttribute(BaseServlet.CERT_ATTRIBUTE);

+		this.clientSubject = (certs != null && certs.length > 0)

+			? certs[0].getSubjectX500Principal().getName() : "";

+

+		// What is the request

+		this.method  = request.getMethod();

+		this.servlet = request.getServletPath();

+

+		// How was it dealt with

+		this.result = -1;

+		this.message = "";

+	}

+	public void setResult(int result) {

+		this.result = result;

+	}

+	public void setMessage(String message) {

+		this.message = message;

+	}

+	@Override

+	public String toString() {

+		return String.format(

+			"%s %s \"%s\" %s %s %d \"%s\"",

+			ipaddr, behalfof, clientSubject,

+			method, servlet,

+			result, message

+		);

+	}

+}
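A sketch of how a provisioning servlet might emit one of these records (illustrative only; the logger name and the surrounding handler are assumptions, not part of this class).

package com.att.research.datarouter.provisioning.beans;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.log4j.Logger;

public class EventLogExample {
	// Assumed logger category; the real servlets may use a different one.
	private static final Logger eventlogger = Logger.getLogger("com.att.research.datarouter.provisioning.events");

	static void rejectRequest(HttpServletRequest req, HttpServletResponse resp) {
		EventLogRecord elr = new EventLogRecord(req);		// who: IP, BEHALF header, client cert subject
		elr.setMessage("Operation not permitted.");		// how: message returned to the client
		elr.setResult(HttpServletResponse.SC_FORBIDDEN);	// how: result code
		eventlogger.info(elr);					// toString() renders the who/what/how line
		resp.setStatus(HttpServletResponse.SC_FORBIDDEN);
	}
}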

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/ExpiryRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/ExpiryRecord.java
new file mode 100644
index 0000000..1db5417
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/ExpiryRecord.java
@@ -0,0 +1,141 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Types;

+import java.text.ParseException;

+import java.util.LinkedHashMap;

+

+import org.json.LOGJSONObject;

+

+/**

+ * The representation of an Expiry Record, as retrieved from the DB.

+ * @author Robert Eby

+ * @version $Id: ExpiryRecord.java,v 1.4 2013/10/28 18:06:52 eby Exp $

+ */

+public class ExpiryRecord extends BaseLogRecord {

+	private int subid;

+	private String fileid;

+	private int attempts;

+	private String reason;

+

+	public ExpiryRecord(String[] pp) throws ParseException {

+		super(pp);

+		String fileid = pp[5];

+		if (fileid.lastIndexOf('/') >= 0)

+			fileid = fileid.substring(fileid.lastIndexOf('/')+1);

+		this.subid    = Integer.parseInt(pp[4]);

+		this.fileid   = fileid;

+		this.attempts = Integer.parseInt(pp[10]);

+		this.reason   = pp[9];

+		if (!reason.equals("notRetryable") && !reason.equals("retriesExhausted") && !reason.equals("diskFull"))

+			this.reason = "other";

+	}

+	public ExpiryRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		this.subid    = rs.getInt("DELIVERY_SUBID");

+		this.fileid   = rs.getString("DELIVERY_FILEID");

+		this.attempts = rs.getInt("ATTEMPTS");

+		this.reason   = rs.getString("REASON");

+	}

+

+	public int getSubid() {

+		return subid;

+	}

+

+	public void setSubid(int subid) {

+		this.subid = subid;

+	}

+

+	public String getFileid() {

+		return fileid;

+	}

+

+	public void setFileid(String fileid) {

+		this.fileid = fileid;

+	}

+

+	public int getAttempts() {

+		return attempts;

+	}

+

+	public void setAttempts(int attempts) {

+		this.attempts = attempts;

+	}

+

+	public String getReason() {

+		return reason;

+	}

+

+	public void setReason(String reason) {

+		this.reason = reason;

+	}

+	

+	public LOGJSONObject reOrderObject(LOGJSONObject jo) {

+		LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();

+		

+		logrecordObj.put("expiryReason", jo.get("expiryReason"));

+		logrecordObj.put("publishId", jo.get("publishId"));

+		logrecordObj.put("attempts", jo.get("attempts"));

+		logrecordObj.put("requestURI", jo.get("requestURI"));

+		logrecordObj.put("method", jo.get("method"));

+		logrecordObj.put("contentType", jo.get("contentType"));

+		logrecordObj.put("type", jo.get("type"));

+		logrecordObj.put("date", jo.get("date"));

+		logrecordObj.put("contentLength", jo.get("contentLength"));

+

+		LOGJSONObject newjo = new LOGJSONObject(logrecordObj);

+		return newjo;

+	}

+	

+	@Override

+	public LOGJSONObject asJSONObject() {

+		LOGJSONObject jo = super.asJSONObject();

+		jo.put("type", "exp");

+		jo.put("expiryReason", reason);

+		jo.put("attempts", attempts);

+		

+		LOGJSONObject newjo = this.reOrderObject(jo);

+		return newjo;

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, "exp");		// field 1: type

+		super.load(ps);				// loads fields 2-8

+		ps.setNull  (9,  Types.VARCHAR);

+		ps.setNull  (10, Types.VARCHAR);

+		ps.setNull  (11, Types.VARCHAR);

+		ps.setNull  (12, Types.INTEGER);

+		ps.setInt   (13, getSubid());

+		ps.setString(14, getFileid());

+		ps.setNull  (15, Types.INTEGER);

+		ps.setInt   (16, getAttempts());

+		ps.setString(17, getReason());

+		ps.setNull  (19, Types.BIGINT);

+	}

+}
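One more sketch in the same vein (illustrative only, values made up); note how any reason outside the three recognized values is normalized to "other".

package com.att.research.datarouter.provisioning.beans;

import java.text.ParseException;

public class ExpiryRecordExample {
	public static void main(String[] args) throws ParseException {
		// date, type, publishId, feedid, subid, requestUri, method, contentType, contentLength, reason, attempts
		String[] pp = { "2013-10-29T16:57:57.123Z", "EXP", "12345.node1.example", "7", "42",
			"https://sub.example/deliver/file1", "PUT", "application/octet-stream", "1024", "timedOut", "5" };
		ExpiryRecord exp = new ExpiryRecord(pp);
		System.out.println(exp.getReason());	// "other": "timedOut" is not one of the recognized reasons
	}
}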

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Feed.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Feed.java
new file mode 100644
index 0000000..4ee5ab9
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Feed.java
@@ -0,0 +1,760 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.InvalidObjectException;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.List;

+import java.util.Map;

+import java.util.Set;

+

+import org.apache.log4j.Logger;

+import org.json.JSONArray;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.JSONUtilities;

+import com.att.research.datarouter.provisioning.utils.URLUtilities;

+

+/**

+ * The representation of a Feed.  Feeds can be retrieved from the DB, or stored/updated in the DB.

+ * @author Robert Eby

+ * @version $Id: Feed.java,v 1.13 2013/10/28 18:06:52 eby Exp $

+ */

+public class Feed extends Syncable {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private static int next_feedid = getMaxFeedID() + 1;

+

+	private int feedid;

+	private int groupid; //New field is added - Groups feature Rally:US708115 - 1610

+	private String name;

+	private String version;

+	private String description;

+	private String business_description; // New field is added - Groups feature Rally:US708102 - 1610

+	private FeedAuthorization authorization;

+	private String publisher;

+	private FeedLinks links;

+	private boolean deleted;

+	private boolean suspended;

+	private Date last_mod;

+	private Date created_date;

+

+	/**

+	 * Check if a feed ID is valid.

+	 * @param id the Feed ID

+	 * @return true if it is valid

+	 */

+	@SuppressWarnings("resource")

+	public static boolean isFeedValid(int id) {

+		int count = 0;

+		try {

+			DB db = new DB();

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select COUNT(*) from FEEDS where FEEDID = " + id);

+			if (rs.next()) {

+				count = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return count != 0;

+	}

+	/**

+	 * Get a specific feed from the DB, based upon its ID.

+	 * @param id the Feed ID

+	 * @return the Feed object, or null if it does not exist

+	 */

+	public static Feed getFeedById(int id) {

+		String sql = "select * from FEEDS where FEEDID = " + id;

+		return getFeedBySQL(sql);

+	}

+	/**

+	 * Get a specific feed from the DB, based upon its name and version.

+	 * @param name the name of the Feed

+	 * @param version the version of the Feed

+	 * @return the Feed object, or null if it does not exist

+	 */

+	public static Feed getFeedByNameVersion(String name, String version) {

+		name = name.replaceAll("'", "''");

+		version = version.replaceAll("'", "''");

+		String sql = "select * from FEEDS where NAME = '" + name + "' and VERSION ='" + version + "'";

+		return getFeedBySQL(sql);

+	}

+	/**

+	 * Return a count of the number of active feeds in the DB.

+	 * @return the count

+	 */

+	public static int countActiveFeeds() {

+		int count = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select count(*) from FEEDS where DELETED = 0");

+			if (rs.next()) {

+				count = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.info("countActiveFeeds: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return count;

+	}

+	public static int getMaxFeedID() {

+		int max = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select MAX(feedid) from FEEDS");

+			if (rs.next()) {

+				max = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.info("getMaxFeedID: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return max;

+	}

+	public static Collection<Feed> getAllFeeds() {

+		Map<Integer, Feed> map = new HashMap<Integer, Feed>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select * from FEEDS");

+			while (rs.next()) {

+				Feed feed = new Feed(rs);

+				map.put(feed.getFeedid(), feed);

+			}

+			rs.close();

+

+			String sql = "select * from FEED_ENDPOINT_IDS";

+			rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				int id = rs.getInt("FEEDID");

+				Feed feed = map.get(id);

+				if (feed != null) {

+					FeedEndpointID epi = new FeedEndpointID(rs);

+					Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();

+					ecoll.add(epi);

+				}

+			}

+			rs.close();

+

+			sql = "select * from FEED_ENDPOINT_ADDRS";

+			rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				int id = rs.getInt("FEEDID");

+				Feed feed = map.get(id);

+				if (feed != null) {

+					Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();

+					acoll.add(rs.getString("ADDR"));

+				}

+			}

+			rs.close();

+

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return map.values();

+	}

+	public static List<String> getFilteredFeedUrlList(final String name, final String val) {

+		List<String> list = new ArrayList<String>();

+		String sql = "select SELF_LINK from FEEDS where DELETED = 0";

+		if (name.equals("name")) {

+			sql += " and NAME = ?";

+		} else if (name.equals("publ")) {

+			sql += " and PUBLISHER = ?";

+		} else if (name.equals("subs")) {

+			sql = "select distinct FEEDS.SELF_LINK from FEEDS, SUBSCRIPTIONS " +

+				"where DELETED = 0 " +

+				"and FEEDS.FEEDID = SUBSCRIPTIONS.FEEDID " +

+				"and SUBSCRIPTIONS.SUBSCRIBER = ?";

+		}

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(sql);

+			if (sql.indexOf('?') >= 0)

+				ps.setString(1, val);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				String t = rs.getString(1);

+				list.add(t.trim());

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return list;

+	}

+	@SuppressWarnings("resource")

+	private static Feed getFeedBySQL(String sql) {

+		Feed feed = null;

+		try {

+			DB db = new DB();

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			if (rs.next()) {

+				feed = new Feed(rs);

+				rs.close();

+

+				sql = "select * from FEED_ENDPOINT_IDS where FEEDID = " + feed.feedid;

+				rs = stmt.executeQuery(sql);

+				Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();

+				while (rs.next()) {

+					FeedEndpointID epi = new FeedEndpointID(rs);

+					ecoll.add(epi);

+				}

+				rs.close();

+

+				sql = "select * from FEED_ENDPOINT_ADDRS where FEEDID = " + feed.feedid;

+				rs = stmt.executeQuery(sql);

+				Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();

+				while (rs.next()) {

+					acoll.add(rs.getString("ADDR"));

+				}

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return feed;

+	}

+

+	public Feed() {

+		this("", "", "","");

+	}

+

+	public Feed(String name, String version, String desc,String business_description) {

+		this.feedid = -1;

+		this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610

+		this.name = name;

+		this.version = version;

+		this.description = desc;

+		this.business_description=business_description; // New field is added - Groups feature Rally:US708102 - 1610

+		this.authorization = new FeedAuthorization();

+		this.publisher = "";

+		this.links = new FeedLinks();

+		this.deleted = false;

+		this.suspended = false;

+		this.last_mod = new Date();

+		this.created_date = new Date();

+	}

+	public Feed(ResultSet rs) throws SQLException {

+		this.feedid = rs.getInt("FEEDID");

+		this.groupid = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610

+		this.name = rs.getString("NAME");

+		this.version = rs.getString("VERSION");

+		this.description = rs.getString("DESCRIPTION");

+		this.business_description=rs.getString("BUSINESS_DESCRIPTION"); // New field is added - Groups feature Rally:US708102 - 1610

+		this.authorization = new FeedAuthorization();

+		this.authorization.setClassification(rs.getString("AUTH_CLASS"));

+		this.publisher   = rs.getString("PUBLISHER");

+		this.links       = new FeedLinks();

+		this.links.setSelf(rs.getString("SELF_LINK"));

+		this.links.setPublish(rs.getString("PUBLISH_LINK"));

+		this.links.setSubscribe(rs.getString("SUBSCRIBE_LINK"));

+		this.links.setLog(rs.getString("LOG_LINK"));

+		this.deleted     = rs.getBoolean("DELETED");

+		this.suspended   = rs.getBoolean("SUSPENDED");

+		this.last_mod    = rs.getDate("LAST_MOD");

+		this.created_date    = rs.getTimestamp("CREATED_DATE");

+	}

+	public Feed(JSONObject jo) throws InvalidObjectException {

+		this("", "", "","");

+		try {

+			// The JSONObject is assumed to contain a vnd.att-dr.feed representation

+			this.feedid = jo.optInt("feedid", -1);

+			this.groupid = jo.optInt("groupid"); //New field is added - Groups feature Rally:US708115 - 1610

+			this.name = jo.getString("name");

+			if (name.length() > 255)

+				throw new InvalidObjectException("name field is too long");

+			this.version = jo.getString("version");

+			if (version.length() > 20)

+				throw new InvalidObjectException("version field is too long");

+			this.description = jo.optString("description");

+			this.business_description = jo.optString("business_description"); // New field is added - Groups feature Rally:US708102 - 1610

+			if (description.length() > 1000)

+				throw new InvalidObjectException("technical description field is too long");

+			

+			if (business_description.length() > 1000) // New field is added - Groups feature Rally:US708102 - 1610

+				throw new InvalidObjectException("business description field is too long");

+

+			this.authorization = new FeedAuthorization();

+			JSONObject jauth = jo.getJSONObject("authorization");

+			this.authorization.setClassification(jauth.getString("classification"));

+			if (this.authorization.getClassification().length() > 32)

+				throw new InvalidObjectException("classification field is too long");

+			JSONArray ja = jauth.getJSONArray("endpoint_ids");

+			for (int i = 0; i < ja.length(); i++) {

+				JSONObject id = ja.getJSONObject(i);

+				FeedEndpointID fid = new FeedEndpointID(id.getString("id"), id.getString("password"));

+				if (fid.getId().length() > 20)

+					throw new InvalidObjectException("id field is too long ("+fid.getId()+")");

+				if (fid.getPassword().length() > 32)

+					throw new InvalidObjectException("password field is too long ("+fid.getPassword()+")");

+				this.authorization.getEndpoint_ids().add(fid);

+			}

+			if (this.authorization.getEndpoint_ids().size() < 1)

+				throw new InvalidObjectException("need to specify at least one endpoint_id");

+			ja = jauth.getJSONArray("endpoint_addrs");

+			for (int i = 0; i < ja.length(); i++) {

+				String addr = ja.getString(i);

+				if (!JSONUtilities.validIPAddrOrSubnet(addr))

+					throw new InvalidObjectException("bad IP addr or subnet mask: "+addr);

+				this.authorization.getEndpoint_addrs().add(addr);

+			}

+

+			this.publisher = jo.optString("publisher", "");

+			this.deleted   = jo.optBoolean("deleted", false);

+			this.suspended = jo.optBoolean("suspend", false);

+			JSONObject jol = jo.optJSONObject("links");

+			this.links = (jol == null) ? (new FeedLinks()) : (new FeedLinks(jol));

+		} catch (InvalidObjectException e) {

+			throw e;

+		} catch (Exception e) {

+			throw new InvalidObjectException("invalid JSON: "+e.getMessage());

+		}

+	}

+	public int getFeedid() {

+		return feedid;

+	}

+	public void setFeedid(int feedid) {

+		this.feedid = feedid;

+

+		// Create link URLs

+		FeedLinks fl = getLinks();

+		fl.setSelf(URLUtilities.generateFeedURL(feedid));

+		fl.setPublish(URLUtilities.generatePublishURL(feedid));

+		fl.setSubscribe(URLUtilities.generateSubscribeURL(feedid));

+		fl.setLog(URLUtilities.generateFeedLogURL(feedid));

+	}

+	

+	//new getter setters for groups- Rally:US708115 - 1610

+	public int getGroupid() {

+		return groupid;

+	}

+

+	public void setGroupid(int groupid) {

+		this.groupid = groupid;

+	}

+	

+	public String getName() {

+		return name;

+	}

+	public void setName(String name) {

+		this.name = name;

+	}

+	public String getVersion() {

+		return version;

+	}

+	public void setVersion(String version) {

+		this.version = version;

+	}

+	public String getDescription() {

+		return description;

+	}

+	public void setDescription(String description) {

+		this.description = description;

+	}

+    // New field is added - Groups feature Rally:US708102 - 1610

+	public String getBusiness_description() {

+		return business_description;

+	}

+

+	public void setBusiness_description(String business_description) {

+		this.business_description = business_description;

+	}

+

+	public FeedAuthorization getAuthorization() {

+		return authorization;

+	}

+	public void setAuthorization(FeedAuthorization authorization) {

+		this.authorization = authorization;

+	}

+	public String getPublisher() {

+		return publisher;

+	}

+	public void setPublisher(String publisher) {

+		if (publisher != null) {

+			if (publisher.length() > 8)

+				publisher = publisher.substring(0, 8);

+			this.publisher = publisher;

+		}

+	}

+	public FeedLinks getLinks() {

+		return links;

+	}

+	public void setLinks(FeedLinks links) {

+		this.links = links;

+	}

+

+	public boolean isDeleted() {

+		return deleted;

+	}

+

+	public void setDeleted(boolean deleted) {

+		this.deleted = deleted;

+	}

+

+	public boolean isSuspended() {

+		return suspended;

+	}

+

+	public void setSuspended(boolean suspended) {

+		this.suspended = suspended;

+	}

+

+	public Date getLast_mod() {

+		return last_mod;

+	}

+

+	public Date getCreated_date() {

+		return created_date;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("feedid", feedid);

+		jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610

+		jo.put("name", name);

+		jo.put("version", version);

+		jo.put("description", description);

+		jo.put("business_description", business_description); // New field is added - Groups feature Rally:US708102 - 1610

+		jo.put("authorization", authorization.asJSONObject());

+		jo.put("publisher", publisher);

+		jo.put("links", links.asJSONObject());

+		jo.put("deleted", deleted);

+		jo.put("suspend", suspended);

+		jo.put("last_mod", last_mod.getTime());

+		jo.put("created_date", created_date.getTime());

+		return jo;

+	}

+	public JSONObject asLimitedJSONObject() {

+		JSONObject jo = asJSONObject();

+		jo.remove("deleted");

+		jo.remove("feedid");

+		jo.remove("last_mod");

+		jo.remove("created_date");

+		return jo;

+	}

+	public JSONObject asJSONObject(boolean hidepasswords) {

+		JSONObject jo = asJSONObject();

+		if (hidepasswords) {

+			jo.remove("feedid");	// we no longer hide passwords; however, we do hide these fields

+			jo.remove("deleted");

+			jo.remove("last_mod");

+			jo.remove("created_date");

+		}

+		return jo;

+	}

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "delete from FEEDS where FEEDID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, feedid);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public synchronized boolean doInsert(Connection c) {

+		boolean rv = true;

+//		PreparedStatement ps = null;

+		try {

+			if (feedid == -1) {

+//				// Get the next feedid

+//				String sql = "insert into FEEDS_UNIQUEID (FEEDID) values (0)";

+//				ps = c.prepareStatement(sql, new String[] { "FEEDID" });

+//				ps.execute();

+//				ResultSet rs = ps.getGeneratedKeys();

+//				rs.first();

+//				setFeedid(rs.getInt(1));

+				// No feed ID assigned yet, so assign the next available one

+				setFeedid(next_feedid++);

+			}

+			// In case we insert a feed from synchronization

+			if (feedid > next_feedid)

+				next_feedid = feedid+1;

+

+			// Create FEED_ENDPOINT_IDS rows

+			FeedAuthorization auth = getAuthorization();

+			String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";

+			PreparedStatement ps2 = c.prepareStatement(sql);

+			for (FeedEndpointID fid : auth.getEndpoint_ids()) {

+				ps2.setInt(1, feedid);

+				ps2.setString(2, fid.getId());

+				ps2.setString(3, fid.getPassword());

+				ps2.executeUpdate();

+			}

+			ps2.close();

+

+			// Create FEED_ENDPOINT_ADDRS rows

+			sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";

+			ps2 = c.prepareStatement(sql);

+			for (String t : auth.getEndpoint_addrs()) {

+				ps2.setInt(1, feedid);

+				ps2.setString(2, t);

+				ps2.executeUpdate();

+			}

+			ps2.close();

+

+			// Finally, create the FEEDS row

+			sql = "insert into FEEDS (FEEDID, NAME, VERSION, DESCRIPTION, AUTH_CLASS, PUBLISHER, SELF_LINK, PUBLISH_LINK, SUBSCRIBE_LINK, LOG_LINK, DELETED, SUSPENDED,BUSINESS_DESCRIPTION, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)";

+			ps2 = c.prepareStatement(sql);

+			ps2.setInt(1, feedid);

+			ps2.setString(2, getName());

+			ps2.setString(3, getVersion());

+			ps2.setString(4, getDescription());

+			ps2.setString(5, getAuthorization().getClassification());

+			ps2.setString(6, getPublisher());

+			ps2.setString(7, getLinks().getSelf());

+			ps2.setString(8, getLinks().getPublish());

+			ps2.setString(9, getLinks().getSubscribe());

+			ps2.setString(10, getLinks().getLog());

+			ps2.setBoolean(11, isDeleted());

+			ps2.setBoolean(12, isSuspended());

+			ps2.setString(13,getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610

+			ps2.setInt(14,groupid); //New field is added - Groups feature Rally:US708115 - 1610

+			ps2.executeUpdate();

+			ps2.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+//		} finally {

+//			try {

+//				ps.close();

+//			} catch (SQLException e) {

+//				e.printStackTrace();

+//			}

+		}

+		return rv;

+	}

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		Feed oldobj = getFeedById(feedid);

+		PreparedStatement ps = null;

+		try {

+			Set<FeedEndpointID> newset = getAuthorization().getEndpoint_ids();

+			Set<FeedEndpointID> oldset = oldobj.getAuthorization().getEndpoint_ids();

+

+			// Insert new FEED_ENDPOINT_IDS rows

+			String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";

+			ps = c.prepareStatement(sql);

+			for (FeedEndpointID fid : newset) {

+				if (!oldset.contains(fid)) {

+					ps.setInt(1, feedid);

+					ps.setString(2, fid.getId());

+					ps.setString(3, fid.getPassword());

+					ps.executeUpdate();

+				}

+			}

+			ps.close();

+

+			// Delete old FEED_ENDPOINT_IDS rows

+			sql = "delete from FEED_ENDPOINT_IDS where FEEDID = ? AND USERID = ? AND PASSWORD = ?";

+			ps = c.prepareStatement(sql);

+			for (FeedEndpointID fid : oldset) {

+				if (!newset.contains(fid)) {

+					ps.setInt(1, feedid);

+					ps.setString(2, fid.getId());

+					ps.setString(3, fid.getPassword());

+					ps.executeUpdate();

+				}

+			}

+			ps.close();

+

+			// Insert new FEED_ENDPOINT_ADDRS rows

+			Set<String> newset2 = getAuthorization().getEndpoint_addrs();

+			Set<String> oldset2 = oldobj.getAuthorization().getEndpoint_addrs();

+			sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";

+			ps = c.prepareStatement(sql);

+			for (String t : newset2) {

+				if (!oldset2.contains(t)) {

+					ps.setInt(1, feedid);

+					ps.setString(2, t);

+					ps.executeUpdate();

+				}

+			}

+			ps.close();

+

+			// Delete old FEED_ENDPOINT_ADDRS rows

+			sql = "delete from FEED_ENDPOINT_ADDRS where FEEDID = ? AND ADDR = ?";

+			ps = c.prepareStatement(sql);

+			for (String t : oldset2) {

+				if (!newset2.contains(t)) {

+					ps.setInt(1, feedid);

+					ps.setString(2, t);

+					ps.executeUpdate();

+				}

+			}

+			ps.close();

+

+			// Finally, update the FEEDS row

+			sql = "update FEEDS set DESCRIPTION = ?, AUTH_CLASS = ?, DELETED = ?, SUSPENDED = ?, BUSINESS_DESCRIPTION=?, GROUPID=? where FEEDID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, getDescription());

+			ps.setString(2, getAuthorization().getClassification());

+			ps.setInt(3, deleted ? 1 : 0);

+			ps.setInt(4, suspended ? 1 : 0);

+			ps.setString(5, getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610

+			ps.setInt(6, groupid); //New field is added - Groups feature Rally:US708115 - 1610

+			ps.setInt(7, feedid);

+			ps.executeUpdate();

+			ps.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	

+	/** Rally US708115

+	 * Change Ownership of FEED - 1610

+	 * */

+	public boolean changeOwnerShip() {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection c = db.getConnection();

+			String sql = "update FEEDS set PUBLISHER = ? where FEEDID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, this.publisher);

+			ps.setInt(2, feedid);

+			ps.execute();

+			ps.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 changeOwnerShip: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+

+	@Override

+	public String getKey() {

+		return ""+getFeedid();

+	}

+

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof Feed))

+			return false;

+		Feed of = (Feed) obj;

+		if (feedid != of.feedid)

+			return false;

+		if (groupid != of.groupid) //New field is added - Groups feature Rally:US708115 - 1610

+			return false;

+		if (!name.equals(of.name))

+			return false;

+		if (!version.equals(of.version))

+			return false;

+		if (!description.equals(of.description))

+			return false;

+		if (!business_description.equals(of.business_description)) // New field is added - Groups feature Rally:US708102 - 1610

+			return false;

+		if (!publisher.equals(of.publisher))

+			return false;

+		if (!authorization.equals(of.authorization))

+			return false;

+		if (!links.equals(of.links))

+			return false;

+		if (deleted != of.deleted)

+			return false;

+		if (suspended != of.suspended)

+			return false;

+		return true;

+	}

+

+	@Override

+	public String toString() {

+		return "FEED: feedid=" + feedid + ", name=" + name + ", version=" + version;

+	}

+}
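A sketch of the create-feed path (illustrative only), assuming the JSON layout accepted by the Feed(JSONObject) constructor above and the same DB/connection handling used elsewhere in this commit; all names and values are made up.

package com.att.research.datarouter.provisioning.beans;

import java.sql.Connection;

import org.json.JSONArray;
import org.json.JSONObject;

import com.att.research.datarouter.provisioning.utils.DB;

public class FeedExample {
	public static void main(String[] args) throws Exception {
		JSONObject jo = new JSONObject()
			.put("name", "EXAMPLE_FEED")
			.put("version", "1.0")
			.put("description", "illustrative feed")
			.put("business_description", "illustrative feed")
			.put("authorization", new JSONObject()
				.put("classification", "unclassified")
				.put("endpoint_ids", new JSONArray()
					.put(new JSONObject().put("id", "pubuser").put("password", "pubpass")))
				.put("endpoint_addrs", new JSONArray()));
		Feed feed = new Feed(jo);		// validates field lengths and the endpoint lists
		DB db = new DB();
		Connection conn = db.getConnection();
		if (feed.doInsert(conn))		// assigns the next FEEDID and writes the FEEDS rows
			System.out.println(feed);	// FEED: feedid=..., name=EXAMPLE_FEED, version=1.0
		db.release(conn);
	}
}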

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedAuthorization.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedAuthorization.java
new file mode 100644
index 0000000..5701ce9
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedAuthorization.java
@@ -0,0 +1,96 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.util.HashSet;

+import java.util.Set;

+

+import org.json.JSONArray;

+import org.json.JSONObject;

+

+/**

+ * The representation of a Feed authorization.  This encapsulates the authorization information about a feed.

+ * @author Robert Eby

+ * @version $Id: FeedAuthorization.java,v 1.2 2013/06/20 14:11:05 eby Exp $

+ */

+public class FeedAuthorization implements JSONable {

+	private String classification;

+	private Set<FeedEndpointID> endpoint_ids;

+	private Set<String> endpoint_addrs;

+

+	public FeedAuthorization() {

+		this.classification = "";

+		this.endpoint_ids = new HashSet<FeedEndpointID>();

+		this.endpoint_addrs = new HashSet<String>();

+	}

+	public String getClassification() {

+		return classification;

+	}

+	public void setClassification(String classification) {

+		this.classification = classification;

+	}

+	public Set<FeedEndpointID> getEndpoint_ids() {

+		return endpoint_ids;

+	}

+	public void setEndpoint_ids(Set<FeedEndpointID> endpoint_ids) {

+		this.endpoint_ids = endpoint_ids;

+	}

+	public Set<String> getEndpoint_addrs() {

+		return endpoint_addrs;

+	}

+	public void setEndpoint_addrs(Set<String> endpoint_addrs) {

+		this.endpoint_addrs = endpoint_addrs;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("classification", classification);

+		JSONArray ja = new JSONArray();

+		for (FeedEndpointID eid : endpoint_ids) {

+			ja.put(eid.asJSONObject());

+		}

+		jo.put("endpoint_ids", ja);

+		ja = new JSONArray();

+		for (String t : endpoint_addrs) {

+			ja.put(t);

+		}

+		jo.put("endpoint_addrs", ja);

+		return jo;

+	}
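
+	// A short sketch of populating and serializing this bean; the credential and

+	// subnet values below are illustrative only.

+	//

+	//     FeedAuthorization auth = new FeedAuthorization();

+	//     auth.setClassification("unclassified");

+	//     auth.getEndpoint_ids().add(new FeedEndpointID("dradmin", "dradmin"));

+	//     auth.getEndpoint_addrs().add("10.1.2.0/24");

+	//     JSONObject jo = auth.asJSONObject();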

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof FeedAuthorization))

+			return false;

+		FeedAuthorization of = (FeedAuthorization) obj;

+		if (!classification.equals(of.classification))

+			return false;

+		if (!endpoint_ids.equals(of.endpoint_ids))

+			return false;

+		if (!endpoint_addrs.equals(of.endpoint_addrs))

+			return false;

+		return true;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedEndpointID.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedEndpointID.java
new file mode 100644
index 0000000..f009c64
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedEndpointID.java
@@ -0,0 +1,87 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.ResultSet;

+import java.sql.SQLException;

+

+import org.json.JSONObject;

+

+/**

+ * The representation of a Feed endpoint.  This contains a login/password pair.

+ * @author Robert Eby

+ * @version $Id: FeedEndpointID.java,v 1.1 2013/04/26 21:00:26 eby Exp $

+ */

+public class FeedEndpointID implements JSONable {

+	private String id;

+	private String password;

+

+	public FeedEndpointID() {

+		this("", "");

+	}

+	public FeedEndpointID(String id, String password) {

+		this.id = id;

+		this.password = password;

+	}

+	public FeedEndpointID(ResultSet rs) throws SQLException {

+		this.id       = rs.getString("USERID");

+		this.password = rs.getString("PASSWORD");

+	}

+

+	public String getId() {

+		return id;

+	}

+

+	public void setId(String id) {

+		this.id = id;

+	}

+

+	public String getPassword() {

+		return password;

+	}

+

+	public void setPassword(String password) {

+		this.password = password;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("id", id);

+		jo.put("password", password);

+		return jo;

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof FeedEndpointID))

+			return false;

+		FeedEndpointID f2 = (FeedEndpointID) obj;

+		return id.equals(f2.id) && password.equals(f2.password);

+	}

+	@Override

+	public int hashCode() {

+		return (id + ":" + password).hashCode();

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedLinks.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedLinks.java
new file mode 100644
index 0000000..ccce9c4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/FeedLinks.java
@@ -0,0 +1,103 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.InvalidObjectException;

+

+import org.json.JSONObject;

+

+/**

+ * The URLs associated with a Feed.

+ * @author Robert Eby

+ * @version $Id: FeedLinks.java,v 1.3 2013/07/05 13:48:05 eby Exp $

+ */

+public class FeedLinks implements JSONable {

+	private String self;

+	private String publish;

+	private String subscribe;

+	private String log;

+

+	public FeedLinks() {

+		self = publish = subscribe = log = null;

+	}

+

+	public FeedLinks(JSONObject jo) throws InvalidObjectException {

+		this();

+		self      = jo.getString("self");

+		publish   = jo.getString("publish");

+		subscribe = jo.getString("subscribe");

+		log       = jo.getString("log");

+	}

+

+	public String getSelf() {

+		return self;

+	}

+	public void setSelf(String self) {

+		this.self = self;

+	}

+	public String getPublish() {

+		return publish;

+	}

+	public void setPublish(String publish) {

+		this.publish = publish;

+	}

+	public String getSubscribe() {

+		return subscribe;

+	}

+	public void setSubscribe(String subscribe) {

+		this.subscribe = subscribe;

+	}

+	public String getLog() {

+		return log;

+	}

+	public void setLog(String log) {

+		this.log = log;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("self", self);

+		jo.put("publish", publish);

+		jo.put("subscribe", subscribe);

+		jo.put("log", log);

+		return jo;

+	}
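
+	// Sketch of the JSON round trip this class supports (URLs are placeholders);

+	// note that the JSONObject constructor above is declared to throw

+	// InvalidObjectException, so callers must handle it.

+	//

+	//     FeedLinks fl = new FeedLinks();

+	//     fl.setSelf("https://prov.example.com/feed/1");

+	//     fl.setPublish("https://prov.example.com/publish/1");

+	//     fl.setSubscribe("https://prov.example.com/subscribe/1");

+	//     fl.setLog("https://prov.example.com/feedlog/1");

+	//     FeedLinks copy = new FeedLinks(fl.asJSONObject());   // copy.equals(fl)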

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof FeedLinks))

+			return false;

+		FeedLinks of = (FeedLinks) obj;

+		if (!self.equals(of.self))

+			return false;

+		if (!publish.equals(of.publish))

+			return false;

+		if (!subscribe.equals(of.subscribe))

+			return false;

+		if (!log.equals(of.log))

+			return false;

+		return true;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Group.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Group.java
new file mode 100644
index 0000000..3f55b00
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Group.java
@@ -0,0 +1,417 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.InvalidObjectException;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.Date;

+import java.util.List;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.URLUtilities;

+

+/**

+ * The representation of a Group.  Groups can be retrieved from the DB, or stored/updated in the DB.

+ * @author vikram

+ * @version $Id: Group.java,v 1.0 2016/07/19 

+ */

+public class Group extends Syncable {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private static int next_groupid = getMaxGroupID() + 1;

+

+	private int groupid;

+	private String authid;

+	private String name;

+	private String description;

+	private String classification;

+	private String members;

+	private Date last_mod;

+	

+	

+	public static Group getGroupMatching(Group gup) {

+		String sql = String.format(

+			"select * from GROUPS where  NAME = \"%s\"",

+			gup.getName()

+		);

+		List<Group> list = getGroupsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	

+	public static Group getGroupMatching(Group gup, int groupid) {

+		String sql = String.format(

+			"select * from GROUPS where  NAME = \"%s\" and GROUPID != %d ",

+			gup.getName(),

+			groupid

+		);

+		List<Group> list = getGroupsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	

+	public static Group getGroupById(int id) {

+		String sql = "select * from GROUPS where GROUPID = " + id;

+		List<Group> list = getGroupsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	

+	public static Group getGroupByAuthId(String id) {

+		String sql = "select * from GROUPS where AUTHID = '" + id +"'";

+		List<Group> list = getGroupsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	

+	public static Collection<Group> getAllgroups() {

+		return getGroupsForSQL("select * from GROUPS");

+	}
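
+	// Illustrative lookups using the helpers above (the group name and id are just

+	// examples); the single-result helpers return null when nothing matches.

+	//

+	//     Group byName = Group.getGroupMatching(new Group("my-group", "", ""));

+	//     Group byId   = Group.getGroupById(7);

+	//     Collection<Group> all = Group.getAllgroups();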

+	private static List<Group> getGroupsForSQL(String sql) {

+		List<Group> list = new ArrayList<Group>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				Group group = new Group(rs);

+				list.add(group);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return list;

+	}

+	public static int getMaxGroupID() {

+		int max = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select MAX(groupid) from GROUPS");

+			if (rs.next()) {

+				max = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.info("getMaxGroupID: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return max;

+	}

+	public static Collection<String> getGroupsByClassfication(String classfication) {

+		List<String> list = new ArrayList<String>();

+		String sql = "select * from GROUPS where classification = '"+classfication+"'";

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet  rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				int groupid = rs.getInt("groupid");

+				//list.add(URLUtilities.generateSubscriptionURL(groupid));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return list;

+	}

+	/**

+	 * Return a count of the number of active subscriptions in the DB.

+	 * @return the count

+	 */

+	public static int countActiveSubscriptions() {

+		int count = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");

+			if (rs.next()) {

+				count = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.warn("PROV0008 countActiveSubscriptions: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return count;

+	}

+

+	public Group() {

+		this("", "", "");

+	}

+	public Group(String name, String desc, String members) {

+		this.groupid = -1;

+		this.authid = "";

+		this.name = name;

+		this.description = desc;

+		this.members = members;

+		this.classification = "";

+		this.last_mod = new Date();

+	}

+	

+	

+	public Group(ResultSet rs) throws SQLException {

+		this.groupid        = rs.getInt("GROUPID");

+		this.authid       = rs.getString("AUTHID");

+		this.name       = rs.getString("NAME");

+		this.description       = rs.getString("DESCRIPTION");

+		this.classification       = rs.getString("CLASSIFICATION");

+		this.members       = rs.getString("MEMBERS");

+		this.last_mod     = rs.getDate("LAST_MOD");

+	}

+	

+

+	

+	public Group(JSONObject jo) throws InvalidObjectException {

+		this("", "", "");

+		try {

+			// The JSONObject is assumed to contain a vnd.att-dr.group representation

+			this.groupid  = jo.optInt("groupid", -1);

+			String gname      = jo.getString("name");

+			String gdescription     = jo.getString("description");

+			

+			this.authid = jo.getString("authid");

+			this.name = gname;

+			this.description = gdescription;

+			this.classification = jo.getString("classification");

+			this.members = jo.getString("members");

+		

+			if (gname.length() > 50)

+				throw new InvalidObjectException("Group name is too long");

+			if (gdescription.length() > 256)

+				throw new InvalidObjectException("Group Description is too long");

+		} catch (InvalidObjectException e) {

+			throw e;

+		} catch (Exception e) {

+			throw new InvalidObjectException("invalid JSON: "+e.getMessage());

+		}

+	}
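
+	// The constructor above expects a vnd.att-dr.group style object along these lines

+	// (field values illustrative; "members" is stored as an opaque string):

+	//

+	//     {

+	//        "name": "my-group",

+	//        "description": "example group",

+	//        "authid": "...",

+	//        "classification": "unclassified",

+	//        "members": "..."

+	//     }

+	//

+	// "groupid" is optional (it defaults to -1 and is assigned by doInsert()); names

+	// longer than 50 characters and descriptions longer than 256 are rejected.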

+	public int getGroupid() {

+		return groupid;

+	}

+	

+	public static Logger getIntlogger() {

+		return intlogger;

+	}

+	public void setGroupid(int groupid) {

+		this.groupid = groupid;

+	}

+	

+	public static void setIntlogger(Logger intlogger) {

+		Group.intlogger = intlogger;

+	}

+	public static int getNext_groupid() {

+		return next_groupid;

+	}

+	public static void setNext_groupid(int next_groupid) {

+		Group.next_groupid = next_groupid;

+	}

+	public String getAuthid() {

+		return authid;

+	}

+	public void setAuthid(String authid) {

+		this.authid = authid;

+	}

+	public String getName() {

+		return name;

+	}

+	public void setName(String name) {

+		this.name = name;

+	}

+	public String getDescription() {

+		return description;

+	}

+	public void setDescription(String description) {

+		this.description = description;

+	}

+	public String getClassification() {

+		return classification;

+	}

+	public void setClassification(String classification) {

+		this.classification = classification;

+	}

+	public String getMembers() {

+		return members;

+	}

+	public void setMembers(String members) {

+		this.members = members;

+	}

+	public Date getLast_mod() {

+		return last_mod;

+	}

+	public void setLast_mod(Date last_mod) {

+		this.last_mod = last_mod;

+	}

+	

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("groupid", groupid);

+		jo.put("authid", authid);

+		jo.put("name", name);

+		jo.put("description", description);

+		jo.put("classification", classification);

+		jo.put("members", members);

+		jo.put("last_mod", last_mod.getTime());

+		return jo;

+	}

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			if (groupid == -1) {

+				// No group ID assigned yet, so assign the next available one

+				setGroupid(next_groupid++);

+			}

+			// In case we insert a group from synchronization

+			if (groupid > next_groupid)

+				next_groupid = groupid+1;

+

+			

+			// Create the GROUPS row

+			String sql = "insert into GROUPS (GROUPID, AUTHID, NAME, DESCRIPTION, CLASSIFICATION, MEMBERS) values (?, ?, ?, ?, ?, ?)";

+			ps = c.prepareStatement(sql, new String[] { "GROUPID" });

+			ps.setInt(1, groupid);

+			ps.setString(2, authid);

+			ps.setString(3, name);

+			ps.setString(4, description);

+			ps.setString(5, classification);

+			ps.setString(6, members);

+			ps.execute();

+			ps.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "update GROUPS set AUTHID = ?, NAME = ?, DESCRIPTION = ?, CLASSIFICATION = ? ,  MEMBERS = ? where GROUPID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, authid);

+			ps.setString(2, name);

+			ps.setString(3, description);

+			ps.setString(4, classification);

+			ps.setString(5, members);

+			ps.setInt(6, groupid);

+			ps.executeUpdate();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}
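
+	// Sketch of the insert/update cycle using the same DB helper these methods rely

+	// on (error handling and connection release on failure omitted for brevity):

+	//

+	//     DB db = new DB();

+	//     Connection conn = db.getConnection();

+	//     Group g = new Group("my-group", "example group", "");

+	//     g.doInsert(conn);               // assigns the next free GROUPID

+	//     g.setDescription("updated");

+	//     g.doUpdate(conn);

+	//     db.release(conn);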

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "delete from GROUPS where GROUPID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, groupid);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public String getKey() {

+		return ""+getGroupid();

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof Group))

+			return false;

+		Group os = (Group) obj;

+		if (groupid != os.groupid)

+			return false;

+		if (!authid.equals(os.authid))

+			return false;

+		if (!name.equals(os.name))

+			return false;

+		if (!description.equals(os.description))

+			return false;

+		if (!classification.equals(os.classification))

+			return false;

+		if (!members.equals(os.members))

+			return false;

+		

+		return true;

+	}

+

+	@Override

+	public String toString() {

+		return "GROUP: groupid=" + groupid;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/IngressRoute.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/IngressRoute.java
new file mode 100644
index 0000000..a9ea9bc
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/IngressRoute.java
@@ -0,0 +1,542 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.net.InetAddress;

+import java.net.UnknownHostException;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.Set;

+import java.util.SortedSet;

+import java.util.TreeSet;

+

+import javax.servlet.http.HttpServletRequest;

+

+import org.apache.commons.codec.binary.Base64;

+import org.apache.log4j.Logger;

+import org.json.JSONArray;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * The representation of one route in the Ingress Route Table.

+ *

+ * @author Robert P. Eby

+ * @version $Id: IngressRoute.java,v 1.3 2013/12/16 20:30:23 eby Exp $

+ */

+public class IngressRoute extends NodeClass implements Comparable<IngressRoute> {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private final int seq;

+	private final int feedid;

+	private final String userid;

+	private final String subnet;

+	private int nodelist;

+	private SortedSet<String> nodes;

+

+	/**

+	 * Get all IngressRoutes in the database, sorted in order according to their sequence field.

+	 * @return a sorted set of IngressRoutes

+	 */

+	public static SortedSet<IngressRoute> getAllIngressRoutes() {

+		return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES");

+	}

+	/**

+	 * Get all IngressRoutes in the database with a particular sequence number.

+	 * @param seq the sequence number

+	 * @return a set of IngressRoutes

+	 */

+	public static Set<IngressRoute> getIngressRoutesForSeq(int seq) {

+		return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = "+seq);

+	}

+	private static SortedSet<IngressRoute> getAllIngressRoutesForSQL(String sql) {

+		SortedSet<IngressRoute> set = new TreeSet<IngressRoute>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				int seq       = rs.getInt("SEQUENCE");

+				int feedid    = rs.getInt("FEEDID");

+				String user   = rs.getString("USERID");

+				String subnet = rs.getString("SUBNET");

+				int nodeset   = rs.getInt("NODESET");

+				set.add(new IngressRoute(seq, feedid, user, subnet, nodeset));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return set;

+	}

+

+	/**

+	 * Get the maximum node set ID in use in the DB.

+	 * @return the integer value of the maximum

+	 */

+	public static int getMaxNodeSetID() {

+		return getMax("select max(SETID) as MAX from NODESETS");

+	}

+	/**

+	 * Get the maximum node sequence number in use in the DB.

+	 * @return the integer value of the maximum

+	 */

+	public static int getMaxSequence() {

+		return getMax("select max(SEQUENCE) as MAX from INGRESS_ROUTES");

+	}

+	private static int getMax(String sql) {

+		int rv = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			if (rs.next()) {

+				rv = rs.getInt("MAX");

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return rv;

+	}

+

+	/**

+	 * Get an Ingress Route for a particular feed ID, user, and subnet

+	 * @param feedid the Feed ID to look for

+	 * @param user the user name to look for

+	 * @param subnet the subnet to look for

+	 * @return the Ingress Route, or null if there is none

+	 */

+	public static IngressRoute getIngressRoute(int feedid, String user, String subnet) {

+		IngressRoute v = null;

+		PreparedStatement ps = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			String sql = "select SEQUENCE, NODESET from INGRESS_ROUTES where FEEDID = ? AND USERID = ? and SUBNET = ?";

+			ps = conn.prepareStatement(sql);

+			ps.setInt(1, feedid);

+			ps.setString(2, user);

+			ps.setString(3, subnet);

+			ResultSet rs = ps.executeQuery();

+			if (rs.next()) {

+				int seq = rs.getInt("SEQUENCE");

+				int nodeset = rs.getInt("NODESET");

+				v = new IngressRoute(seq, feedid, user, subnet, nodeset);

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return v;

+	}

+

+	/**

+	 * Get a collection of all Ingress Routes with a particular sequence number.

+	 * @param seq the sequence number to look for

+	 * @return the collection (may be empty).

+	 */

+	public static Collection<IngressRoute> getIngressRoute(int seq) {

+		Collection<IngressRoute> rv = new ArrayList<IngressRoute>();

+		PreparedStatement ps = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			String sql = "select FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = ?";

+			ps = conn.prepareStatement(sql);

+			ps.setInt(1, seq);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				int feedid = rs.getInt("FEEDID");

+				String user  = rs.getString("USERID");

+				String subnet = rs.getString("SUBNET");

+				int nodeset = rs.getInt("NODESET");

+				rv.add(new IngressRoute(seq, feedid, user, subnet, nodeset));

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}
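
+	// Example lookups (feed id, user and subnet values are illustrative): an exact

+	// match on feed/user/subnet, and all routes sharing a sequence number.

+	//

+	//     IngressRoute one = IngressRoute.getIngressRoute(10, "dradmin", "10.0.0.0/8");

+	//     Collection<IngressRoute> atSeq = IngressRoute.getIngressRoute(5);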

+

+	public IngressRoute(int seq, int feedid, String user, String subnet, Collection<String> nodes)

+		throws IllegalArgumentException

+	{

+		this(seq, feedid, user, subnet);

+		this.nodelist = -1;

+		this.nodes = new TreeSet<String>(nodes);

+	}

+

+	public IngressRoute(int seq, int feedid, String user, String subnet, int nodeset)

+		throws IllegalArgumentException

+	{

+		this(seq, feedid, user, subnet);

+		this.nodelist = nodeset;

+		this.nodes = new TreeSet<String>(readNodes());

+	}

+

+	private IngressRoute(int seq, int feedid, String user, String subnet)

+		throws IllegalArgumentException

+	{

+		this.seq = seq;

+		this.feedid = feedid;

+		this.userid = (user == null) ? "-" : user;

+		this.subnet = (subnet == null) ? "-" : subnet;

+		this.nodelist = -1;

+		this.nodes = null;

+		if (Feed.getFeedById(feedid) == null)

+			throw new IllegalArgumentException("No such feed: "+feedid);

+		if (!this.subnet.equals("-")) {

+			SubnetMatcher sm = new SubnetMatcher(subnet);

+			if (!sm.isValid())

+				throw new IllegalArgumentException("Invalid subnet: "+subnet);

+		}

+	}

+

+	public IngressRoute(JSONObject jo) {

+		this.seq    = jo.optInt("seq");

+		this.feedid = jo.optInt("feedid");

+		String t    = jo.optString("user");

+		this.userid = t.equals("") ? "-" : t;

+		t           = jo.optString("subnet");

+		this.subnet = t.equals("") ? "-" : t;

+		this.nodelist = -1;

+		this.nodes = new TreeSet<String>();

+		JSONArray ja = jo.getJSONArray("node");

+		for (int i = 0; i < ja.length(); i++)

+			this.nodes.add(ja.getString(i));

+	}

+	/**

+	 * Does this particular IngressRoute match a request, represented by feedid and req?

+	 * To match, <i>feedid</i> must match the feed ID in the route, the user in the route

+	 * (if specified) must match the user in the request, and the subnet in the route (if specified)

+	 * must match the subnet from the request.

+	 * @param feedid the feedid for this request

+	 * @param req the remainder of the request

+	 * @return true if a match, false otherwise

+	 */

+	public boolean matches(int feedid, HttpServletRequest req) {

+		// Check feedid

+		if (this.feedid != feedid)

+			return false;

+

+		// Get user from request and compare

+		// Note: we don't check the password; the node will do that

+		if (userid.length() > 0 && !userid.equals("-")) {

+			String credentials = req.getHeader("Authorization");

+			if (credentials == null || !credentials.startsWith("Basic "))

+				return false;

+			String t = new String(Base64.decodeBase64(credentials.substring(6)));

+			int ix = t.indexOf(':');

+			if (ix >= 0)

+				t = t.substring(0, ix);

+			if (!t.equals(this.userid))

+				return false;

+		}

+

+		// If this route has a subnet, match it against the requester's IP addr

+		if (subnet.length() > 0 && !subnet.equals("-")) {

+			try {

+				InetAddress inet = InetAddress.getByName(req.getRemoteAddr());

+				SubnetMatcher sm = new SubnetMatcher(subnet);

+				return sm.matches(inet.getAddress());

+			} catch (UnknownHostException e) {

+				return false;

+			}

+		}

+		return true;

+	}
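
+	// One way a caller might apply the check above: walk the route table and use the

+	// node set of the first route that matches the incoming request (sketch only;

+	// feedid and req come from the surrounding servlet code).

+	//

+	//     for (IngressRoute ir : IngressRoute.getAllIngressRoutes()) {

+	//         if (ir.matches(feedid, req))

+	//             return ir.getNodes();

+	//     }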

+

+	/**

+	 *	Compare IP addresses as byte arrays to a subnet specified as a CIDR.

+	 *  Taken from com.att.research.datarouter.node.SubnetMatcher and modified somewhat.

+	 */

+	public class SubnetMatcher {

+		private byte[]	sn;

+		private int	len;

+		private int	mask;

+		private boolean valid;

+

+		/**

+		 * Construct a subnet matcher given a CIDR

+		 * @param subnet	The CIDR to match

+		 */

+		public SubnetMatcher(String subnet) {

+			int i = subnet.lastIndexOf('/');

+			if (i == -1) {

+				try {

+					sn = InetAddress.getByName(subnet).getAddress();

+					len = sn.length;

+					valid = true;

+				} catch (UnknownHostException e) {

+					len = 0;

+					valid = false;

+				}

+				mask = 0;

+			} else {

+				int n = Integer.parseInt(subnet.substring(i + 1));

+				try {

+					sn = InetAddress.getByName(subnet.substring(0, i)).getAddress();

+					valid = true;

+				} catch (UnknownHostException e) {

+					valid = false;

+				}

+				len = n / 8;

+				mask = ((0xff00) >> (n % 8)) & 0xff;

+			}

+		}

+		public boolean isValid() {

+			return valid;

+		}

+		/**

+		 *	Is the IP address in the CIDR?

+		 *	@param addr the IP address as bytes in network byte order

+		 *	@return true if the IP address matches.

+		 */

+		public boolean matches(byte[] addr) {

+			if (!valid || addr.length != sn.length) {

+				return false;

+			}

+			for (int i = 0; i < len; i++) {

+				if (addr[i] != sn[i]) {

+					return false;

+				}

+			}

+			if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {

+				return false;

+			}

+			return true;

+		}

+	}
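
+	// SubnetMatcher on its own (values illustrative); a bare host with no "/len"

+	// suffix must match the address exactly.

+	//

+	//     SubnetMatcher sm = new SubnetMatcher("10.10.0.0/16");

+	//     byte[] addr = InetAddress.getByName("10.10.136.1").getAddress();

+	//     sm.isValid();       // true

+	//     sm.matches(addr);   // true - the first 16 bits agree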

+

+	/**

+	 * Get the list of node names for this route.

+	 * @return the list

+	 */

+	public SortedSet<String> getNodes() {

+		return this.nodes;

+	}

+

+	private Collection<String> readNodes() {

+		Collection<String> set = new TreeSet<String>();

+		PreparedStatement ps = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			String sql = "select NODEID from NODESETS where SETID = ?";

+			ps = conn.prepareStatement(sql);

+			ps.setInt(1, nodelist);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				int id = rs.getInt("NODEID");

+				set.add(lookupNodeID(id));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return set;

+	}

+

+	/**

+	 * Delete the IRT route having this IngressRoute's feed ID, user ID, and subnet from the database.

+	 * @return true if the delete succeeded

+	 */

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			ps = c.prepareStatement("delete from INGRESS_ROUTES where FEEDID = ? and USERID = ? and SUBNET = ?");

+			ps.setInt(1, feedid);

+			ps.setString(2, userid);

+			ps.setString(3, subnet);

+			ps.execute();

+			ps.close();

+

+			ps = c.prepareStatement("delete from NODESETS where SETID = ?");

+			ps.setInt(1, nodelist);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@SuppressWarnings("resource")

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = false;

+		PreparedStatement ps = null;

+		try {

+			// Create the NODESETS rows & set nodelist

+			int set = getMaxNodeSetID() + 1;

+			this.nodelist = set;

+			for (String node : nodes) {

+				int id = lookupNodeName(node);

+				ps = c.prepareStatement("insert into NODESETS (SETID, NODEID) values (?,?)");

+				ps.setInt(1, this.nodelist);

+				ps.setInt(2, id);

+				ps.execute();

+				ps.close();

+			}

+

+			// Create the INGRESS_ROUTES row

+			ps = c.prepareStatement("insert into INGRESS_ROUTES (SEQUENCE, FEEDID, USERID, SUBNET, NODESET) values (?, ?, ?, ?, ?)");

+			ps.setInt(1, this.seq);

+			ps.setInt(2, this.feedid);

+			ps.setString(3, this.userid);

+			ps.setString(4, this.subnet);

+			ps.setInt(5, this.nodelist);

+			ps.execute();

+			ps.close();

+			rv = true;

+		} catch (SQLException e) {

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				if (ps != null)

+					ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public boolean doUpdate(Connection c) {

+		return doDelete(c) && doInsert(c);

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("feedid", feedid);

+		// Note: for user and subnet, null, "", and "-" are equivalent

+		if (userid != null && !userid.equals("-") && !userid.equals(""))

+			jo.put("user", userid);

+		if (subnet != null && !subnet.equals("-") && !subnet.equals(""))

+			jo.put("subnet", subnet);

+		jo.put("seq", seq);

+		jo.put("node", nodes);

+		return jo;

+	}

+

+	@Override

+	public String getKey() {

+		return String.format("%d/%s/%s/%d", feedid, (userid == null)?"":userid, (subnet == null)?"":subnet, seq);

+	}

+

+	@Override

+	public int hashCode() {

+		return toString().hashCode();

+	}

+

+	@Override

+	public boolean equals(Object obj) {

+		try {

+			if (!(obj instanceof IngressRoute))

+				return false;

+			return this.compareTo((IngressRoute) obj) == 0;

+		} catch (NullPointerException e) {

+			return false;

+		}

+	}

+

+	@Override

+	public int compareTo(IngressRoute in) {

+		if (in == null)

+			throw new NullPointerException();

+		int n = this.feedid - in.feedid;

+		if (n != 0)

+			return n;

+		n = this.seq - in.seq;

+		if (n != 0)

+			return n;

+		n = this.userid.compareTo(in.userid);

+		if (n != 0)

+			return n;

+		n = this.subnet.compareTo(in.subnet);

+		if (n != 0)

+			return n;

+		return this.nodes.equals(in.nodes) ? 0 : 1;

+	}

+

+	@Override

+	public String toString() {

+		return String.format("INGRESS: feed=%d, userid=%s, subnet=%s, seq=%d", feedid, (userid == null)?"":userid, (subnet == null)?"":subnet, seq);

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Insertable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Insertable.java
new file mode 100644
index 0000000..6604ab4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Insertable.java
@@ -0,0 +1,41 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+

+/**

+ * An object that can be INSERT-ed into the database.

+ * @author Robert Eby

+ * @version $Id: Insertable.java,v 1.2 2013/05/29 14:44:36 eby Exp $

+ */

+public interface Insertable {

+	/**

+	 * Insert this object into the DB.

+	 * @param c the JDBC Connection to use

+	 * @return true if the INSERT succeeded, false otherwise

+	 */

+	public boolean doInsert(Connection c);

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/JSONable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/JSONable.java
new file mode 100644
index 0000000..cbea9ad
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/JSONable.java
@@ -0,0 +1,40 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import org.json.JSONObject;

+

+/**

+ * An object that can be represented as a {@link JSONObject}.

+ * @author Robert Eby

+ * @version $Id: JSONable.java,v 1.1 2013/04/26 21:00:26 eby Exp $

+ */

+public interface JSONable {

+	/**

+	 * Get a JSONObject representing this object.

+	 * @return the JSONObject

+	 */

+	public JSONObject asJSONObject();

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LOGJSONable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LOGJSONable.java
new file mode 100644
index 0000000..93cdfaa
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LOGJSONable.java
@@ -0,0 +1,40 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import org.json.LOGJSONObject;

+

+/**

+ * An object that can be represented as a {@link LOGJSONObject}.

+ * @author Robert Eby

+ * @version $Id: JSONable.java,v 1.1 2013/04/26 21:00:26 eby Exp $

+ */

+public interface LOGJSONable {

+	/**

+	 * Get a JSONObject representing this object.

+	 * @return the JSONObject

+	 */

+	public LOGJSONObject asJSONObject();

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Loadable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Loadable.java
new file mode 100644
index 0000000..3676f45
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Loadable.java
@@ -0,0 +1,65 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.SQLException;

+

+import com.att.research.datarouter.provisioning.utils.LogfileLoader;

+

+/**

+ * This interface is used by bean classes that can be loaded into the LOG_RECORDS table using the

+ * PreparedStatement at {@link LogfileLoader}.INSERT_SQL.

+ *

+ * @author Robert Eby

+ * @version $Id: Loadable.java,v 1.2 2013/08/06 13:28:33 eby Exp $

+ */

+public interface Loadable {

+	/**

+	 * Load the 19 fields in the PreparedStatement <i>ps</i>. The fields are:

+	 * <ol>

+	 * <li>type (String)</li>

+	 * <li>event_time (long)</li>

+	 * <li>publish ID (String)</li>

+	 * <li>feed ID (int)</li>

+	 * <li>request URI (String)</li>

+	 * <li>method (String)</li>

+	 * <li>content type (String)</li>

+	 * <li>content length (long)</li>

+	 * <li>feed File ID (String)</li>

+	 * <li>remote address (String)</li>

+	 * <li>user (String)</li>

+	 * <li>status (int)</li>

+	 * <li>delivery subscriber id (int)</li>

+	 * <li>delivery File ID (String)</li>

+	 * <li>result (int)</li>

+	 * <li>attempts (int)</li>

+	 * <li>reason (String)</li>

+	 * <li>record ID (long)</li>

+	 * <li>content length 2 (long)</li>

+	 * </ol>

+	 * @param ps the PreparedStatement to load

+	 */

+	public void load(PreparedStatement ps) throws SQLException;

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LogRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LogRecord.java
new file mode 100644
index 0000000..1ddc509
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/LogRecord.java
@@ -0,0 +1,235 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.IOException;

+import java.io.OutputStream;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.sql.Types;

+import java.text.ParseException;

+import java.util.Iterator;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.RLEBitSet;

+

+/**

+ * The representation of a Log Record, as retrieved from the DB.  Since this record format is only used

+ * to replicate between provisioning servers, it is very bare-bones; e.g. there are no field setters and only 1 getter.

+ * @author Robert Eby

+ * @version $Id: LogRecord.java,v 1.7 2014/03/12 19:45:41 eby Exp $

+ */

+public class LogRecord extends BaseLogRecord {

+	/**

+	 * Print all log records whose RECORD_IDs are in the bit set provided.

+	 * @param os the {@link OutputStream} to print the records on

+	 * @param bs the {@link RLEBitSet} listing the record IDs to print

+	 * @throws IOException

+	 */

+	public static void printLogRecords(OutputStream os, RLEBitSet bs) throws IOException {

+		final String sql = "select * from LOG_RECORDS where RECORD_ID >= ? AND RECORD_ID <= ?";

+		DB db = new DB();

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			Statement stmt = conn.createStatement();

+			Iterator<Long[]> iter = bs.getRangeIterator();

+			PreparedStatement ps = conn.prepareStatement(sql);

+			while (iter.hasNext()) {

+				Long[] n = iter.next();

+				ps.setLong(1, n[0]);

+				ps.setLong(2, n[1]);

+				ResultSet rs = ps.executeQuery();

+				while (rs.next()) {

+					LogRecord lr = new LogRecord(rs);

+					os.write(lr.toString().getBytes());

+				}

+				rs.close();

+				ps.clearParameters();

+			}

+			ps.close();

+			stmt.close();

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			if (conn != null)

+				db.release(conn);

+		}

+	}
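
+	// The main() at the bottom of this class drives this method from the command line;

+	// programmatically it looks like the sketch below.  The range string accepted by

+	// RLEBitSet is assumed to use the same compact "low-high" form as the command line.

+	//

+	//     RLEBitSet bs = new RLEBitSet("1-1000");

+	//     LogRecord.printLogRecords(System.out, bs);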

+

+	private final String type;

+	private final String feedFileid;

+	private final String remoteAddr;

+	private final String user;

+	private final int status;

+	private final int subid;

+	private final String fileid;

+	private final int result;

+	private final int attempts;

+	private final String reason;

+	private final long record_id;

+	private final long clength2;

+

+	public LogRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		this.type       = rs.getString("TYPE");

+		this.feedFileid = rs.getString("FEED_FILEID");

+		this.remoteAddr = rs.getString("REMOTE_ADDR");

+		this.user       = rs.getString("USER");

+		this.status     = rs.getInt("STATUS");

+

+		this.subid      = rs.getInt("DELIVERY_SUBID");

+		this.fileid     = rs.getString("DELIVERY_FILEID");

+		this.result     = rs.getInt("RESULT");

+

+		this.attempts   = rs.getInt("ATTEMPTS");

+		this.reason     = rs.getString("REASON");

+

+		this.record_id  = rs.getLong("RECORD_ID");

+		this.clength2   = rs.getLong("CONTENT_LENGTH_2");

+	}

+	public LogRecord(String[] pp) throws ParseException {

+		super(pp);

+		this.type       = pp[8];

+		this.feedFileid = pp[9];

+		this.remoteAddr = pp[10];

+		this.user       = pp[11];

+		this.status     = Integer.parseInt(pp[12]);

+

+		this.subid      = Integer.parseInt(pp[13]);

+		this.fileid     = pp[14];

+		this.result     = Integer.parseInt(pp[15]);

+

+		this.attempts   = Integer.parseInt(pp[16]);

+		this.reason     = pp[17];

+

+		this.record_id  = Long.parseLong(pp[18]);

+		this.clength2   = (pp.length == 20) ? Long.parseLong(pp[19]) : 0;

+	}

+

+	public long getRecordId() {

+		return record_id;

+	}

+

+	@Override

+	public String toString() {

+		return

+			sdf.format(getEventTime()) + "|"

+			+ "LOG|"

+			+ getPublishId() + "|"

+			+ getFeedid() + "|"

+			+ getRequestUri() + "|"

+			+ getMethod() + "|"

+			+ getContentType() + "|"

+			+ getContentLength() + "|"

+			+ type + "|"

+			+ feedFileid + "|"

+			+ remoteAddr + "|"

+			+ user + "|"

+			+ status + "|"

+			+ subid + "|"

+			+ fileid + "|"

+			+ result + "|"

+			+ attempts + "|"

+			+ reason + "|"

+			+ record_id + "|"

+			+ clength2

+			+ "\n";

+	}

+

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, type);

+		super.load(ps);				// loads fields 2-8

+		if (type.equals("pub")) {

+			ps.setString(9,  feedFileid);

+			ps.setString(10, remoteAddr);

+			ps.setString(11, user);

+			ps.setInt   (12, status);

+			ps.setNull  (13, Types.INTEGER);

+			ps.setNull  (14, Types.VARCHAR);

+			ps.setNull  (15, Types.INTEGER);

+			ps.setNull  (16, Types.INTEGER);

+			ps.setNull  (17, Types.VARCHAR);

+			ps.setLong  (18, record_id);

+			ps.setNull  (19, Types.BIGINT);

+		} else if (type.equals("del")) {

+			ps.setNull  (9,  Types.VARCHAR);

+			ps.setNull  (10, Types.VARCHAR);

+			ps.setString(11, user);

+			ps.setNull  (12, Types.INTEGER);

+			ps.setInt   (13, subid);

+			ps.setString(14, fileid);

+			ps.setInt   (15, result);

+			ps.setNull  (16, Types.INTEGER);

+			ps.setNull  (17, Types.VARCHAR);

+			ps.setLong  (18, record_id);

+			ps.setNull  (19, Types.BIGINT);

+		} else if (type.equals("exp")) {

+			ps.setNull  (9,  Types.VARCHAR);

+			ps.setNull  (10, Types.VARCHAR);

+			ps.setNull  (11, Types.VARCHAR);

+			ps.setNull  (12, Types.INTEGER);

+			ps.setInt   (13, subid);

+			ps.setString(14, fileid);

+			ps.setNull  (15, Types.INTEGER);

+			ps.setInt   (16, attempts);

+			ps.setString(17, reason);

+			ps.setLong  (18, record_id);

+			ps.setNull  (19, Types.BIGINT);

+		} else if (type.equals("pbf")) {

+			ps.setString( 9, feedFileid);

+			ps.setString(10, remoteAddr);

+			ps.setString(11, user);

+			ps.setNull  (12, Types.INTEGER);

+			ps.setNull  (13, Types.INTEGER);

+			ps.setNull  (14, Types.VARCHAR);

+			ps.setNull  (15, Types.INTEGER);

+			ps.setNull  (16, Types.INTEGER);

+			ps.setNull  (17, Types.VARCHAR);

+			ps.setLong  (18, record_id);

+			ps.setLong  (19, clength2);

+		} else if (type.equals("dlx")) {

+			ps.setNull  ( 9, Types.VARCHAR);

+			ps.setNull  (10, Types.VARCHAR);

+			ps.setNull  (11, Types.VARCHAR);

+			ps.setNull  (12, Types.INTEGER);

+			ps.setInt   (13, subid);

+			ps.setNull  (14, Types.VARCHAR);

+			ps.setNull  (15, Types.INTEGER);

+			ps.setNull  (16, Types.INTEGER);

+			ps.setNull  (17, Types.VARCHAR);

+			ps.setLong  (18, record_id);

+			ps.setLong  (19, clength2);

+		}

+	}

+

+	public static void main(String[] a) throws IOException {

+		LogRecord.printLogRecords(System.out, new RLEBitSet(a[0]));

+	}

+}
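
A brief usage sketch for the command-line entry point above (illustrative, not committed code): it dumps the LOG_RECORDS rows whose RECORD_ID values fall inside an RLEBitSet. The wrapper class name, the "1-1000" bitset spelling, and the assumption that LogRecord lives in the provisioning.beans package are all illustrative; check RLEBitSet for the exact textual form it accepts.

import java.io.IOException;

public class DumpLogRecords {
    public static void main(String[] args) throws IOException {
        // Delegates to LogRecord.main(); the single argument is an RLEBitSet spec of RECORD_IDs.
        // "1-1000" is an assumed spelling for "record IDs 1 through 1000".
        String spec = (args.length > 0) ? args[0] : "1-1000";
        com.att.research.datarouter.provisioning.beans.LogRecord.main(new String[] { spec });
    }
}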

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NetworkRoute.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NetworkRoute.java
new file mode 100644
index 0000000..59f2192
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NetworkRoute.java
@@ -0,0 +1,230 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.SortedSet;

+import java.util.TreeSet;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * The representation of one route in the Network Route Table.

+ *

+ * @author Robert P. Eby

+ * @version $Id: NetworkRoute.java,v 1.2 2013/12/16 20:30:23 eby Exp $

+ */

+public class NetworkRoute extends NodeClass implements Comparable<NetworkRoute> {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private final int fromnode;

+	private final int tonode;

+	private final int vianode;

+

+	/**

+	 * Get a set of all Network Routes in the DB.  The set is sorted according to the natural ordering

+	 * of the routes (based on the from, to, and via node IDs in each route).

+	 * @return the sorted set

+	 */

+	public static SortedSet<NetworkRoute> getAllNetworkRoutes() {

+		SortedSet<NetworkRoute> set = new TreeSet<NetworkRoute>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet    rs = stmt.executeQuery("select FROMNODE, TONODE, VIANODE from NETWORK_ROUTES");

+			while (rs.next()) {

+				int fromnode = rs.getInt("FROMNODE");

+				int tonode   = rs.getInt("TONODE");

+				int vianode  = rs.getInt("VIANODE");

+				set.add(new NetworkRoute(fromnode, tonode, vianode));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return set;

+	}

+

+	public NetworkRoute(String fromnode, String tonode) throws IllegalArgumentException {

+		this.fromnode = lookupNodeName(fromnode);

+		this.tonode   = lookupNodeName(tonode);

+		this.vianode  = -1;

+	}

+

+	public NetworkRoute(String fromnode, String tonode, String vianode) throws IllegalArgumentException {

+		this.fromnode = lookupNodeName(fromnode);

+		this.tonode   = lookupNodeName(tonode);

+		this.vianode  = lookupNodeName(vianode);

+	}

+

+	public NetworkRoute(JSONObject jo) throws IllegalArgumentException {

+		this.fromnode = lookupNodeName(jo.getString("from"));

+		this.tonode   = lookupNodeName(jo.getString("to"));

+		this.vianode  = lookupNodeName(jo.getString("via"));

+	}

+

+	public NetworkRoute(int fromnode, int tonode, int vianode) throws IllegalArgumentException {

+		this.fromnode = fromnode;

+		this.tonode   = tonode;

+		this.vianode  = vianode;

+	}

+

+	public int getFromnode() {

+		return fromnode;

+	}

+

+	public int getTonode() {

+		return tonode;

+	}

+

+	public int getVianode() {

+		return vianode;

+	}

+

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "delete from NETWORK_ROUTES where FROMNODE = ? AND TONODE = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, fromnode);

+			ps.setInt(2, tonode);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = false;

+		if (this.vianode >= 0) {

+			PreparedStatement ps = null;

+			try {

+				// Create the NETWORK_ROUTES row

+				String sql = "insert into NETWORK_ROUTES (FROMNODE, TONODE, VIANODE) values (?, ?, ?)";

+				ps = c.prepareStatement(sql);

+				ps.setInt(1, this.fromnode);

+				ps.setInt(2, this.tonode);

+				ps.setInt(3, this.vianode);

+				ps.execute();

+				ps.close();

+				rv = true;

+			} catch (SQLException e) {

+				intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+				e.printStackTrace();

+			} finally {

+				try {

+					ps.close();

+				} catch (SQLException e) {

+					e.printStackTrace();

+				}

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "update NETWORK_ROUTES set VIANODE = ? where FROMNODE = ? and TONODE = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, vianode);

+			ps.setInt(2, fromnode);

+			ps.setInt(3, tonode);

+			ps.executeUpdate();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("from", lookupNodeID(fromnode));

+		jo.put("to",   lookupNodeID(tonode));

+		jo.put("via",  lookupNodeID(vianode));

+		return jo;

+	}

+

+	@Override

+	public String getKey() {

+		return lookupNodeID(fromnode)+":"+lookupNodeID(tonode);

+	}

+

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof NetworkRoute))

+			return false;

+		NetworkRoute on = (NetworkRoute)obj;

+		return (fromnode == on.fromnode) && (tonode == on.tonode) && (vianode == on.vianode);

+	}

+

+	@Override

+	public int compareTo(NetworkRoute o) {

+		if (this.fromnode == o.fromnode) {

+			if (this.tonode == o.tonode)

+				return this.vianode - o.vianode;

+			return this.tonode - o.tonode;

+		}

+		return this.fromnode - o.fromnode;

+	}

+

+	@Override

+	public String toString() {

+		return String.format("NETWORK: from=%d, to=%d, via=%d", fromnode, tonode, vianode);

+	}

+}
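
A minimal usage sketch for the bean above (illustrative, not committed code): list every row of NETWORK_ROUTES as JSON. It assumes the provisioning database used by DB is configured and populated; the class name is a placeholder.

import java.util.SortedSet;

import com.att.research.datarouter.provisioning.beans.NetworkRoute;

public class ListNetworkRoutes {
    public static void main(String[] args) {
        SortedSet<NetworkRoute> routes = NetworkRoute.getAllNetworkRoutes();
        for (NetworkRoute nr : routes) {
            // Each route is rendered with node names, e.g. {"from":"...","to":"...","via":"..."}
            System.out.println(nr.asJSONObject().toString());
        }
    }
}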

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NodeClass.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NodeClass.java
new file mode 100644
index 0000000..321885b
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/NodeClass.java
@@ -0,0 +1,179 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.util.Collection;

+import java.util.HashMap;

+import java.util.Map;

+import java.util.Set;

+import java.util.TreeSet;

+

+import org.apache.log4j.Logger;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * This class is used to aid in the mapping of node names from/to node IDs.

+ *

+ * @author Robert P. Eby

+ * @version $Id: NodeClass.java,v 1.2 2014/01/15 16:08:43 eby Exp $

+ */

+public abstract class NodeClass extends Syncable {

+	private static Map<String, Integer> map;

+

+	public NodeClass() {

+		// init on first use

+		if (map == null) {

+			reload();

+		}

+	}

+

+	/**

+	 * Add nodes to the NODES table when the NODES parameter value is changed.

+	 * Nodes are only added to the table; they are never deleted.  Each node name is normalized

+	 * to contain the domain (if it is missing).

+	 * @param nodes an array of the current node names (from the pipe-separated NODES parameter)

+	 */

+	public static void setNodes(String[] nodes) {

+		if (map == null)

+			reload();

+		int nextid = 0;

+		for (Integer n : map.values()) {

+			if (n >= nextid)

+				nextid = n+1;

+		}

+		// take | separated list, add domain if needed.

+		Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+		for (String node : nodes) {

+			node = normalizeNodename(node);

+			if (!map.containsKey(node)) {

+				intlogger.info("..adding "+node+" to NODES with index "+nextid);

+				map.put(node, nextid);

+				PreparedStatement ps = null;

+				try {

+					DB db = new DB();

+					@SuppressWarnings("resource")

+					Connection conn = db.getConnection();

+					ps = conn.prepareStatement("insert into NODES (NODEID, NAME, ACTIVE) values (?, ?, 1)");

+					ps.setInt(1, nextid);

+					ps.setString(2, node);

+					ps.execute();

+					ps.close();

+					db.release(conn);

+				} catch (SQLException e) {

+					intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+					e.printStackTrace();

+				} finally {

+					try {

+						ps.close();

+					} catch (SQLException e) {

+						e.printStackTrace();

+					}

+				}

+				nextid++;

+			}

+		}

+	}

+

+	public static void reload() {

+		Map<String, Integer> m = new HashMap<String, Integer>();

+		PreparedStatement ps = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			String sql = "select NODEID, NAME from NODES";

+			ps = conn.prepareStatement(sql);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				int id = rs.getInt("NODEID");

+				String name = rs.getString("NAME");

+				m.put(name, id);

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		map = m;

+	}

+

+	public static Integer lookupNodeName(final String name) throws IllegalArgumentException {

+		Integer n = map.get(name);

+		if (n == null)

+			throw new IllegalArgumentException("Invalid node name: "+name);

+		return n;

+	}

+

+	public static Collection<String> lookupNodeNames(String patt) throws IllegalArgumentException {

+		Collection<String> coll = new TreeSet<String>();

+		final Set<String> keyset = map.keySet();

+		for (String s : patt.toLowerCase().split(",")) {

+			if (s.endsWith("*")) {

+				s = s.substring(0, s.length()-1);

+				for (String s2 : keyset) {

+					if (s2.startsWith(s))

+						coll.add(s2);

+				}

+			} else if (keyset.contains(s)) {

+				coll.add(s);

+			} else if (keyset.contains(normalizeNodename(s))) {

+				coll.add(normalizeNodename(s));

+			} else {

+				throw new IllegalArgumentException("Invalid node name: "+s);

+			}

+		}

+		return coll;

+	}

+

+	protected String lookupNodeID(int n) {

+		for (String s : map.keySet()) {

+			if (map.get(s) == n)

+				return s;

+		}

+		return null;

+	}

+

+	public static String normalizeNodename(String s) {

+		if (s != null && s.indexOf('.') <= 0) {

+			Parameters p = Parameters.getParameter(Parameters.PROV_DOMAIN);

+			if (p != null) {

+				String domain = p.getValue();

+				s += "." + domain;

+			}

+		}

+		return s.toLowerCase();

+	}

+}
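
A small sketch of the name/ID helpers above (illustrative, not committed code): expand a wildcard pattern against the NODES table and show each node's numeric ID. It assumes the provisioning DB is configured; the class name and the "node*" pattern are placeholders.

import com.att.research.datarouter.provisioning.beans.NodeClass;

public class ExpandNodePattern {
    public static void main(String[] args) {
        NodeClass.reload();                                         // populate the static NAME -> NODEID map
        String pattern = (args.length > 0) ? args[0] : "node*";     // a trailing '*' is a prefix wildcard
        for (String name : NodeClass.lookupNodeNames(pattern)) {
            System.out.println(name + " -> node ID " + NodeClass.lookupNodeName(name));
        }
    }
}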

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Parameters.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Parameters.java
new file mode 100644
index 0000000..1cb4bca
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Parameters.java
@@ -0,0 +1,257 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.HashMap;

+import java.util.Map;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Methods to provide access to Provisioning parameters in the DB.

+ * This class also provides constants for the standard parameters used by the Data Router.

+ * @author Robert Eby

+ * @version $Id: Parameters.java,v 1.11 2014/03/12 19:45:41 eby Exp $

+ */

+public class Parameters extends Syncable {

+	public static final String PROV_REQUIRE_SECURE          = "PROV_REQUIRE_SECURE";

+	public static final String PROV_REQUIRE_CERT            = "PROV_REQUIRE_CERT";

+	public static final String PROV_AUTH_ADDRESSES          = "PROV_AUTH_ADDRESSES";

+	public static final String PROV_AUTH_SUBJECTS           = "PROV_AUTH_SUBJECTS";

+	public static final String PROV_NAME                    = "PROV_NAME";

+	public static final String PROV_ACTIVE_NAME             = "PROV_ACTIVE_NAME";

+	public static final String PROV_DOMAIN                  = "PROV_DOMAIN";

+	public static final String PROV_MAXFEED_COUNT           = "PROV_MAXFEED_COUNT";

+	public static final String PROV_MAXSUB_COUNT            = "PROV_MAXSUB_COUNT";

+	public static final String PROV_POKETIMER1              = "PROV_POKETIMER1";

+	public static final String PROV_POKETIMER2              = "PROV_POKETIMER2";

+	public static final String PROV_SPECIAL_SUBNET          = "PROV_SPECIAL_SUBNET";

+	public static final String PROV_LOG_RETENTION           = "PROV_LOG_RETENTION";

+	public static final String NODES                        = "NODES";

+	public static final String ACTIVE_POD                   = "ACTIVE_POD";

+	public static final String STANDBY_POD                  = "STANDBY_POD";

+	public static final String LOGROLL_INTERVAL             = "LOGROLL_INTERVAL";

+	public static final String DELIVERY_INIT_RETRY_INTERVAL = "DELIVERY_INIT_RETRY_INTERVAL";

+	public static final String DELIVERY_MAX_RETRY_INTERVAL  = "DELIVERY_MAX_RETRY_INTERVAL";

+	public static final String DELIVERY_RETRY_RATIO         = "DELIVERY_RETRY_RATIO";

+	public static final String DELIVERY_MAX_AGE             = "DELIVERY_MAX_AGE";

+	public static final String THROTTLE_FILTER              = "THROTTLE_FILTER";

+	public static final String STATIC_ROUTING_NODES         = "STATIC_ROUTING_NODES"; //Adding new param for static Routing - Rally:US664862-1610

+

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+

+	private String keyname;

+	private String value;

+

+	/**

+	 * Get all parameters in the DB as a Map.

+	 * @return the Map of keynames/values from the DB.

+	 */

+	public static Map<String,String> getParameters() {

+		Map<String,String> props = new HashMap<String,String>();

+		for (Parameters p : getParameterCollection()) {

+			props.put(p.getKeyname(), p.getValue());

+		}

+		return props;

+	}

+	public static Collection<Parameters> getParameterCollection() {

+		Collection<Parameters> coll = new ArrayList<Parameters>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			String sql = "select * from PARAMETERS";

+			ResultSet rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				Parameters p = new Parameters(rs);

+				coll.add(p);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return coll;

+	}

+	/**

+	 * Get a specific parameter from the DB.

+	 * @param k the key to look up

+	 * @return the Parameters object for that key, or null if it does not exist

+	 */

+	public static Parameters getParameter(String k) {

+		Parameters v = null;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			String sql = "select KEYNAME, VALUE from PARAMETERS where KEYNAME = \"" + k + "\"";

+			ResultSet rs = stmt.executeQuery(sql);

+			if (rs.next()) {

+				v = new Parameters(rs);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return v;

+	}

+

+	public Parameters() {

+		this("", "");

+	}

+	public Parameters(String k, String v) {

+		this.keyname = k;

+		this.value   = v;

+	}

+	public Parameters(ResultSet rs) throws SQLException {

+		this.keyname = rs.getString("KEYNAME");

+		this.value   = rs.getString("VALUE");

+	}

+	public String getKeyname() {

+		return keyname;

+	}

+	public void setKeyname(String keyname) {

+		this.keyname = keyname;

+	}

+	public String getValue() {

+		return value;

+	}

+	public void setValue(String value) {

+		this.value = value;

+	}

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("keyname", keyname);

+		jo.put("value", value);

+		return jo;

+	}

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			// Create the PARAMETERS row

+			String sql = "insert into PARAMETERS values (?, ?)";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, getKeyname());

+			ps.setString(2, getValue());

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			// Update the PARAMETERS row

+			String sql = "update PARAMETERS set VALUE = ? where KEYNAME = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, getValue());

+			ps.setString(2, getKeyname());

+			ps.executeUpdate();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			// Delete the PARAMETERS row

+			String sql = "delete from PARAMETERS where KEYNAME = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, getKeyname());

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public String getKey() {

+		return getKeyname();

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof Parameters))

+			return false;

+		Parameters of = (Parameters) obj;

+		if (!keyname.equals(of.keyname))

+			return false;

+		if (!value.equals(of.value))

+			return false;

+		return true;

+	}

+

+	@Override

+	public String toString() {

+		return "PARAM: keyname=" + keyname + ", value=" + value;

+	}

+}

+
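
A usage sketch for the accessors above (illustrative, not committed code): read one well-known parameter and then dump the whole PARAMETERS table. It assumes the provisioning DB is configured; the class name is a placeholder.

import java.util.Map;

import com.att.research.datarouter.provisioning.beans.Parameters;

public class ShowParameters {
    public static void main(String[] args) {
        // Single lookup: getParameter() returns null when the key has no row.
        Parameters domain = Parameters.getParameter(Parameters.PROV_DOMAIN);
        System.out.println(Parameters.PROV_DOMAIN + " = "
            + ((domain == null) ? "(not set)" : domain.getValue()));

        // Bulk lookup: all keys and values as a Map.
        for (Map.Entry<String, String> e : Parameters.getParameters().entrySet()) {
            System.out.println(e.getKey() + " = " + e.getValue());
        }
    }
}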

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PubFailRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PubFailRecord.java
new file mode 100644
index 0000000..1fe1473
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PubFailRecord.java
@@ -0,0 +1,85 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Types;

+import java.text.ParseException;

+

+/**

+ * The representation of a Publish Failure (PBF) Record, as retrieved from the DB.

+ * @author Robert Eby

+ * @version $Id: PubFailRecord.java,v 1.1 2013/10/28 18:06:53 eby Exp $

+ */

+public class PubFailRecord extends BaseLogRecord {

+	private long contentLengthReceived;

+	private String sourceIP;

+	private String user;

+	private String error;

+

+	public PubFailRecord(String[] pp) throws ParseException {

+		super(pp);

+		this.contentLengthReceived = Long.parseLong(pp[8]);

+		this.sourceIP = pp[9];

+		this.user     = pp[10];

+		this.error    = pp[11];

+	}

+	public PubFailRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		// Note: because this record type should be rare, these fields are mapped to columns normally used by other record types in the DB

+		this.contentLengthReceived = rs.getLong("CONTENT_LENGTH_2");

+		this.sourceIP = rs.getString("REMOTE_ADDR");

+		this.user     = rs.getString("USER");

+		this.error    = rs.getString("FEED_FILEID");

+	}

+	public long getContentLengthReceived() {

+		return contentLengthReceived;

+	}

+	public String getSourceIP() {

+		return sourceIP;

+	}

+	public String getUser() {

+		return user;

+	}

+	public String getError() {

+		return error;

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, "pbf");		// field 1: type

+		super.load(ps);				// loads fields 2-8

+		ps.setString( 9, getError());

+		ps.setString(10, getSourceIP());

+		ps.setString(11, getUser());

+		ps.setNull  (12, Types.INTEGER);

+		ps.setNull  (13, Types.INTEGER);

+		ps.setNull  (14, Types.VARCHAR);

+		ps.setNull  (15, Types.INTEGER);

+		ps.setNull  (16, Types.INTEGER);

+		ps.setNull  (17, Types.VARCHAR);

+		ps.setLong  (19, getContentLengthReceived());

+	}

+}
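
A hedged sketch of reading publish-failure records back with the ResultSet constructor above (illustrative, not committed code). The LOG_RECORDS table name and the TYPE column used in the filter are assumptions inferred from the load() mapping; adjust them to the real schema. The class name is a placeholder.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

import com.att.research.datarouter.provisioning.beans.PubFailRecord;
import com.att.research.datarouter.provisioning.utils.DB;

public class ListPublishFailures {
    public static void main(String[] args) {
        try {
            DB db = new DB();
            Connection conn = db.getConnection();
            Statement stmt = conn.createStatement();
            // Assumed table/column names; the "pbf" type tag matches the load() method above.
            ResultSet rs = stmt.executeQuery("select * from LOG_RECORDS where TYPE = 'pbf'");
            while (rs.next()) {
                PubFailRecord pfr = new PubFailRecord(rs);
                System.out.println(pfr.getSourceIP() + " as " + pfr.getUser()
                    + " sent " + pfr.getContentLengthReceived() + " bytes: " + pfr.getError());
            }
            rs.close();
            stmt.close();
            db.release(conn);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}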

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PublishRecord.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PublishRecord.java
new file mode 100644
index 0000000..a844c76
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/PublishRecord.java
@@ -0,0 +1,153 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Types;

+import java.text.ParseException;

+import java.util.LinkedHashMap;

+

+import org.json.LOGJSONObject;

+

+/**

+ * The representation of a Publish Record, as retrieved from the DB.

+ * @author Robert Eby

+ * @version $Id: PublishRecord.java,v 1.6 2013/10/28 18:06:53 eby Exp $

+ */

+public class PublishRecord extends BaseLogRecord {

+	private String feedFileid;

+	private String remoteAddr;

+	private String user;

+	private int status;

+

+	public PublishRecord(String[] pp) throws ParseException {

+		super(pp);

+//		This is too slow!

+//		Matcher m = Pattern.compile(".*/publish/(\\d+)/(.*)$").matcher(pp[4]);

+//		if (!m.matches())

+//			throw new ParseException("bad pattern", 0);

+//		this.feedFileid = m.group(2);

+		int ix = pp[4].indexOf("/publish/");

+		if (ix < 0)

+			throw new ParseException("bad pattern", 0);

+		ix = pp[4].indexOf('/', ix+9);

+		if (ix < 0)

+			throw new ParseException("bad pattern", 0);

+		this.feedFileid = pp[4].substring(ix+1);

+		this.remoteAddr = pp[8];

+		this.user       = pp[9];

+		this.status     = Integer.parseInt(pp[10]);

+	}

+	public PublishRecord(ResultSet rs) throws SQLException {

+		super(rs);

+		this.feedFileid = rs.getString("FEED_FILEID");

+		this.remoteAddr = rs.getString("REMOTE_ADDR");

+		this.user       = rs.getString("USER");

+		this.status     = rs.getInt("STATUS");

+	}

+	public String getFeedFileid() {

+		return feedFileid;

+	}

+

+	public void setFeedFileid(String feedFileid) {

+		this.feedFileid = feedFileid;

+	}

+

+	public String getRemoteAddr() {

+		return remoteAddr;

+	}

+

+	public void setRemoteAddr(String remoteAddr) {

+		this.remoteAddr = remoteAddr;

+	}

+

+	public String getUser() {

+		return user;

+	}

+

+	public void setUser(String user) {

+		this.user = user;

+	}

+

+	public int getStatus() {

+		return status;

+	}

+

+	public void setStatus(int status) {

+		this.status = status;

+	}

+	

+	

+	public LOGJSONObject reOrderObject(LOGJSONObject jo) {

+		LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();

+		

+		

+		logrecordObj.put("statusCode", jo.get("statusCode"));

+		logrecordObj.put("publishId", jo.get("publishId"));

+		logrecordObj.put("requestURI", jo.get("requestURI"));

+		logrecordObj.put("sourceIP", jo.get("sourceIP"));

+		logrecordObj.put("method", jo.get("method"));

+		logrecordObj.put("contentType", jo.get("contentType"));

+		logrecordObj.put("endpointId", jo.get("endpointId"));

+		logrecordObj.put("type", jo.get("type"));

+		logrecordObj.put("date", jo.get("date"));

+		logrecordObj.put("contentLength", jo.get("contentLength"));

+		

+		LOGJSONObject newjo = new LOGJSONObject(logrecordObj);

+		return newjo;

+	}

+	

+	@Override

+	public LOGJSONObject asJSONObject() {

+		LOGJSONObject jo = super.asJSONObject();

+		jo.put("type", "pub");

+//		jo.put("feedFileid", feedFileid);

+//		jo.put("remoteAddr", remoteAddr);

+//		jo.put("user", user);

+		jo.put("sourceIP", remoteAddr);

+		jo.put("endpointId", user);

+		jo.put("statusCode", status);

+		

+		LOGJSONObject newjo = this.reOrderObject(jo);

+		

+		return newjo;

+	}

+	@Override

+	public void load(PreparedStatement ps) throws SQLException {

+		ps.setString(1, "pub");		// field 1: type

+		super.load(ps);				// loads fields 2-8

+		ps.setString( 9, getFeedFileid());

+		ps.setString(10, getRemoteAddr());

+		ps.setString(11, getUser());

+		ps.setInt   (12, getStatus());

+		ps.setNull  (13, Types.INTEGER);

+		ps.setNull  (14, Types.VARCHAR);

+		ps.setNull  (15, Types.INTEGER);

+		ps.setNull  (16, Types.INTEGER);

+		ps.setNull  (17, Types.VARCHAR);

+		ps.setNull  (19, Types.BIGINT);

+	}

+}
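
A standalone illustration of the feedFileid extraction rule in the String[] constructor above (illustrative, not committed code): everything after ".../publish/<feedid>/" in the request URI is treated as the file ID. The URI and class name are made up.

public class FeedFileidExample {
    public static void main(String[] args) {
        String uri = "https://dr-node.example.com/publish/42/myfile.xml";
        int ix = uri.indexOf("/publish/");      // locate the publish path segment
        ix = uri.indexOf('/', ix + 9);          // skip "/publish/" plus the feed id
        String feedFileid = uri.substring(ix + 1);
        System.out.println(feedFileid);         // prints: myfile.xml
    }
}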

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubDelivery.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubDelivery.java
new file mode 100644
index 0000000..66e44af
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubDelivery.java
@@ -0,0 +1,109 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.ResultSet;

+import java.sql.SQLException;

+

+import org.json.JSONObject;

+

+/**

+ * The representation of Subscription delivery information.  This includes the URL to deliver to,

+ * login and password, and whether to use the "HTTP 100-continue" feature for this subscription.

+ * @author Robert Eby

+ * @version $Id: SubDelivery.java,v 1.2 2013/06/20 14:11:05 eby Exp $

+ */

+public class SubDelivery implements JSONable {

+	private String url;

+	private String user;

+	private String password;

+	private boolean use100;

+

+	public SubDelivery() {

+		this("", "", "", false);

+	}

+	public SubDelivery(String url, String user, String password, boolean use100) {

+		this.url      = url;

+		this.user     = user;

+		this.password = password;

+		this.use100   = use100;

+	}

+	public SubDelivery(ResultSet rs) throws SQLException {

+		this.url      = rs.getString("DELIVERY_URL");

+		this.user     = rs.getString("DELIVERY_USER");

+		this.password = rs.getString("DELIVERY_PASSWORD");

+		this.use100   = rs.getBoolean("DELIVERY_USE100");

+

+	}

+	public String getUrl() {

+		return url;

+	}

+	public void setUrl(String url) {

+		this.url = url;

+	}

+	public String getUser() {

+		return user;

+	}

+	public void setUser(String user) {

+		this.user = user;

+	}

+	public String getPassword() {

+		return password;

+	}

+	public void setPassword(String password) {

+		this.password = password;

+	}

+

+	public boolean isUse100() {

+		return use100;

+	}

+	public void setUse100(boolean use100) {

+		this.use100 = use100;

+	}

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("url", url);

+		jo.put("user", user);

+		jo.put("password", password);

+		jo.put("use100", use100);

+		return jo;

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof SubDelivery))

+			return false;

+		SubDelivery os = (SubDelivery) obj;

+		if (!url.equals(os.url))

+			return false;

+		if (!user.equals(os.user))

+			return false;

+		if (!password.equals(os.password))

+			return false;

+		if (use100 != os.use100)

+			return false;

+		return true;

+	}

+}
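
A minimal sketch for the bean above (illustrative, not committed code): build a delivery descriptor and render it as JSON with asJSONObject(). The endpoint, credentials, and class name are placeholders, and JSON key order is not guaranteed.

import org.json.JSONObject;

import com.att.research.datarouter.provisioning.beans.SubDelivery;

public class SubDeliveryExample {
    public static void main(String[] args) {
        // use100 = true asks the node to use "HTTP 100-continue" when delivering to this URL
        SubDelivery d = new SubDelivery("https://subscriber.example.com/delivery", "druser", "secret", true);
        JSONObject jo = d.asJSONObject();
        System.out.println(jo.toString());      // url, user, password and use100 fields
    }
}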

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubLinks.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubLinks.java
new file mode 100644
index 0000000..27128d8
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/SubLinks.java
@@ -0,0 +1,95 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.InvalidObjectException;

+

+import org.json.JSONObject;

+

+/**

+ * The URLs associated with a Subscription.

+ * @author Robert Eby

+ * @version $Id: SubLinks.java,v 1.3 2013/07/05 13:48:05 eby Exp $

+ */

+public class SubLinks implements JSONable {

+	private String self;

+	private String feed;

+	private String log;

+

+	public SubLinks() {

+		self = feed = log = null;

+	}

+	public SubLinks(JSONObject jo) throws InvalidObjectException {

+		this();

+		self = jo.getString("self");

+		feed = jo.getString("feed");

+		log  = jo.getString("log");

+	}

+	public SubLinks(String self, String feed, String log) {

+		this.self = self;

+		this.feed = feed;

+		this.log  = log;

+	}

+	public String getSelf() {

+		return self;

+	}

+	public void setSelf(String self) {

+		this.self = self;

+	}

+	public String getFeed() {

+		return feed;

+	}

+	public void setFeed(String feed) {

+		this.feed = feed;

+	}

+	public String getLog() {

+		return log;

+	}

+	public void setLog(String log) {

+		this.log = log;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("self", self);

+		jo.put("feed", feed);

+		jo.put("log", log);

+		return jo;

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof SubLinks))

+			return false;

+		SubLinks os = (SubLinks) obj;

+		if (!self.equals(os.self))

+			return false;

+		if (!feed.equals(os.feed))

+			return false;

+		if (!log.equals(os.log))

+			return false;

+		return true;

+	}

+}
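
A short sketch showing the JSON round trip supported by the constructors above (illustrative, not committed code); the URLs and class name are placeholders.

import java.io.InvalidObjectException;

import org.json.JSONObject;

import com.att.research.datarouter.provisioning.beans.SubLinks;

public class SubLinksExample {
    public static void main(String[] args) throws InvalidObjectException {
        SubLinks links = new SubLinks(
            "https://prov.example.com/subs/5",      // self
            "https://prov.example.com/feed/10",     // feed
            "https://prov.example.com/sublog/5");   // log
        JSONObject jo = links.asJSONObject();
        // equals() compares all three URLs, so the round trip yields an equal object.
        System.out.println(links.equals(new SubLinks(jo)));   // true
    }
}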

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Subscription.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Subscription.java
new file mode 100644
index 0000000..7ab10a4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Subscription.java
@@ -0,0 +1,511 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.io.InvalidObjectException;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.Date;

+import java.util.List;

+

+import org.apache.log4j.Logger;

+import org.json.JSONObject;

+import java.util.Properties;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+import com.att.research.datarouter.provisioning.utils.URLUtilities;

+

+/**

+ * The representation of a Subscription.  Subscriptions can be retrieved from the DB, or stored/updated in the DB.

+ * @author Robert Eby

+ * @version $Id: Subscription.java,v 1.9 2013/10/28 18:06:53 eby Exp $

+ */

+public class Subscription extends Syncable {

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private static int next_subid = getMaxSubID() + 1;

+

+	private int subid;

+	private int feedid;

+	private int groupid; //New field is added - Groups feature Rally:US708115 - 1610

+	private SubDelivery delivery;

+	private boolean metadataOnly;

+	private String subscriber;

+	private SubLinks links;

+	private boolean suspended;

+	private Date last_mod;

+	private Date created_date;

+

+	public static Subscription getSubscriptionMatching(Subscription sub) {

+		SubDelivery deli = sub.getDelivery();

+		String sql = String.format(

+			"select * from SUBSCRIPTIONS where FEEDID = %d and DELIVERY_URL = \"%s\" and DELIVERY_USER = \"%s\" and DELIVERY_PASSWORD = \"%s\" and DELIVERY_USE100 = %d and METADATA_ONLY = %d",

+			sub.getFeedid(),

+			deli.getUrl(),

+			deli.getUser(),

+			deli.getPassword(),

+			deli.isUse100() ? 1 : 0,

+			sub.isMetadataOnly() ? 1 : 0

+		);

+		List<Subscription> list = getSubscriptionsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	public static Subscription getSubscriptionById(int id) {

+		String sql = "select * from SUBSCRIPTIONS where SUBID = " + id;

+		List<Subscription> list = getSubscriptionsForSQL(sql);

+		return list.size() > 0 ? list.get(0) : null;

+	}

+	public static Collection<Subscription> getAllSubscriptions() {

+		return getSubscriptionsForSQL("select * from SUBSCRIPTIONS");

+	}

+	private static List<Subscription> getSubscriptionsForSQL(String sql) {

+		List<Subscription> list = new ArrayList<Subscription>();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				Subscription sub = new Subscription(rs);

+				list.add(sub);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return list;

+	}

+	public static int getMaxSubID() {

+		int max = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select MAX(subid) from SUBSCRIPTIONS");

+			if (rs.next()) {

+				max = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.info("getMaxSubID: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return max;

+	}

+	public static Collection<String> getSubscriptionUrlList(int feedid) {

+		List<String> list = new ArrayList<String>();

+		String sql = "select SUBID from SUBSCRIPTIONS where FEEDID = "+feedid;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet  rs = stmt.executeQuery(sql);

+			while (rs.next()) {

+				int subid = rs.getInt("SUBID");

+				list.add(URLUtilities.generateSubscriptionURL(subid));

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		return list;

+	}

+	/**

+	 * Return a count of the number of active subscriptions in the DB.

+	 * @return the count

+	 */

+	public static int countActiveSubscriptions() {

+		int count = 0;

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			Statement  stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");

+			if (rs.next()) {

+				count = rs.getInt(1);

+			}

+			rs.close();

+			stmt.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			intlogger.warn("PROV0008 countActiveSubscriptions: "+e.getMessage());

+			e.printStackTrace();

+		}

+		return count;

+	}

+

+	public Subscription() {

+		this("", "", "");

+	}

+	public Subscription(String url, String user, String password) {

+		this.subid = -1;

+		this.feedid = -1;

+		this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610

+		this.delivery = new SubDelivery(url, user, password, false);

+		this.metadataOnly = false;

+		this.subscriber = "";

+		this.links = new SubLinks();

+		this.suspended = false;

+		this.last_mod = new Date();

+		this.created_date = new Date();

+	}

+	public Subscription(ResultSet rs) throws SQLException {

+		this.subid        = rs.getInt("SUBID");

+		this.feedid       = rs.getInt("FEEDID");

+		this.groupid       = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610

+		this.delivery     = new SubDelivery(rs);

+		this.metadataOnly = rs.getBoolean("METADATA_ONLY");

+		this.subscriber   = rs.getString("SUBSCRIBER");

+		this.links        = new SubLinks(rs.getString("SELF_LINK"), URLUtilities.generateFeedURL(feedid), rs.getString("LOG_LINK"));

+		this.suspended    = rs.getBoolean("SUSPENDED");

+		this.last_mod     = rs.getDate("LAST_MOD");

+		this.created_date = rs.getDate("CREATED_DATE");

+	}

+	public Subscription(JSONObject jo) throws InvalidObjectException {

+		this("", "", "");

+		try {

+			// The JSONObject is assumed to contain a vnd.att-dr.subscription representation

+			this.subid  = jo.optInt("subid", -1);

+			this.feedid = jo.optInt("feedid", -1);

+			this.groupid = jo.optInt("groupid", -1); //New field is added - Groups feature Rally:US708115 - 1610		

+

+			JSONObject jdeli = jo.getJSONObject("delivery");

+			String url      = jdeli.getString("url");

+			String user     = jdeli.getString("user");

+			String password = jdeli.getString("password");

+			boolean use100  = jdeli.getBoolean("use100");

+

+			

+			//Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.

+			Properties p = (new DB()).getProperties();

+			if(p.get("com.att.research.datarouter.provserver.https.relaxation").toString().equals("false") && !jo.has("sync")) {

+				if (!url.startsWith("https://"))

+					throw new InvalidObjectException("delivery URL is not HTTPS");

+			}

+

+			if (url.length() > 256)

+				throw new InvalidObjectException("delivery url field is too long");

+			if (user.length() > 20)

+				throw new InvalidObjectException("delivery user field is too long");

+			if (password.length() > 32)

+				throw new InvalidObjectException("delivery password field is too long");

+			this.delivery = new SubDelivery(url, user, password, use100);

+

+			this.metadataOnly = jo.getBoolean("metadataOnly");

+			this.suspended    = jo.optBoolean("suspend", false);

+

+			this.subscriber = jo.optString("subscriber", "");

+			JSONObject jol = jo.optJSONObject("links");

+			this.links = (jol == null) ? (new SubLinks()) : (new SubLinks(jol));

+		} catch (InvalidObjectException e) {

+			throw e;

+		} catch (Exception e) {

+			throw new InvalidObjectException("invalid JSON: "+e.getMessage());

+		}

+	}

+	public int getSubid() {

+		return subid;

+	}

+	public void setSubid(int subid) {

+		this.subid = subid;

+

+		// Create link URLs

+		SubLinks sl = getLinks();

+		sl.setSelf(URLUtilities.generateSubscriptionURL(subid));

+		sl.setLog(URLUtilities.generateSubLogURL(subid));

+	}

+	public int getFeedid() {

+		return feedid;

+	}

+	public void setFeedid(int feedid) {

+		this.feedid = feedid;

+

+		// Create link URLs

+		SubLinks sl = getLinks();

+		sl.setFeed(URLUtilities.generateFeedURL(feedid));

+	}

+

+	//New getter setters for Groups feature Rally:US708115 - 1610

+	public int getGroupid() {		

+		return groupid;		

+	}		

+	public void setGroupid(int groupid) {		

+		this.groupid = groupid;		

+	}

+

+	public SubDelivery getDelivery() {

+		return delivery;

+	}

+	public void setDelivery(SubDelivery delivery) {

+		this.delivery = delivery;

+	}

+	public boolean isMetadataOnly() {

+		return metadataOnly;

+	}

+	public void setMetadataOnly(boolean metadataOnly) {

+		this.metadataOnly = metadataOnly;

+	}

+	public boolean isSuspended() {

+		return suspended;

+	}

+	public void setSuspended(boolean suspended) {

+		this.suspended = suspended;

+	}

+	public String getSubscriber() {

+		return subscriber;

+	}

+	public void setSubscriber(String subscriber) {

+		if (subscriber != null) {

+			if (subscriber.length() > 8)

+				subscriber = subscriber.substring(0, 8);

+			this.subscriber = subscriber;

+		}

+	}

+	public SubLinks getLinks() {

+		return links;

+	}

+	public void setLinks(SubLinks links) {

+		this.links = links;

+	}

+

+	@Override

+	public JSONObject asJSONObject() {

+		JSONObject jo = new JSONObject();

+		jo.put("subid", subid);

+		jo.put("feedid", feedid);

+		jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610

+		jo.put("delivery", delivery.asJSONObject());

+		jo.put("metadataOnly", metadataOnly);

+		jo.put("subscriber", subscriber);

+		jo.put("links", links.asJSONObject());

+		jo.put("suspend", suspended);

+		jo.put("last_mod", last_mod.getTime());

+		jo.put("created_date", created_date.getTime());

+		return jo;

+	}

+	public JSONObject asLimitedJSONObject() {

+		JSONObject jo = asJSONObject();

+		jo.remove("subid");

+		jo.remove("feedid");

+		jo.remove("last_mod");

+		return jo;

+	}

+	public JSONObject asJSONObject(boolean hidepasswords) {

+		JSONObject jo = asJSONObject();

+		if (hidepasswords) {

+			jo.remove("subid");	// we no longer hide passwords, however we do hide these

+			jo.remove("feedid");

+			jo.remove("last_mod");

+			jo.remove("created_date");

+		}

+		return jo;

+	}

+	@Override

+	public boolean doInsert(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			if (subid == -1) {

+				// No subscription ID assigned yet, so assign the next available one

+				setSubid(next_subid++);

+			}

+			// In case we insert a subscription from synchronization

+			if (subid > next_subid)

+				next_subid = subid+1;

+

+			// Create the SUBSCRIPTIONS row

+			String sql = "insert into SUBSCRIPTIONS (SUBID, FEEDID, DELIVERY_URL, DELIVERY_USER, DELIVERY_PASSWORD, DELIVERY_USE100, METADATA_ONLY, SUBSCRIBER, SUSPENDED, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

+			ps = c.prepareStatement(sql, new String[] { "SUBID" });

+			ps.setInt(1, subid);

+			ps.setInt(2, feedid);

+			ps.setString(3, getDelivery().getUrl());

+			ps.setString(4, getDelivery().getUser());

+			ps.setString(5, getDelivery().getPassword());

+			ps.setInt(6, getDelivery().isUse100()?1:0);

+			ps.setInt(7, isMetadataOnly()?1:0);

+			ps.setString(8, getSubscriber());

+			ps.setBoolean(9, isSuspended());

+			ps.setInt(10, groupid); //New field is added - Groups feature Rally:US708115 - 1610

+			ps.execute();

+			ps.close();

+//			ResultSet rs = ps.getGeneratedKeys();

+//			rs.first();

+//			setSubid(rs.getInt(1));	// side effect - sets the link URLs

+//			ps.close();

+

+			// Update the row to set the URLs

+			sql = "update SUBSCRIPTIONS set SELF_LINK = ?, LOG_LINK = ? where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, getLinks().getSelf());

+			ps.setString(2, getLinks().getLog());

+			ps.setInt(3, subid);

+			ps.execute();

+			ps.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0005 doInsert: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public boolean doUpdate(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "update SUBSCRIPTIONS set DELIVERY_URL = ?, DELIVERY_USER = ?, DELIVERY_PASSWORD = ?, DELIVERY_USE100 = ?, METADATA_ONLY = ?, SUSPENDED = ?, GROUPID = ? where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, delivery.getUrl());

+			ps.setString(2, delivery.getUser());

+			ps.setString(3, delivery.getPassword());

+			ps.setInt(4, delivery.isUse100()?1:0);

+			ps.setInt(5, isMetadataOnly()?1:0);

+			ps.setInt(6, suspended ? 1 : 0);

+			ps.setInt(7, groupid); //New field is added - Groups feature Rally:US708115 - 1610				

+			ps.setInt(8, subid);

+			ps.executeUpdate();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+

+

+	

+	/**

+	 * Change ownership of a Subscription (Rally US708115 - 1610).

+	 */

+	public boolean changeOwnerShip() {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection c = db.getConnection();

+			String sql = "update SUBSCRIPTIONS set SUBSCRIBER = ? where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setString(1, this.subscriber);

+			ps.setInt(2, subid);

+			ps.execute();

+			ps.close();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0006 doUpdate: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	

+

+

+	@Override

+	public boolean doDelete(Connection c) {

+		boolean rv = true;

+		PreparedStatement ps = null;

+		try {

+			String sql = "delete from SUBSCRIPTIONS where SUBID = ?";

+			ps = c.prepareStatement(sql);

+			ps.setInt(1, subid);

+			ps.execute();

+		} catch (SQLException e) {

+			rv = false;

+			intlogger.warn("PROV0007 doDelete: "+e.getMessage());

+			e.printStackTrace();

+		} finally {

+			try {

+				ps.close();

+			} catch (SQLException e) {

+				e.printStackTrace();

+			}

+		}

+		return rv;

+	}

+	@Override

+	public String getKey() {

+		return ""+getSubid();

+	}

+	@Override

+	public boolean equals(Object obj) {

+		if (!(obj instanceof Subscription))

+			return false;

+		Subscription os = (Subscription) obj;

+		if (subid != os.subid)

+			return false;

+		if (feedid != os.feedid)

+			return false;

+		if (groupid != os.groupid) //New field is added - Groups feature Rally:US708115 - 1610		 

+			return false;

+		if (!delivery.equals(os.delivery))

+			return false;

+		if (metadataOnly != os.metadataOnly)

+			return false;

+		if (!subscriber.equals(os.subscriber))

+			return false;

+		if (!links.equals(os.links))

+			return false;

+		if (suspended != os.suspended)

+			return false;

+		return true;

+	}

+

+	@Override

+	public String toString() {

+		return "SUB: subid=" + subid + ", feedid=" + feedid;

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Syncable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Syncable.java
new file mode 100644
index 0000000..00163c1
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Syncable.java
@@ -0,0 +1,57 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+

+import org.json.JSONObject;

+

+/**

+ * This abstract class defines the "contract" for beans that can be sync-ed with the database,

+ * by means of straight comparison.  The <i>getKey</i> method is used to return the primary key

+ * used to identify a record.

+ *

+ * @author Robert Eby

+ * @version $Id: Syncable.java,v 1.1 2013/07/05 13:48:05 eby Exp $

+ */

+public abstract class Syncable implements Deleteable, Insertable, Updateable, JSONable {

+	@Override

+	abstract public JSONObject asJSONObject();

+

+	@Override

+	abstract public boolean doUpdate(Connection c);

+

+	@Override

+	abstract public boolean doInsert(Connection c);

+

+	@Override

+	abstract public boolean doDelete(Connection c);

+

+	/**

+	 * Get the "natural key" for this object type, as a String.

+	 * @return the key

+	 */

+	abstract public String getKey();

+}
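The Syncable contract above is easiest to see with a concrete bean. The sketch below is illustrative only: ExampleBean and its EXAMPLE table are hypothetical and not part of Data Router, but the shape of the four JDBC methods and getKey() mirrors what real beans such as Subscription do.

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    import org.json.JSONObject;

    import com.att.research.datarouter.provisioning.beans.Syncable;

    public class ExampleBean extends Syncable {
        private int id;          // primary key ("natural key") of the hypothetical EXAMPLE table
        private String name;

        @Override
        public JSONObject asJSONObject() {
            JSONObject jo = new JSONObject();
            jo.put("id", id);
            jo.put("name", name);
            return jo;
        }

        @Override
        public boolean doInsert(Connection c) {
            try (PreparedStatement ps = c.prepareStatement("insert into EXAMPLE (ID, NAME) values (?, ?)")) {
                ps.setInt(1, id);
                ps.setString(2, name);
                ps.execute();
                return true;
            } catch (SQLException e) {
                return false;
            }
        }

        @Override
        public boolean doUpdate(Connection c) {
            try (PreparedStatement ps = c.prepareStatement("update EXAMPLE set NAME = ? where ID = ?")) {
                ps.setString(1, name);
                ps.setInt(2, id);
                ps.executeUpdate();
                return true;
            } catch (SQLException e) {
                return false;
            }
        }

        @Override
        public boolean doDelete(Connection c) {
            try (PreparedStatement ps = c.prepareStatement("delete from EXAMPLE where ID = ?")) {
                ps.setInt(1, id);
                ps.execute();
                return true;
            } catch (SQLException e) {
                return false;
            }
        }

        @Override
        public String getKey() {
            return "" + id;      // the primary key, as a String, used to identify this record during sync
        }
    }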

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Updateable.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Updateable.java
new file mode 100644
index 0000000..a9b19e7
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/Updateable.java
@@ -0,0 +1,40 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.provisioning.beans;

+

+import java.sql.Connection;

+

+/**

+ * An object that can be UPDATE-ed in the database.

+ * @author Robert Eby

+ * @version $Id: Updateable.java,v 1.2 2013/05/29 14:44:36 eby Exp $

+ */

+public interface Updateable {

+	/**

+	 * Update this object in the DB.

+	 * @param c the JDBC Connection to use

+	 * @return true if the UPDATE succeeded, false otherwise

+	 */

+	public boolean doUpdate(Connection c);

+}
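As a minimal sketch of how Updateable is meant to be driven (assuming the DB connection-pool helper introduced later in this patch; the UpdateHelper class itself is hypothetical), a caller borrows a Connection, hands it to doUpdate(), and always returns it to the pool:

    import java.sql.Connection;
    import java.sql.SQLException;

    import com.att.research.datarouter.provisioning.beans.Updateable;
    import com.att.research.datarouter.provisioning.utils.DB;

    public class UpdateHelper {
        /** Run doUpdate() on any Updateable bean using a pooled connection. */
        public static boolean update(Updateable bean, DB db) throws SQLException {
            Connection c = db.getConnection();
            try {
                return bean.doUpdate(c);
            } finally {
                db.release(c);   // return the connection to the pool rather than closing it
            }
        }
    }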

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/package.html
new file mode 100644
index 0000000..4b28053
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/beans/package.html
@@ -0,0 +1,31 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<body>

+<p>

+This package provides beans to represent the basic provisioning objects of the Data Router application.

+These objects are defined by the document <b>Data Router Release 1 Provisioning API</b> <i>Version 1.2</i>.

+</p>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/EelfMsgs.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/EelfMsgs.java
new file mode 100644
index 0000000..3a23041
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/EelfMsgs.java
@@ -0,0 +1,56 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.provisioning.eelf;

+

+import com.att.eelf.i18n.EELFResolvableErrorEnum;

+import com.att.eelf.i18n.EELFResourceManager;

+

+public enum EelfMsgs implements EELFResolvableErrorEnum {

+	

+	/**

+     * Application message that prints the user (accepts one argument)

+     */

+	MESSAGE_WITH_BEHALF,

+

+	/**

+     * Application message that prints the user and the FeedID (accepts two arguments)

+     */

+

+	MESSAGE_WITH_BEHALF_AND_FEEDID,

+

+	/**

+     * Application message that prints the user and the SUBID (accepts two arguments)

+     */

+

+	MESSAGE_WITH_BEHALF_AND_SUBID;

+

+		

+    

+    /**

+     * Static initializer to ensure the resource bundles for this class are loaded...

+     * Here the application loads messages from the EelfMessages bundle

+     */

+    static {

+        EELFResourceManager.loadMessageBundle("EelfMessages");

+    }

+}
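The enum constants above resolve to message templates in the EelfMessages resource bundle loaded by the static initializer. A hedged sketch of rendering one of them follows; the exact EELFResourceManager.format overload and the argument value "someUser" are assumptions, shown only to illustrate how a one-argument message such as MESSAGE_WITH_BEHALF is filled in.

    import com.att.eelf.i18n.EELFResourceManager;

    import com.att.research.datarouter.provisioning.eelf.EelfMsgs;

    public class EelfMsgsExample {
        public static void main(String[] args) {
            // Look up the MESSAGE_WITH_BEHALF template in the EelfMessages bundle and fill in its one argument.
            String msg = EELFResourceManager.format(EelfMsgs.MESSAGE_WITH_BEHALF, "someUser");
            System.out.println(msg);
        }
    }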

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/JettyFilter.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/JettyFilter.java
new file mode 100644
index 0000000..cfef910
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/eelf/JettyFilter.java
@@ -0,0 +1,38 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package com.att.research.datarouter.provisioning.eelf;

+

+import ch.qos.logback.classic.spi.ILoggingEvent;

+import ch.qos.logback.core.filter.Filter;

+import ch.qos.logback.core.spi.FilterReply;

+

+public class JettyFilter extends Filter<ILoggingEvent>{

+	  @Override

+	  public FilterReply decide(ILoggingEvent event) {    

+	    if (event.getLoggerName().contains("org.eclipse.jetty")) {

+	      return FilterReply.ACCEPT;

+	    } else {

+	      return FilterReply.DENY;

+	    }

+	  }

+}
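A quick way to see what the filter above does is to hand it synthetic logback events; in the sketch below (the logger names and message text are arbitrary) the event from a Jetty logger should be ACCEPTed and the other DENYed.

    import ch.qos.logback.classic.Level;
    import ch.qos.logback.classic.Logger;
    import ch.qos.logback.classic.LoggerContext;
    import ch.qos.logback.classic.spi.LoggingEvent;

    import com.att.research.datarouter.provisioning.eelf.JettyFilter;

    public class JettyFilterExample {
        public static void main(String[] args) {
            LoggerContext ctx = new LoggerContext();
            JettyFilter filter = new JettyFilter();

            Logger jetty = ctx.getLogger("org.eclipse.jetty.server.Server");
            Logger other = ctx.getLogger("com.att.research.datarouter.provisioning");

            // Events whose logger name contains "org.eclipse.jetty" are ACCEPTed, all others DENYed.
            System.out.println(filter.decide(new LoggingEvent("x", jetty, Level.INFO, "started", null, null)));
            System.out.println(filter.decide(new LoggingEvent("x", other, Level.INFO, "started", null, null)));
        }
    }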

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/package.html
new file mode 100644
index 0000000..7b00931
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/package.html
@@ -0,0 +1,123 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<body>

+<p>

+This package provides the servlets used by the provisioning server for the Data Router application.

+URLs are from the document <b>URLs for DR Release 1</b> <i>Version 1.2</i>.

+</p>

+<div class="contentContainer">

+<table class="packageSummary" border="0" cellpadding="3" cellspacing="0">

+<caption><span>URL Path Summary</span><span class="tabEnd">&nbsp;</span></caption>

+<tr class="altColor">

+	<th class="colFirst">URL Path</th>

+	<th class="colOne">Symbolic Name</th>

+	<th class="colLast">Servlet Name</th>

+	<th class="colLast" colspan="4">Allowed Methods</th>

+</tr>

+<tr>

+	<td class="colFirst">/</td>

+	<td class="colOne">&lt;drFeedsUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.DRFeedsServlet}</td>

+	<td class="colLast" style="background-color: pink">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: pink">PUT</td>

+</tr>

+<tr class="altColor">

+	<td class="colFirst">/feed/feedid</td>

+	<td class="colOne">&lt;feedUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.FeedServlet}</td>

+	<td class="colLast" style="background-color: lightgreen">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: pink">POST</td>

+	<td class="colLast" style="background-color: lightgreen">PUT</td>

+</tr>

+<tr>

+	<td class="colFirst">/publish/feedid</td>

+	<td class="colOne">&lt;publishUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.PublishServlet}</td>

+	<td class="colLast" style="background-color: lightgreen">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: lightgreen">PUT</td>

+</tr>

+<tr class="altColor">

+	<td class="colFirst">/subscribe/feedid</td>

+	<td class="colOne">&lt;subscribeUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.SubscribeServlet}</td>

+	<td class="colLast" style="background-color: pink">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: pink">PUT</td>

+</tr>

+<tr>

+	<td class="colFirst">/feedlog/feedid</td>

+	<td class="colOne">&lt;feedLogUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.FeedLogServlet}</td>

+	<td class="colLast" style="background-color: pink">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: pink">POST</td>

+	<td class="colLast" style="background-color: pink">PUT</td>

+</tr>

+<tr class="altColor">

+	<td class="colFirst">/subs/subid</td>

+	<td class="colOne">&lt;subscriptionUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.SubscriptionServlet}</td>

+	<td class="colLast" style="background-color: lightgreen">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: lightgreen">PUT</td>

+</tr>

+<tr>

+	<td class="colFirst">/sublog/subid</td>

+	<td class="colOne">&lt;subLogUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.SubLogServlet}</td>

+	<td class="colLast" style="background-color: pink">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: pink">POST</td>

+	<td class="colLast" style="background-color: pink">PUT</td>

+</tr>

+<tr class="altColor">

+	<td class="colFirst">/internal/*</td>

+	<td class="colOne">&lt;internalUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.InternalServlet}</td>

+	<td class="colLast" style="background-color: lightgreen">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: lightgreen">PUT</td>

+</tr>

+<tr>

+	<td class="colFirst">/internal/route/*</td>

+	<td class="colOne">&lt;routeUrl&gt;</td>

+	<td class="colLast">{@link com.att.research.datarouter.provisioning.RouteServlet}</td>

+	<td class="colLast" style="background-color: lightgreen">DELETE</td>

+	<td class="colLast" style="background-color: lightgreen">GET</td>

+	<td class="colLast" style="background-color: lightgreen">POST</td>

+	<td class="colLast" style="background-color: pink">PUT</td>

+</tr>

+</table>

+</div>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DB.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DB.java
new file mode 100644
index 0000000..ec4b0e6
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DB.java
@@ -0,0 +1,711 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.io.File;

+import java.io.FileReader;

+import java.io.IOException;

+import java.io.InputStream;

+import java.io.LineNumberReader;

+import java.lang.reflect.Constructor;

+import java.lang.reflect.InvocationTargetException;

+import java.sql.Connection;

+import java.sql.DatabaseMetaData;

+import java.sql.DriverManager;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.util.HashSet;

+import java.util.LinkedList;

+import java.util.NoSuchElementException;

+import java.util.Properties;

+import java.util.Queue;

+import java.util.Set;

+

+import org.apache.log4j.Logger;

+

+import com.att.research.datarouter.provisioning.beans.DeliveryRecord;

+import com.att.research.datarouter.provisioning.beans.ExpiryRecord;

+import com.att.research.datarouter.provisioning.beans.Loadable;

+import com.att.research.datarouter.provisioning.beans.PublishRecord;

+

+/**

+ * Load the DB JDBC driver, and manage a simple pool of connections to the DB.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+public class DB {

+	/** The name of the properties file (in CLASSPATH) */

+	public static final String CONFIG_FILE = "provserver.properties";

+

+	private static String DB_DRIVER   = "com.mysql.jdbc.Driver";

+	private static String DB_URL      = "jdbc:mysql://127.0.0.1:3306/datarouter";

+	private static String DB_LOGIN    = "datarouter";

+	private static String DB_PASSWORD = "datarouter";

+	private static Properties props;

+	private static Logger intlogger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private static Queue<Connection> queue = new LinkedList<Connection>();

+

+	public static String HTTPS_PORT;

+	public static String HTTP_PORT;

+

+	/**

+	 * Construct a DB object.  If this is the very first creation of this object, it will load a copy

+	 * of the properties for the server, and attempt to load the JDBC driver for the database.  If a fatal

+	 * error occurs (e.g. either the properties file or the DB driver is missing), the JVM will exit.

+	 */

+	public DB() {

+		if (props == null) {

+			props = new Properties();

+			InputStream inStream = getClass().getClassLoader().getResourceAsStream(CONFIG_FILE);

+			try {

+				props.load(inStream);

+				DB_DRIVER   = (String) props.get("com.att.research.datarouter.db.driver");

+				DB_URL      = (String) props.get("com.att.research.datarouter.db.url");

+				DB_LOGIN    = (String) props.get("com.att.research.datarouter.db.login");

+				DB_PASSWORD = (String) props.get("com.att.research.datarouter.db.password");

+				HTTPS_PORT = (String) props.get("com.att.research.datarouter.provserver.https.port");

+				HTTP_PORT = (String) props.get("com.att.research.datarouter.provserver.http.port");

+				Class.forName(DB_DRIVER);

+			} catch (IOException e) {

+				intlogger.fatal("PROV9003 Opening properties: "+e.getMessage());

+				e.printStackTrace();

+				System.exit(1);

+			} catch (ClassNotFoundException e) {

+				intlogger.fatal("PROV9004 cannot find the DB driver: "+e);

+				e.printStackTrace();

+				System.exit(1);

+			} finally {

+				try {

+					inStream.close();

+				} catch (IOException e) {

+				}

+			}

+		}

+	}

+	/**

+	 * Get the provisioning server properties (loaded from provserver.properties).

+	 * @return the Properties object

+	 */

+	public Properties getProperties() {

+		return props;

+	}

+	/**

+	 * Get a JDBC connection to the DB from the pool.  Creates a new one if none are available.

+	 * @return the Connection

+	 * @throws SQLException

+	 */

+	@SuppressWarnings("resource")

+	public Connection getConnection() throws SQLException {

+		Connection c = null;

+		while (c == null) {

+			synchronized (queue) {

+				try {

+					c = queue.remove();

+				} catch (NoSuchElementException e) {

+					int n = 0;

+					do {

+						// Try up to 3 times to get a connection

+						try {

+							c = DriverManager.getConnection(DB_URL, DB_LOGIN, DB_PASSWORD);

+						} catch (SQLException e1) {

+							if (++n >= 3)

+								throw e1;

+						}

+					} while (c == null);

+				}

+			}

+			if (c != null && !c.isValid(1)) {

+				c.close();

+				c = null;

+			}

+		}

+		return c;

+	}

+	/**

+	 * Returns a JDBC connection to the pool.

+	 * @param c the Connection to return

+	 * @throws SQLException

+	 */

+	public void release(Connection c) {

+		if (c != null) {

+			synchronized (queue) {

+				if (!queue.contains(c))

+					queue.add(c);

+			}

+		}

+	}

+

+	/**

+	 * Run all necessary retrofits required to bring the database up to the level required for this version

+	 * of the provisioning server.  This should be run before the server itself is started.

+	 * @return true if all retrofits worked, false otherwise

+	 */

+	public boolean runRetroFits() {

+		return retroFit1()

+			&& retroFit2()

+			&& retroFit3()

+			&& retroFit4()

+			&& retroFit5()

+			&& retroFit6()

+			&& retroFit7()

+			&& retroFit8()

+			&& retroFit9()  //New retroFit call to add CREATED_DATE column Rally:US674199 - 1610

+			&& retroFit10() //New retroFit call to add BUSINESS_DESCRIPTION column Rally:US708102 - 1610

+			&& retroFit11() //New retroFit call for groups feature Rally:US708115 - 1610	

+			;

+	}

+	/**

+	 * Retrofit 1 - Make sure the expected tables are in MySQL and are initialized.

+	 * Uses mysql_init_0000 and mysql_init_0001 to setup the DB.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	private boolean retroFit1() {

+		final String[] expected_tables = {

+			"FEEDS", "FEED_ENDPOINT_ADDRS", "FEED_ENDPOINT_IDS", "PARAMETERS", "SUBSCRIPTIONS"

+		};

+		Connection c = null;

+		try {

+			c = getConnection();

+			Set<String> tables = getTableSet(c);

+			boolean initialize = false;

+			for (String s : expected_tables) {

+				initialize |= !tables.contains(s);

+			}

+			if (initialize) {

+				intlogger.info("PROV9001: First time startup; The database is being initialized.");

+				runInitScript(c, 0);		// script 0 creates the provisioning tables

+				runInitScript(c, 1);		// script 1 initializes PARAMETERS

+			}

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 2 - if the LOG_RECORDS table is missing, add it.

+	 * Uses mysql_init_0002 to create this table.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	private boolean retroFit2() {

+		Connection c = null;

+		try {

+			// If LOG_RECORDS table is missing, add it

+			c = getConnection();

+			Set<String> tables = getTableSet(c);

+			if (!tables.contains("LOG_RECORDS")) {

+				intlogger.info("PROV9002: Creating LOG_RECORDS table.");

+				runInitScript(c, 2);		// script 2 creates the LOG_RECORDS table

+			}

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 3 - if the FEEDS_UNIQUEID table (from release 1.0.*) exists, drop it.

+	 * If SUBSCRIPTIONS.SUBID still has the auto_increment attribute, remove it.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit3() {

+		Connection c = null;

+		try {

+			// if SUBSCRIPTIONS.SUBID still has auto_increment, remove it

+			boolean doremove = false;

+			c = getConnection();

+			DatabaseMetaData md = c.getMetaData();

+			ResultSet rs = md.getColumns("datarouter", "", "SUBSCRIPTIONS", "SUBID");

+			if (rs != null) {

+				while (rs.next()) {

+					doremove = rs.getString("IS_AUTOINCREMENT").equals("YES");

+				}

+				rs.close();

+				rs = null;

+			}

+			if (doremove) {

+				intlogger.info("PROV9002: Modifying SUBSCRIPTIONS SUBID column to remove auto increment.");

+				Statement s = c.createStatement();

+				s.execute("ALTER TABLE SUBSCRIPTIONS MODIFY COLUMN SUBID INT UNSIGNED NOT NULL");

+				s.close();

+			}

+

+			// Remove the FEEDS_UNIQUEID table, if it exists

+			Set<String> tables = getTableSet(c);

+			if (tables.contains("FEEDS_UNIQUEID")) {

+				intlogger.info("PROV9002: Dropping FEEDS_UNIQUEID table.");

+				Statement s = c.createStatement();

+				s.execute("DROP TABLE FEEDS_UNIQUEID");

+				s.close();

+			}

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	private long nextid = 0;	// used for initial creation of LOG_RECORDS table.

+	/**

+	 * Retrofit 4 - if old log tables exist (from release 1.0.*), copy them to LOG_RECORDS, then drop them.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit4() {

+		Connection c = null;

+		try {

+			c = getConnection();

+			Set<String> tables = getTableSet(c);

+			if (tables.contains("PUBLISH_RECORDS")) {

+				intlogger.info("PROV9002: Copying PUBLISH_RECORDS to LOG_RECORDS table.");

+				copyLogTable("PUBLISH_RECORDS", PublishRecord.class);

+				intlogger.info("PROV9002: Dropping PUBLISH_RECORDS table.");

+				Statement s = c.createStatement();

+				s.execute("DROP TABLE PUBLISH_RECORDS");

+				s.close();

+			}

+			if (tables.contains("DELIVERY_RECORDS")) {

+				intlogger.info("PROV9002: Copying DELIVERY_RECORDS to LOG_RECORDS table.");

+				copyLogTable("DELIVERY_RECORDS", DeliveryRecord.class);

+				intlogger.info("PROV9002: Dropping DELIVERY_RECORDS table.");

+				Statement s = c.createStatement();

+				s.execute("DROP TABLE DELIVERY_RECORDS");

+				s.close();

+			}

+			if (tables.contains("EXPIRY_RECORDS")) {

+				intlogger.info("PROV9002: Copying EXPIRY_RECORDS to LOG_RECORDS table.");

+				copyLogTable("EXPIRY_RECORDS", ExpiryRecord.class);

+				intlogger.info("PROV9002: Dropping EXPIRY_RECORDS table.");

+				Statement s = c.createStatement();

+				s.execute("DROP TABLE EXPIRY_RECORDS");

+				s.close();

+			}

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 5 - Create the new routing tables required for Release 2.

+	 * Adds a new "SUSPENDED" column to FEEDS and SUBSCRIPTIONS.

+	 * Modifies the LOG_RECORDS table to handle new R2 records.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit5() {

+		final String[] expected_tables = {

+			"INGRESS_ROUTES", "EGRESS_ROUTES", "NETWORK_ROUTES", "NODESETS", "NODES"

+		};

+		Connection c = null;

+		try {

+			// If expected tables are not present, then add new routing tables

+			c = getConnection();

+			Set<String> tables = getTableSet(c);

+			boolean initialize = false;

+			for (String s : expected_tables) {

+				initialize |= !tables.contains(s);

+			}

+			if (initialize) {

+				intlogger.info("PROV9002: Adding routing tables for Release 2.0.");

+				runInitScript(c, 3);		// script 3 creates the routing tables

+			}

+

+			// Add SUSPENDED column to FEEDS/SUBSCRIPTIONS

+			DatabaseMetaData md = c.getMetaData();

+			for (String tbl : new String[] {"FEEDS", "SUBSCRIPTIONS" }) {

+				boolean add_col = true;

+				ResultSet rs = md.getColumns("datarouter", "", tbl, "SUSPENDED");

+				if (rs != null) {

+					add_col = !rs.next();

+					rs.close();

+					rs = null;

+				}

+				if (add_col) {

+					intlogger.info("PROV9002: Adding SUSPENDED column to "+tbl+" table.");

+					Statement s = c.createStatement();

+					s.execute("ALTER TABLE "+tbl+" ADD COLUMN SUSPENDED BOOLEAN DEFAULT FALSE");

+					s.close();

+				}

+			}

+

+			// Modify LOG_RECORDS for R2

+			intlogger.info("PROV9002: Modifying LOG_RECORDS table.");

+			Statement s = c.createStatement();

+			s.execute("ALTER TABLE LOG_RECORDS MODIFY COLUMN TYPE ENUM('pub', 'del', 'exp', 'pbf', 'dlx') NOT NULL");

+			s.close();

+			s = c.createStatement();

+			s.execute("ALTER TABLE LOG_RECORDS MODIFY COLUMN REASON ENUM('notRetryable', 'retriesExhausted', 'diskFull', 'other')");

+			s.close();

+			boolean add_col = true;

+			ResultSet rs = md.getColumns("datarouter", "", "LOG_RECORDS", "CONTENT_LENGTH_2");

+			if (rs != null) {

+				add_col = !rs.next();

+				rs.close();

+				rs = null;

+			}

+			if (add_col) {

+				intlogger.info("PROV9002: Fixing two columns in LOG_RECORDS table (this may take some time).");

+				s = c.createStatement();

+				s.execute("ALTER TABLE LOG_RECORDS MODIFY COLUMN CONTENT_LENGTH BIGINT NOT NULL, ADD COLUMN CONTENT_LENGTH_2 BIGINT AFTER RECORD_ID");

+				s.close();

+			}

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 6 - Adjust LOG_RECORDS.USER to be 50 chars (MR #74).

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit6() {

+		Connection c = null;

+		try {

+			c = getConnection();

+			// Modify LOG_RECORDS for R2

+			intlogger.info("PROV9002: Modifying LOG_RECORDS.USER length.");

+			Statement s = c.createStatement();

+			s.execute("ALTER TABLE LOG_RECORDS MODIFY COLUMN USER VARCHAR(50)");

+			s.close();

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 7 - Adjust LOG_RECORDS.FEED_FILEID and LOG_RECORDS.DELIVERY_FILEID to be 256 chars.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit7() {

+		Connection c = null;

+		try {

+			c = getConnection();

+			// Modify LOG_RECORDS for long (>128) FILEIDs

+			intlogger.info("PROV9002: Modifying LOG_RECORDS.FEED_FILEID and DELIVERY_FILEID lengths.");

+			Statement s = c.createStatement();

+			s.execute("ALTER TABLE LOG_RECORDS MODIFY COLUMN FEED_FILEID VARCHAR(256), MODIFY COLUMN DELIVERY_FILEID VARCHAR(256)");

+			s.close();

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	/**

+	 * Retrofit 8 - Adjust FEEDS.NAME to be 255 chars (MR #74).

+	 * @return true if the retrofit worked, false otherwise

+	 */

+	@SuppressWarnings("resource")

+	private boolean retroFit8() {

+		Connection c = null;

+		try {

+			c = getConnection();

+			intlogger.info("PROV9002: Modifying FEEDS.NAME length.");

+			Statement s = c.createStatement();

+			s.execute("ALTER TABLE FEEDS MODIFY COLUMN NAME VARCHAR(255)");

+			s.close();

+		} catch (SQLException e) {

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+	

+	/**

+	 * Retrofit 9 - Add column FEEDS.CREATED_DATE and SUBSCRIPTIONS.CREATED_DATE, 1610 release user story US674199.

+	 * @return true if the retrofit worked, false otherwise

+	 */

+

+	@SuppressWarnings("resource")		

+	private boolean retroFit9() {		

+		Connection c = null;		

+		try {		

+			c = getConnection();		

+			// Add CREATED_DATE column to FEEDS/SUBSCRIPTIONS tables

+			DatabaseMetaData md = c.getMetaData();		

+			for (String tbl : new String[] {"FEEDS", "SUBSCRIPTIONS" }) {		

+				boolean add_col = true;		

+				ResultSet rs = md.getColumns("datarouter", "", tbl, "CREATED_DATE");		

+				if (rs != null) {		

+					add_col = !rs.next();		

+					rs.close();		

+					rs = null;		

+				}		

+				if (add_col) {		

+					intlogger.info("PROV9002: Adding CREATED_DATE column to "+tbl+" table.");		

+					Statement s = c.createStatement();

+					s.execute("ALTER TABLE "+tbl+" ADD COLUMN CREATED_DATE timestamp DEFAULT CURRENT_TIMESTAMP");		

+					s.close();		

+				}		

+			}						

+		} catch (SQLException e) {		

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());		

+			return false;		

+		} finally {		

+			if (c != null)		

+				release(c);		

+		}		

+		return true;		

+	}

+

+	/**

+	 * Retrofit 10 - Add the BUSINESS_DESCRIPTION column to the FEEDS table (Rally

+	 * US708102).

+	 * 

+	 * @return true if the retrofit worked, false otherwise

+	 */

+

+	@SuppressWarnings("resource")

+	private boolean retroFit10() {

+		Connection c = null;

+		boolean addColumn = true;

+		

+		try {

+

+			c = getConnection();		

+			// Add BUSINESS_DESCRIPTION column to FEEDS table

+			DatabaseMetaData md = c.getMetaData();		

+				boolean add_col = true;		

+				ResultSet rs = md.getColumns("datarouter", "", "FEEDS", "BUSINESS_DESCRIPTION");		

+				if (rs != null) {		

+					add_col = !rs.next();		

+					rs.close();		

+					rs = null;		

+				}	

+		if(add_col) {

+			intlogger

+					.info("PROV9002: Adding BUSINESS_DESCRIPTION column to FEEDS table.");

+			Statement s = c.createStatement();

+			s.execute("ALTER TABLE FEEDS ADD COLUMN BUSINESS_DESCRIPTION varchar(1000) DEFAULT NULL AFTER DESCRIPTION, MODIFY COLUMN DESCRIPTION VARCHAR(1000)");

+			s.close();

+			}

+		}

+		catch (SQLException e) {

+			intlogger

+					.fatal("PROV9000: The database credentials are not working: "

+							+ e.getMessage());

+			return false;

+		} finally {

+			if (c != null)

+				release(c);

+		}

+		return true;

+	}

+

+

+	/**

+	 * Retrofit 11 - Add the GROUPS table, and a GROUPID column to the FEEDS and SUBSCRIPTIONS tables,

+	 * for the groups feature (Rally:US708115 - 1610).

+	 * @return true if the table and columns are present or were created, false otherwise

+	 */

+	@SuppressWarnings("resource")	

+	private boolean retroFit11() {		

+		final String[] expected_tables = {		

+			"GROUPS"		

+		};		

+		Connection c = null;		

+			

+		try {		

+			// If the GROUPS table is not present, create it

+			c = getConnection();		

+			Set<String> tables = getTableSet(c);		

+			boolean initialize = false;		

+			for (String s : expected_tables) {		

+				initialize |= !tables.contains(s);		

+			}		

+			if (initialize) {		

+				intlogger.info("PROV9002: Adding GROUPS table for Release 1610.");		

+				runInitScript(c, 4);		// script 4 creates the GROUPS table

+			}		

+					

+			// Add GROUPID column to FEEDS/SUBSCRIPTIONS		

+			DatabaseMetaData md = c.getMetaData();		

+			for (String tbl : new String[] {"FEEDS", "SUBSCRIPTIONS" }) {		

+				boolean add_col = true;		

+				ResultSet rs = md.getColumns("datarouter", "", tbl, "GROUPID");		

+				if (rs != null) {		

+					add_col = !rs.next();		

+					rs.close();		

+					rs = null;		

+				}		

+				if (add_col) {		

+					intlogger.info("PROV9002: Adding GROUPID column to "+tbl+" table.");		

+					Statement s = c.createStatement();		

+					s.execute("ALTER TABLE "+tbl+" ADD COLUMN GROUPID INT(10) UNSIGNED NOT NULL DEFAULT 0 AFTER FEEDID");		

+					s.close();		

+				}		

+			}						

+		} catch (SQLException e) {		

+			intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());		

+			return false;		

+		} finally {		

+			if (c != null)		

+				release(c);		

+		}		

+		return true;		

+	}

+

+

+	/**

+	 * Copy the log table <i>table_name</i> to LOG_RECORDS.

+	 * @param table_name the name of the old (1.0.*) table to copy

+	 * @param table_class the class used to instantiate a record from the table

+	 * @throws SQLException if there is a problem getting a MySQL connection

+	 */

+	@SuppressWarnings("resource")

+	private void copyLogTable(String table_name, Class<? extends Loadable> table_class) throws SQLException {

+		long start = System.currentTimeMillis();

+		int n = 0;

+		Connection c1 = getConnection();

+		Connection c2 = getConnection();

+

+		try {

+			Constructor<? extends Loadable> cnst = table_class.getConstructor(ResultSet.class);

+			PreparedStatement ps = c2.prepareStatement(LogfileLoader.INSERT_SQL);

+			Statement stmt = c1.createStatement();

+			ResultSet rs = stmt.executeQuery("select * from "+table_name);

+			while (rs.next()) {

+				Loadable rec = cnst.newInstance(rs);

+				rec.load(ps);

+				ps.setLong(18, ++nextid);

+				ps.executeUpdate();

+				if ((++n % 10000) == 0)

+					intlogger.debug("  "+n+" records done.");

+			}

+			stmt.close();

+			ps.close();

+		} catch (SQLException e) {

+			e.printStackTrace();

+		} catch (NoSuchMethodException e) {

+			e.printStackTrace();

+		} catch (SecurityException e) {

+			e.printStackTrace();

+		} catch (InstantiationException e) {

+			e.printStackTrace();

+		} catch (IllegalAccessException e) {

+			e.printStackTrace();

+		} catch (IllegalArgumentException e) {

+			e.printStackTrace();

+		} catch (InvocationTargetException e) {

+			e.printStackTrace();

+		}

+

+		release(c1);

+		release(c2);

+		long x = (System.currentTimeMillis() - start);

+		intlogger.debug("  "+n+" records done in "+x+" ms.");

+	}

+

+	/**

+	 * Get a set of all table names in the DB.

+	 * @param c a DB connection

+	 * @return the set of table names

+	 */

+	private Set<String> getTableSet(Connection c) {

+		Set<String> tables = new HashSet<String>();

+		try {

+			DatabaseMetaData md = c.getMetaData();

+			ResultSet rs = md.getTables("datarouter", "", "", null);

+			if (rs != null) {

+				while (rs.next()) {

+					tables.add(rs.getString("TABLE_NAME"));

+				}

+				rs.close();

+			}

+		} catch (SQLException e) {

+		}

+		return tables;

+	}

+	/**

+	 * Initialize the tables by running the initialization scripts located in the directory specified

+	 * by the property <i>com.att.research.datarouter.provserver.dbscripts</i>.  Scripts have names of

+	 * the form mysql_init_NNNN.

+	 * @param c a DB connection

+	 * @param n the number of the mysql_init_NNNN script to run

+	 */

+	private void runInitScript(Connection c, int n) {

+		String scriptdir = (String) props.get("com.att.research.datarouter.provserver.dbscripts");

+		StringBuilder sb = new StringBuilder();

+		try {

+			String scriptfile = String.format("%s/mysql_init_%04d", scriptdir, n);

+			if (!(new File(scriptfile)).exists())

+				return;

+

+			LineNumberReader in = new LineNumberReader(new FileReader(scriptfile));

+			String line;

+			while ((line = in.readLine()) != null) {

+				if (!line.startsWith("--")) {

+					line = line.trim();

+					sb.append(line);

+					if (line.endsWith(";")) {

+						// Execute one DDL statement

+						String sql = sb.toString();

+						sb.setLength(0);

+						Statement s = c.createStatement();

+						s.execute(sql);

+						s.close();

+					}

+				}

+			}

+			in.close();

+			sb.setLength(0);

+		} catch (Exception e) {

+			intlogger.fatal("PROV9002 Error when initializing table: "+e.getMessage());

+			System.exit(1);

+		}

+	}

+}
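Putting the pieces above together: a caller borrows a connection with getConnection(), does its JDBC work, and always hands the connection back with release() instead of closing it, while runRetroFits() is invoked once before the provisioning server starts. A minimal sketch, with an illustrative query only:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    import com.att.research.datarouter.provisioning.utils.DB;

    public class DBExample {
        public static void main(String[] args) throws SQLException {
            DB db = new DB();                // first construction loads provserver.properties and the JDBC driver
            if (!db.runRetroFits()) {        // bring the schema up to the level this release expects
                System.exit(1);
            }
            Connection conn = db.getConnection();
            try (Statement s = conn.createStatement();
                 ResultSet rs = s.executeQuery("select count(*) from SUBSCRIPTIONS")) {
                if (rs.next()) {
                    System.out.println("subscriptions: " + rs.getInt(1));
                }
            } finally {
                db.release(conn);            // return the connection to the pool; do not close it
            }
        }
    }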

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DRRouteCLI.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DRRouteCLI.java
new file mode 100644
index 0000000..36d46e3
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/DRRouteCLI.java
@@ -0,0 +1,456 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.io.File;

+import java.io.FileInputStream;

+import java.io.IOException;

+import java.io.InputStream;

+import java.io.InputStreamReader;

+import java.io.LineNumberReader;

+import java.security.KeyStore;

+import java.util.Arrays;

+import java.util.Properties;

+

+import javax.servlet.http.HttpServletResponse;

+

+import org.apache.http.HttpEntity;

+import org.apache.http.HttpResponse;

+import org.apache.http.StatusLine;

+import org.apache.http.client.methods.HttpDelete;

+import org.apache.http.client.methods.HttpGet;

+import org.apache.http.client.methods.HttpPost;

+import org.apache.http.conn.scheme.Scheme;

+import org.apache.http.conn.ssl.SSLSocketFactory;

+import org.apache.http.impl.client.AbstractHttpClient;

+import org.apache.http.impl.client.DefaultHttpClient;

+import org.apache.http.util.EntityUtils;

+import org.json.JSONArray;

+import org.json.JSONObject;

+import org.json.JSONTokener;

+

+/**

+ * This class provides a Command Line Interface for the routing tables in the DR Release 2.0 DB.

+ * A full description of this command is <a href="http://wiki.proto.research.att.com/doku.php?id=datarouter-route-cli">here</a>.

+ *

+ * @author Robert Eby

+ * @version $Id: DRRouteCLI.java,v 1.2 2013/11/05 15:54:16 eby Exp $

+ */

+public class DRRouteCLI {

+	/**

+	 * Invoke the CLI.  The CLI can be run with a single command (given as command line arguments),

+	 * or in an interactive mode where the user types a sequence of commands to the program.  The CLI is invoked via:

+	 * <pre>

+	 * java com.att.research.datarouter.provisioning.utils.DRRouteCLI [ -s <i>server</i> ] [ <i>command</i> ]

+	 * </pre>

+	 * A full description of the arguments to this command are

+	 * <a href="http://wiki.proto.research.att.com/doku.php?id=datarouter-route-cli">here</a>.

+	 *

+	 * @param args command line arguments

+	 * @throws Exception for any unrecoverable problem

+	 */

+	public static void main(String[] args) throws Exception {

+		String server = System.getenv(ENV_VAR);

+		if (args.length >= 2 && args[0].equals("-s")) {

+			server = args[1];

+			String[] t = new String[args.length-2];

+			if (t.length > 0)

+				System.arraycopy(args, 2, t, 0, t.length);

+			args = t;

+		}

+		if (server == null || server.equals("")) {

+			System.err.println("dr-route: you need to specify a server, either via $PROVSRVR or the '-s' option.");

+			System.exit(1);

+		}

+		DRRouteCLI cli = new DRRouteCLI(server);

+		if (args.length > 0) {

+			boolean b = cli.runCommand(args);

+			System.exit(b ? 0 : 1);

+		} else {

+			cli.interactive();

+			System.exit(0);

+		}

+	}
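
+	// A hedged sketch of driving this CLI from Java rather than from the shell; the server name below is a

+	// placeholder, not a real provisioning host, and the constructor declares throws Exception. This simply

+	// mirrors the one-shot command path taken by main() above:

+	//

+	//     DRRouteCLI cli = new DRRouteCLI("prov.example.org");

+	//     boolean ok = cli.runCommand(new String[] { "list", "all" });   // same as: dr-route list all

+	//     System.exit(ok ? 0 : 1);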

+

+	public static final String ENV_VAR = "PROVSRVR";

+	public static final String PROMPT = "dr-route> ";

+	public static final String DEFAULT_TRUSTSTORE_PATH = /* $JAVA_HOME + */ "/jre/lib/security/cacerts";

+

+	private final String server;

+	private int width = 120;		// screen width (for list)

+	private AbstractHttpClient httpclient;

+

+	/**

+	 * Create a DRRouteCLI object connecting to the specified server.

+	 * @param server the server to send command to

+	 * @throws Exception

+	 */

+	public DRRouteCLI(String server) throws Exception {

+		this.server = server;

+		this.width = 120;

+		this.httpclient = new DefaultHttpClient();

+

+		Properties p = (new DB()).getProperties();

+		String truststore_file = p.getProperty("com.att.research.datarouter.provserver.truststore.path");

+		String truststore_pw   = p.getProperty("com.att.research.datarouter.provserver.truststore.password");

+

+		KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());

+		if (truststore_file == null || truststore_file.equals("")) {

+			String jhome = System.getenv("JAVA_HOME");

+			if (jhome == null || jhome.equals(""))

+				jhome = "/opt/java/jdk/jdk180";

+			truststore_file = jhome + DEFAULT_TRUSTSTORE_PATH;

+		}

+		File f = new File(truststore_file);

+		if (f.exists()) {

+			FileInputStream instream = new FileInputStream(f);

+		    try {

+		        trustStore.load(instream, truststore_pw.toCharArray());

+		    } catch (Exception x) {

+		    	System.err.println("Problem reading truststore: "+x);

+		    	throw x;

+		    } finally {

+		        try { instream.close(); } catch (Exception ignore) {}

+		    }

+		}

+

+		SSLSocketFactory socketFactory = new SSLSocketFactory(trustStore);

+		Scheme sch = new Scheme("https", 443, socketFactory);

+		httpclient.getConnectionManager().getSchemeRegistry().register(sch);

+	}

+

+	private void interactive() throws IOException {

+		LineNumberReader in = new LineNumberReader(new InputStreamReader(System.in));

+		while (true) {

+			System.out.print(PROMPT);

+			String line = in.readLine();

+			if (line == null)

+				return;

+			line = line.trim();

+			if (line.equalsIgnoreCase("exit"))	// "exit" may only be used in interactive mode

+				return;

+			if (line.equalsIgnoreCase("quit"))	// "quit" may only be used in interactive mode

+				return;

+			String[] args = line.split("[ \t]+");

+			if (args.length > 0)

+				runCommand(args);

+		}

+	}

+

+	/**

+	 * Run the command specified by the arguments.

+	 * @param args The command line arguments.

+	 * @return true if the command was valid and succeeded

+	 */

+	public boolean runCommand(String[] args) {

+		String cmd = args[0].trim().toLowerCase();

+		if (cmd.equals("add")) {

+			if (args.length > 2) {

+				if (args[1].startsWith("in") && args.length >= 6) {

+					return addIngress(args);

+				}

+				if (args[1].startsWith("eg") && args.length == 4) {

+					return addEgress(args);

+				}

+				if (args[1].startsWith("ne") && args.length == 5) {

+					return addRoute(args);

+				}

+			}

+			System.err.println("Add command should be one of:");

+			System.err.println("  add in[gress] feedid user subnet nodepatt [ seq ]");

+			System.err.println("  add eg[ress]  subid node");

+			System.err.println("  add ne[twork] fromnode tonode vianode");

+		} else if (cmd.startsWith("del")) {

+			if (args.length > 2) {

+				if (args[1].startsWith("in") && args.length == 5) {

+					return delIngress(args);

+				}

+				if (args[1].startsWith("in") && args.length == 3) {

+					return delIngress(args);

+				}

+				if (args[1].startsWith("eg") && args.length == 3) {

+					return delEgress(args);

+				}

+				if (args[1].startsWith("ne") && args.length == 4) {

+					return delRoute(args);

+				}

+			}

+			System.err.println("Delete command should be one of:");

+			System.err.println("  del in[gress] feedid user subnet");

+			System.err.println("  del in[gress] seq");

+			System.err.println("  del eg[ress]  subid");

+			System.err.println("  del ne[twork] fromnode tonode");

+		} else if (cmd.startsWith("lis")) {

+			return list(args);

+		} else if (cmd.startsWith("wid") && args.length > 1) {

+			width = Integer.parseInt(args[1]);

+			return true;

+		} else if (cmd.startsWith("?") || cmd.startsWith("hel") || cmd.startsWith("usa")) {

+			usage();

+		} else if (cmd.startsWith("#")) {

+			// comment -- ignore

+		} else {

+			System.err.println("Command should be one of add, del, list, exit, quit");

+		}

+		return false;

+	}

+

+	private void usage() {

+		System.out.println("Enter one of the following commands:");

+		System.out.println("  add in[gress] feedid user subnet nodepatt [ seq ]");

+		System.out.println("  add eg[ress]  subid node");

+		System.out.println("  add ne[twork] fromnode tonode vianode");

+		System.out.println("  del in[gress] feedid user subnet");

+		System.out.println("  del in[gress] seq");

+		System.out.println("  del eg[ress]  subid");

+		System.out.println("  del ne[twork] fromnode tonode");

+		System.out.println("  list [ all | ingress | egress | network ]");

+		System.out.println("  exit");

+		System.out.println("  quit");

+	}

+

+	private boolean addIngress(String[] args) {

+		String url = String.format("https://%s/internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s", server, args[2], args[3], args[4], args[5]);

+		if (args.length > 6)

+			url += "&seq=" + args[6];

+		return doPost(url);

+	}

+

+	private boolean addEgress(String[] args) {

+		String url = String.format("https://%s/internal/route/egress/?sub=%s&node=%s", server, args[2], args[3]);

+		return doPost(url);

+	}

+

+	private boolean addRoute(String[] args) {

+		String url = String.format("https://%s/internal/route/network/?from=%s&to=%s&via=%s", server, args[2], args[3], args[4]);

+		return doPost(url);

+	}

+

+	private boolean delIngress(String[] args) {

+		String url;

+		if (args.length == 5) {

+			String subnet = args[4].replaceAll("/", "!");	// replace the / with a !

+			url = String.format("https://%s/internal/route/ingress/%s/%s/%s", server, args[2], args[3], subnet);

+		} else {

+			url = String.format("https://%s/internal/route/ingress/%s", server, args[2]);

+		}

+		return doDelete(url);

+	}

+

+	private boolean delEgress(String[] args) {

+		String url = String.format("https://%s/internal/route/egress/%s", server, args[2]);

+		return doDelete(url);

+	}

+

+	private boolean delRoute(String[] args) {

+		String url = String.format("https://%s/internal/route/network/%s/%s", server, args[2], args[3]);

+		return doDelete(url);

+	}

+

+	private boolean list(String[] args) {

+		String tbl = (args.length == 1) ? "all" : args[1].toLowerCase();

+		JSONObject jo = doGet("https://"+server+"/internal/route/");	// Returns all 3 tables

+		StringBuilder sb = new StringBuilder();

+		if (tbl.startsWith("al") || tbl.startsWith("in")) {

+			// Display the IRT

+			JSONArray irt = jo.optJSONArray("ingress");

+			int cw1 = 6, cw2 = 6, cw3 = 6, cw4 = 6;		// determine column widths for first 4 cols

+			for (int i = 0; irt != null && i < irt.length(); i++) {

+				JSONObject e  = irt.getJSONObject(i);

+				cw1 = Math.max(cw1, (""+ e.getInt("seq")).length());

+				cw2 = Math.max(cw2, (""+e.getInt("feedid")).length());

+				String t = e.optString("user");

+				cw3 = Math.max(cw3, (t == null) ? 1 : t.length());

+				t = e.optString("subnet");

+				cw4 = Math.max(cw4, (t == null) ? 1 : t.length());

+			}

+

+			int nblank = cw1 + cw2 + cw3 + cw4 + 8;

+			sb.append("Ingress Routing Table\n");

+			sb.append(String.format("%s  %s  %s  %s  Nodes\n", ext("Seq", cw1), ext("FeedID", cw2), ext("User", cw3), ext("Subnet", cw4)));

+			for (int i = 0; irt != null && i < irt.length(); i++) {

+				JSONObject e  = irt.getJSONObject(i);

+				String seq    = ""+e.getInt("seq");

+				String feedid = ""+e.getInt("feedid");

+				String user   = e.optString("user");

+				String subnet = e.optString("subnet");

+				if (user.equals("")) user = "-";

+				if (subnet.equals("")) subnet = "-";

+				JSONArray nodes = e.getJSONArray("node");

+				int sol = sb.length();

+				sb.append(String.format("%s  %s  %s  %s  ", ext(seq, cw1), ext(feedid, cw2), ext(user, cw3), ext(subnet, cw4)));

+				for (int j = 0; j < nodes.length(); j++) {

+					String nd = nodes.getString(j);

+					int cursor = sb.length() - sol;

+					if (j > 0 && (cursor + nd.length() > width)) {

+						sb.append("\n");

+						sol = sb.length();

+						sb.append(ext(" ", nblank));

+					}

+					sb.append(nd);

+					if ((j+1) < nodes.length()) {

+						sb.append(", ");

+					}

+				}

+				sb.append("\n");

+			}

+		}

+		if (tbl.startsWith("al") || tbl.startsWith("eg")) {

+			// Display the ERT

+			JSONObject ert = jo.optJSONObject("egress");

+			String[] subs = (ert == null) ? new String[0] : JSONObject.getNames(ert);

+			if (subs == null)

+				subs = new String[0];

+			Arrays.sort(subs);

+			int cw1 = 5;

+			for (int i = 0; i < subs.length; i++) {

+				cw1 = Math.max(cw1, subs[i].length());

+			}

+

+			if (sb.length() > 0)

+				sb.append("\n");

+			sb.append("Egress Routing Table\n");

+			sb.append(String.format("%s  Node\n", ext("SubID", cw1)));

+			for (int i = 0; i < subs.length; i++) {

+				String node = ert.getString(subs[i]);

+				sb.append(String.format("%s  %s\n", ext(subs[i], cw1), node));

+			}

+		}

+		if (tbl.startsWith("al") || tbl.startsWith("ne")) {

+			// Display the NRT

+			JSONArray nrt = jo.optJSONArray("routing");

+			int cw1 = 4, cw2 = 4;

+			for (int i = 0; nrt != null && i < nrt.length(); i++) {

+				JSONObject e = nrt.getJSONObject(i);

+				String from = e.getString("from");

+				String to   = e.getString("to");

+				cw1 = Math.max(cw1, from.length());

+				cw2 = Math.max(cw2, to.length());

+			}

+

+			if (sb.length() > 0)

+				sb.append("\n");

+			sb.append("Network Routing Table\n");

+			sb.append(String.format("%s  %s  Via\n", ext("From", cw1), ext("To", cw2)));

+			for (int i = 0; nrt != null && i < nrt.length(); i++) {

+				JSONObject e = nrt.getJSONObject(i);

+				String from = e.getString("from");

+				String to   = e.getString("to");

+				String via  = e.getString("via");

+				sb.append(String.format("%s  %s  %s\n", ext(from, cw1), ext(to, cw2), via));

+			}

+		}

+		System.out.print(sb.toString());

+		return true;

+	}

+	private String ext(String s, int n) {

+		if (s == null)

+			s = "-";

+		while (s.length() < n)

+			s += " ";

+		return s;

+	}

+

+	private boolean doDelete(String url) {

+		boolean rv = false;

+		HttpDelete meth = new HttpDelete(url);

+		try {

+			HttpResponse response = httpclient.execute(meth);

+			HttpEntity entity = response.getEntity();

+			StatusLine sl = response.getStatusLine();

+			rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);

+			if (rv) {

+				System.out.println("Routing entry deleted.");

+				EntityUtils.consume(entity);

+			} else {

+				printErrorText(entity);

+			}

+		} catch (Exception e) {

+			System.err.println(e);

+		} finally {

+			meth.releaseConnection();

+		}

+		return rv;

+	}

+

+	private JSONObject doGet(String url) {

+		JSONObject rv = new JSONObject();

+		HttpGet meth = new HttpGet(url);

+		try {

+			HttpResponse response = httpclient.execute(meth);

+			HttpEntity entity = response.getEntity();

+			StatusLine sl = response.getStatusLine();

+			if (sl.getStatusCode() == HttpServletResponse.SC_OK) {

+				rv = new JSONObject(new JSONTokener(entity.getContent()));

+			} else {

+				printErrorText(entity);

+			}

+		} catch (Exception e) {

+			System.err.println(e);

+		} finally {

+			meth.releaseConnection();

+		}

+		return rv;

+	}

+

+	private boolean doPost(String url) {

+		boolean rv = false;

+		HttpPost meth = new HttpPost(url);

+		try {

+			HttpResponse response = httpclient.execute(meth);

+			HttpEntity entity = response.getEntity();

+			StatusLine sl = response.getStatusLine();

+			rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);

+			if (rv) {

+				System.out.println("Routing entry added.");

+				EntityUtils.consume(entity);

+			} else {

+				printErrorText(entity);

+			}

+		} catch (Exception e) {

+			System.err.println(e);

+		} finally {

+			meth.releaseConnection();

+		}

+		return rv;

+	}

+

+	private void printErrorText(HttpEntity entity) throws IllegalStateException, IOException {

+		// Look for and print only the part of the output between <pre>...</pre>

+		InputStream is = entity.getContent();

+		StringBuilder sb = new StringBuilder();

+		byte[] b = new byte[512];

+		int n = 0;

+		while ((n = is.read(b)) > 0) {

+			sb.append(new String(b, 0, n));

+		}

+		is.close();

+		int ix = sb.indexOf("<pre>");

+		if (ix >= 0)

+			sb.delete(0, ix+5);

+		ix = sb.indexOf("</pre>");

+		if (ix >= 0)

+			sb.delete(ix, sb.length());

+		System.err.println(sb.toString());

+	}

+}
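
The list() command above formats each routing table in two passes: a first pass over the JSON array to find the widest value in each column, and a second pass that pads every value to that width with the ext() helper. A minimal standalone sketch of the same two-pass padding (the class name and sample rows are illustrative only, not part of the tool):

    import java.util.Arrays;
    import java.util.List;

    public class PadDemo {
        // Right-pad s with spaces to width n (same behaviour as the ext() helper above).
        static String ext(String s, int n) {
            if (s == null) s = "-";
            while (s.length() < n) s = s + " ";
            return s;
        }

        public static void main(String[] args) {
            List<String[]> rows = Arrays.asList(
                new String[] { "1",  "42", "user1" },
                new String[] { "10", "7",  "someuser" });
            // Pass 1: column widths start at the header widths, then grow to fit the data.
            int cw1 = "Seq".length(), cw2 = "FeedID".length(), cw3 = "User".length();
            for (String[] r : rows) {
                cw1 = Math.max(cw1, r[0].length());
                cw2 = Math.max(cw2, r[1].length());
                cw3 = Math.max(cw3, r[2].length());
            }
            // Pass 2: print the header and the padded rows.
            System.out.println(String.format("%s  %s  %s", ext("Seq", cw1), ext("FeedID", cw2), ext("User", cw3)));
            for (String[] r : rows)
                System.out.println(String.format("%s  %s  %s", ext(r[0], cw1), ext(r[1], cw2), ext(r[2], cw3)));
        }
    }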

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/JSONUtilities.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/JSONUtilities.java
new file mode 100644
index 0000000..e167658
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/JSONUtilities.java
@@ -0,0 +1,76 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.net.InetAddress;

+import java.net.UnknownHostException;

+import java.util.Collection;

+

+/**

+ * Some utility functions used when creating/validating JSON.

+ *

+ * @author Robert Eby

+ * @version $Id: JSONUtilities.java,v 1.1 2013/04/26 21:00:26 eby Exp $

+ */

+public class JSONUtilities {

+	/**

+	 * Does the String <i>v</i> represent a valid Internet address (with or without a

+	 * mask length appended)?

+	 * @param v the string to check

+	 * @return true if valid, false otherwise

+	 */

+	public static boolean validIPAddrOrSubnet(String v) {

+		String[] pp = { v, "" };

+		if (v.indexOf('/') > 0)

+			pp = v.split("/");

+		try {

+			InetAddress addr = InetAddress.getByName(pp[0]);

+			if (pp[1].length() > 0) {

+				// check subnet mask

+				int mask = Integer.parseInt(pp[1]);

+				if (mask > (addr.getAddress().length * 8))

+					return false;

+			}

+			return true;

+		} catch (UnknownHostException e) {

+			return false;

+		}

+	}

+	/**

+	 * Build a JSON array from a collection of Strings.

+	 * @param coll the collection

+	 * @return a String containing a JSON array

+	 */

+	public static String createJSONArray(Collection<String> coll) {

+		StringBuilder sb = new StringBuilder("[");

+		String pfx = "\n";

+		for (String t : coll) {

+			sb.append(pfx).append("  \"").append(t).append("\"");

+			pfx = ",\n";

+		}

+		sb.append("\n]\n");

+		return sb.toString();

+	}

+}
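
A short usage sketch of the two helpers defined above; the sample addresses and node names are illustrative only:

    import java.util.Arrays;
    import com.att.research.datarouter.provisioning.utils.JSONUtilities;

    public class JSONUtilitiesDemo {
        public static void main(String[] args) {
            // validIPAddrOrSubnet accepts a bare address or an address with a mask length.
            System.out.println(JSONUtilities.validIPAddrOrSubnet("10.10.10.10"));    // true
            System.out.println(JSONUtilities.validIPAddrOrSubnet("10.10.10.0/24"));  // true
            System.out.println(JSONUtilities.validIPAddrOrSubnet("10.10.10.0/64"));  // false: mask longer than 32 bits for IPv4

            // createJSONArray renders a collection of strings as a JSON array literal.
            System.out.println(JSONUtilities.createJSONArray(Arrays.asList("node1", "node2")));
        }
    }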

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/LogfileLoader.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/LogfileLoader.java
new file mode 100644
index 0000000..f9c11f1
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/LogfileLoader.java
@@ -0,0 +1,549 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.io.File;

+import java.io.FileInputStream;

+import java.io.FileNotFoundException;

+import java.io.FileReader;

+import java.io.FilenameFilter;

+import java.io.IOException;

+import java.io.InputStreamReader;

+import java.io.LineNumberReader;

+import java.io.Reader;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.sql.Statement;

+import java.text.ParseException;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.Iterator;

+import java.util.Map;

+import java.util.TreeSet;

+import java.util.zip.GZIPInputStream;

+

+import org.apache.log4j.Logger;

+

+import com.att.research.datarouter.provisioning.BaseServlet;

+import com.att.research.datarouter.provisioning.beans.DeliveryExtraRecord;

+import com.att.research.datarouter.provisioning.beans.DeliveryRecord;

+import com.att.research.datarouter.provisioning.beans.ExpiryRecord;

+import com.att.research.datarouter.provisioning.beans.Loadable;

+import com.att.research.datarouter.provisioning.beans.LogRecord;

+import com.att.research.datarouter.provisioning.beans.Parameters;

+import com.att.research.datarouter.provisioning.beans.PubFailRecord;

+import com.att.research.datarouter.provisioning.beans.PublishRecord;

+

+/**

+ * This class provides methods that run in a separate thread, in order to process logfiles uploaded into the spooldir.

+ * These logfiles are loaded into the MySQL LOG_RECORDS table. In a running provisioning server, there should only be

+ * two places where records can be loaded into this table: here, and in the method DB.retroFit4(), which may be run at

+ * startup to load the old (1.0) style log tables into LOG_RECORDS.

+ * <p>This class maintains an {@link RLEBitSet} which can be used to easily see what records are presently in the

+ * database.

+ * This bit set is used to synchronize between provisioning servers.</p>

+ *

+ * @author Robert Eby

+ * @version $Id: LogfileLoader.java,v 1.22 2014/03/12 19:45:41 eby Exp $

+ */

+public class LogfileLoader extends Thread {

+	/** Default number of log records to keep when pruning.  Keep 10M by default. */

+	public static final long DEFAULT_LOG_RETENTION = 10000000L;

+	/** NOT USED: Percentage of free space required before old records are removed. */

+	public static final int REQUIRED_FREE_PCT = 20;

+

+	/** This is a singleton -- there is only one LogfileLoader object in the server */

+	private static LogfileLoader p;

+

+	/**

+	 * Get the singleton LogfileLoader object, and start it if it is not running.

+	 * @return the LogfileLoader

+	 */

+	public static synchronized LogfileLoader getLoader() {

+		if (p == null)

+			p = new LogfileLoader();

+		if (!p.isAlive())

+			p.start();

+		return p;

+	}

+

+	/** The PreparedStatement which is loaded by a <i>Loadable</i>. */

+	public static final String INSERT_SQL = "insert into LOG_RECORDS values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

+	/** Each server can assign this many IDs */

+	private static final long SET_SIZE = (1L << 56);

+

+	private final Logger logger;

+	private final DB db;

+	private final String spooldir;

+	private final long set_start;

+	private final long set_end;

+	private RLEBitSet seq_set;

+	private long nextid;

+	private boolean idle;

+

+	private LogfileLoader() {

+		this.logger   = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+		this.db       = new DB();

+		this.spooldir = db.getProperties().getProperty("com.att.research.datarouter.provserver.spooldir");

+		this.set_start = getIdRange();

+		this.set_end   = set_start + SET_SIZE - 1;

+		this.seq_set  = new RLEBitSet();

+		this.nextid   = 0;

+		this.idle     = false;

+

+		// This is a potentially lengthy operation, so has been moved to run()

+		//initializeNextid();

+		this.setDaemon(true);

+		this.setName("LogfileLoader");

+	}

+

+	private long getIdRange() {

+		long n;

+		if (BaseServlet.isInitialActivePOD())

+			n = 0;

+		else if (BaseServlet.isInitialStandbyPOD())

+			n = SET_SIZE;

+		else

+			n = SET_SIZE * 2;

+		String r = String.format("[%X .. %X]", n, n+SET_SIZE-1);

+		logger.debug("This server shall assign RECORD_IDs in the range "+r);

+		return n;

+	}

+	/**

+	 * Return the bit set representing the record ID's that are loaded in this database.

+	 * @return the bit set

+	 */

+	public RLEBitSet getBitSet() {

+		return seq_set;

+	}

+	/**

+	 * True if the LogfileLoader is currently waiting for work.

+	 * @return true if idle

+	 */

+	public boolean isIdle() {

+		return idle;

+	}

+	/**

+	 * Run continuously to look for new logfiles in the spool directory and import them into the DB.

+	 * The spool is checked once per second.  If the number of records in the LOG_RECORDS table exceeds the

+	 * retention threshold (the PROV_LOG_RETENTION parameter, 10 million records by default), the oldest

+	 * records are removed and the table is compacted.

+	 */

+	@Override

+	public void run() {

+		initializeNextid();	// moved from the constructor

+		while (true) {

+			try {

+				File dirfile = new File(spooldir);

+				while (true) {

+					// process IN files

+					File[] infiles = dirfile.listFiles(new FilenameFilter() {

+						@Override

+						public boolean accept(File dir, String name) {

+							return name.startsWith("IN.");

+						}

+					});

+

+					if (infiles == null || infiles.length == 0) {

+						idle = true;

+						try {

+							Thread.sleep(1000L);

+						} catch (InterruptedException e) {

+						}

+						idle = false;

+					} else {

+						// Remove old rows

+						if (pruneRecords()) {

+							// Removed at least some entries, recompute the bit map

+							initializeNextid();

+						}

+

+						// Process incoming logfiles

+						for (File f : infiles) {

+							if (logger.isDebugEnabled())

+								logger.debug("PROV8001 Starting " + f + " ...");

+							long time = System.currentTimeMillis();

+							int[] n = process(f);

+							time = System.currentTimeMillis() - time;

+							logger.info(String

+									.format("PROV8000 Processed %s in %d ms; %d of %d records.",

+											f.toString(), time, n[0], n[1]));

+							f.delete();

+						}

+					}

+				}

+			} catch (Exception e) {

+				logger.warn("PROV0020: Caught exception in LogfileLoader: " + e);

+				e.printStackTrace();

+			}

+		}

+	}

+	private boolean pruneRecords() {

+		boolean did1 = false;

+		long count = countRecords();

+		long threshold = DEFAULT_LOG_RETENTION;

+		Parameters param = Parameters.getParameter(Parameters.PROV_LOG_RETENTION);

+		if (param != null) {

+			try {

+				long n = Long.parseLong(param.getValue());

+				// This check is to prevent inadvertent errors from wiping the table out

+				if (n > 1000000L)

+					threshold = n;

+			} catch (NumberFormatException e) {

+				// ignore

+			}

+		}

+		logger.debug("Pruning LOG_RECORD table: records in DB="+count+", threshold="+threshold);

+		if (count > threshold) {

+			count -= threshold;						// we need to remove this many records;

+			Map<Long,Long> hist = getHistogram();	// histogram of records per day

+			// Determine the cutoff point to remove the needed number of records

+			long sum = 0;

+			long cutoff = 0;

+			for (Long day : new TreeSet<Long>(hist.keySet())) {

+				sum += hist.get(day);

+				cutoff = day;

+				if (sum >= count)

+					break;

+			}

+			cutoff++;

+			cutoff *= 86400000L;		// convert day to ms

+			logger.debug("  Pruning records older than="+(cutoff/86400000L)+" ("+new Date(cutoff)+")");

+

+			Connection conn = null;

+			try {

+				// Limit to a million at a time to avoid tying up the DB for too long.

+				conn = db.getConnection();

+				PreparedStatement ps = conn.prepareStatement("DELETE from LOG_RECORDS where EVENT_TIME < ? limit 1000000");

+				ps.setLong(1, cutoff);

+				while (count > 0) {

+					if (!ps.execute()) {

+						int dcount = ps.getUpdateCount();

+						count -= dcount;

+						logger.debug("  "+dcount+" rows deleted.");

+						did1 |= (dcount!=0);

+						if (dcount == 0)

+							count = 0;	// prevent inf. loops

+					} else {

+						count = 0;	// shouldn't happen!

+					}

+				}

+				ps.close();

+				Statement stmt = conn.createStatement();

+				stmt.execute("OPTIMIZE TABLE LOG_RECORDS");

+				stmt.close();

+			} catch (SQLException e) {

+				System.err.println(e);

+				e.printStackTrace();

+			} finally {

+				db.release(conn);

+			}

+		}

+		return did1;

+	}

+	private long countRecords() {

+		long count = 0;

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			Statement stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("SELECT COUNT(*) as COUNT from LOG_RECORDS");

+			if (rs.next()) {

+				count = rs.getLong("COUNT");

+			}

+			rs.close();

+			stmt.close();

+		} catch (SQLException e) {

+			System.err.println(e);

+			e.printStackTrace();

+		} finally {

+			db.release(conn);

+		}

+		return count;

+	}

+	private Map<Long,Long> getHistogram() {

+		Map<Long,Long> map = new HashMap<Long,Long>();

+		Connection conn = null;

+		try {

+			logger.debug("  LOG_RECORD table histogram...");

+			conn = db.getConnection();

+			Statement stmt = conn.createStatement();

+			ResultSet rs = stmt.executeQuery("SELECT FLOOR(EVENT_TIME/86400000) AS DAY, COUNT(*) AS COUNT FROM LOG_RECORDS GROUP BY DAY");

+			while (rs.next()) {

+				long day = rs.getLong("DAY");

+				long cnt = rs.getLong("COUNT");

+				map.put(day, cnt);

+				logger.debug("  "+day + "  "+cnt);

+			}

+			rs.close();

+			stmt.close();

+		} catch (SQLException e) {

+			System.err.println(e);

+			e.printStackTrace();

+		} finally {

+			db.release(conn);

+		}

+		return map;

+	}

+	private void initializeNextid() {

+		Connection conn = null;

+		try {

+			conn = db.getConnection();

+			Statement stmt = conn.createStatement();

+			// Build a bitset of all records in the LOG_RECORDS table

+			// We need to run this SELECT in stages, because otherwise we run out of memory!

+			RLEBitSet nbs = new RLEBitSet();

+			final long stepsize = 6000000L;

+			boolean go_again = true;

+			for (long i = 0; go_again; i += stepsize) {

+				String sql = String.format("select RECORD_ID from LOG_RECORDS LIMIT %d,%d", i, stepsize);

+				ResultSet rs = stmt.executeQuery(sql);

+				go_again = false;

+				while (rs.next()) {

+					long n = rs.getLong("RECORD_ID");

+					nbs.set(n);

+					go_again = true;

+				}

+				rs.close();

+			}

+			stmt.close();

+			seq_set = nbs;

+

+			// Compare with the range for this server

+			// Determine the next ID for this set of record IDs

+			RLEBitSet tbs = (RLEBitSet) nbs.clone();

+			RLEBitSet idset = new RLEBitSet();

+			idset.set(set_start, set_start+SET_SIZE);

+			tbs.and(idset);

+			long t = tbs.length();

+			nextid = (t == 0) ? set_start : (t - 1);

+			if (nextid >= set_start+SET_SIZE) {

+				// Handle wraparound, when the IDs reach the end of our "range"

+				Long[] last = null;

+				Iterator<Long[]> li = tbs.getRangeIterator();

+				while (li.hasNext()) {

+					last = li.next();

+				}

+				if (last != null) {

+					tbs.clear(last[0], last[1]+1);

+					t = tbs.length();

+					nextid = (t == 0) ? set_start : (t - 1);

+				}

+			}

+			logger.debug(String.format("initializeNextid, next ID is %d (%x)", nextid, nextid));

+		} catch (SQLException e) {

+			System.err.println(e);

+			e.printStackTrace();

+		} finally {

+			db.release(conn);

+		}

+	}

+// OLD CODE - commented here for historical purposes

+//

+//	private boolean pruneRecordsOldAlgorithm() {

+//		// Determine space available -- available space must be at least 20% under /opt/app/mysql

+//		int pct = getFreePercentage();

+//		boolean did1 = false;

+//		while (pct < REQUIRED_FREE_PCT) {

+//			logger.info("PROV8008: Free space is " + pct + "% - removing old log entries");

+//			boolean didit = removeOldestEntries();

+//			pct = didit ? getFreePercentage() : 100; // don't loop endlessly

+//			did1 |= didit;

+//		}

+//		return did1;

+//	}

+//	private int getFreePercentage() {

+//		FileSystem fs = (Paths.get("/opt/app/mysql")).getFileSystem();

+//		long total = 0;

+//		long avail = 0;

+//		try {

+//			for (FileStore store : fs.getFileStores()) {

+//				total += store.getTotalSpace();

+//				avail += store.getUsableSpace();

+//			}

+//		} catch (IOException e) {

+//		}

+//		try { fs.close(); } catch (Exception e) { }

+//		return (int)((avail * 100) / total);

+//	}

+//	private boolean removeOldestEntries() {

+//		// Remove the last days worth of entries

+//		Connection conn = null;

+//		try {

+//			conn = db.getConnection();

+//			Statement stmt = conn.createStatement();

+//			ResultSet rs = stmt.executeQuery("select min(event_time) as MIN from LOG_RECORDS");

+//			if (rs != null) {

+//				if (rs.next()) {

+//					// Compute the end of the first day of logs

+//					long first = rs.getLong("MIN");

+//					Calendar cal = new GregorianCalendar();

+//					cal.setTime(new Date(first));

+//					cal.add(Calendar.DAY_OF_YEAR, 1);

+//					cal.set(Calendar.HOUR_OF_DAY, 0);

+//					cal.set(Calendar.MINUTE, 0);

+//					cal.set(Calendar.SECOND, 0);

+//					cal.set(Calendar.MILLISECOND, 0);

+//					if (!stmt.execute("delete from LOG_RECORDS where event_time < " + cal.getTimeInMillis())) {

+//						int count = stmt.getUpdateCount();

+//						logger.info("PROV0009: Removed "+count+" old log entries.");

+//						stmt.execute("OPTIMIZE TABLE LOG_RECORDS");

+//					}

+//					rs.close();

+//					stmt.close();

+//					return true;

+//				}

+//				rs.close();

+//			}

+//			stmt.close();

+//		} catch (SQLException e) {

+//			System.err.println(e);

+//			e.printStackTrace();

+//		} finally {

+//			db.release(conn);

+//		}

+//		return false;

+//	}

+	@SuppressWarnings("resource")

+	private int[] process(File f) {

+		int ok = 0, total = 0;

+		try {

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(INSERT_SQL);

+			Reader r = f.getPath().endsWith(".gz")

+				? new InputStreamReader(new GZIPInputStream(new FileInputStream(f)))

+				: new FileReader(f);

+			LineNumberReader in = new LineNumberReader(r);

+			String line;

+			while ((line = in.readLine()) != null) {

+				try {

+					for (Loadable rec : buildRecords(line)) {

+						rec.load(ps);

+						if (rec instanceof LogRecord) {

+							LogRecord lr = ((LogRecord)rec);

+							if (!seq_set.get(lr.getRecordId())) {

+								ps.executeUpdate();

+								seq_set.set(lr.getRecordId());

+							} else

+								logger.debug("Duplicate record ignored: "+lr.getRecordId());

+						} else {

+							if (++nextid > set_end)

+								nextid = set_start;

+							ps.setLong(18, nextid);

+							ps.executeUpdate();

+							seq_set.set(nextid);

+						}

+						ps.clearParameters();

+						ok++;

+					}

+				} catch (SQLException e) {

+					logger.warn("PROV8003 Invalid value in record: "+line);

+					logger.debug(e);

+					e.printStackTrace();

+				} catch (NumberFormatException e) {

+					logger.warn("PROV8004 Invalid number in record: "+line);

+					logger.debug(e);

+					e.printStackTrace();

+				} catch (ParseException e) {

+					logger.warn("PROV8005 Invalid date in record: "+line);

+					logger.debug(e);

+					e.printStackTrace();

+				} catch (Exception e) {

+					logger.warn("PROV8006 Invalid pattern in record: "+line);

+					logger.debug(e);

+					e.printStackTrace();

+				}

+				total++;

+			}

+			in.close();

+			ps.close();

+			db.release(conn);

+			conn = null;

+		} catch (FileNotFoundException e) {

+			logger.warn("PROV8007 Exception reading "+f+": "+e);

+		} catch (IOException e) {

+			logger.warn("PROV8007 Exception reading "+f+": "+e);

+		} catch (SQLException e) {

+			logger.warn("PROV8007 Exception reading "+f+": "+e);

+		}

+		return new int[] { ok, total };

+	}

+	private Loadable[] buildRecords(String line) throws ParseException {

+		String[] pp = line.split("\\|");

+		if (pp != null && pp.length >= 7) {

+			String rtype = pp[1].toUpperCase();

+			if (rtype.equals("PUB") && pp.length == 11) {

+				// Fields are: date|PUB|pubid|feedid|requrl|method|ctype|clen|srcip|user|status

+				return new Loadable[] { new PublishRecord(pp) };

+			}

+			if (rtype.equals("DEL") && pp.length == 12) {

+				// Fields are: date|DEL|pubid|feedid|subid|requrl|method|ctype|clen|user|status|xpubid

+				String[] subs = pp[4].split("\\s+");

+				if (subs != null) {

+					Loadable[] rv = new Loadable[subs.length];

+					for (int i = 0; i < subs.length; i++) {

+						// create a new record for each individual sub

+						pp[4] = subs[i];

+						rv[i] = new DeliveryRecord(pp);

+					}

+					return rv;

+				}

+			}

+			if (rtype.equals("EXP") && pp.length == 11) {

+				// Fields are: date|EXP|pubid|feedid|subid|requrl|method|ctype|clen|reason|attempts

+				ExpiryRecord e = new ExpiryRecord(pp);

+				if (e.getReason().equals("other"))

+					logger.info("Invalid reason '"+pp[9]+"' changed to 'other' for record: "+e.getPublishId());

+				return new Loadable[] { e };

+			}

+			if (rtype.equals("PBF") && pp.length == 12) {

+				// Fields are: date|PBF|pubid|feedid|requrl|method|ctype|clen-expected|clen-received|srcip|user|error

+				return new Loadable[] { new PubFailRecord(pp) };

+			}

+			if (rtype.equals("DLX") && pp.length == 7) {

+				// Fields are: date|DLX|pubid|feedid|subid|clen-tosend|clen-sent

+				return new Loadable[] { new DeliveryExtraRecord(pp) };

+			}

+			if (rtype.equals("LOG") && (pp.length == 19 || pp.length == 20)) {

+				// Fields are: date|LOG|pubid|feedid|requrl|method|ctype|clen|type|feedFileid|remoteAddr|user|status|subid|fileid|result|attempts|reason|record_id

+				return new Loadable[] { new LogRecord(pp) };

+			}

+		}

+		logger.warn("PROV8002 bad record: "+line);

+		return new Loadable[0];

+	}

+

+	/**

+	 * The LogfileLoader can be run stand-alone by invoking the main() method of this class.

+	 * @param a ignored

+	 * @throws InterruptedException

+	 */

+	public static void main(String[] a) throws InterruptedException {

+		LogfileLoader.getLoader();

+		Thread.sleep(200000L);

+	}

+}
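
pruneRecords() above picks its deletion cutoff by walking a per-day histogram of LOG_RECORDS counts (oldest day first) until the running total covers the excess over the retention threshold. A standalone sketch of just that cutoff arithmetic, with an illustrative record count and histogram:

    import java.util.Date;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeSet;

    public class CutoffDemo {
        public static void main(String[] args) {
            long threshold = 10000000L;        // DEFAULT_LOG_RETENTION
            long count = 10000500L;            // records currently in LOG_RECORDS (illustrative)
            long excess = count - threshold;   // how many records must be removed

            // Histogram: day number (EVENT_TIME / 86400000) -> record count for that day (illustrative).
            Map<Long, Long> hist = new HashMap<>();
            hist.put(17000L, 300L);
            hist.put(17001L, 400L);
            hist.put(17002L, 900L);

            // Walk the days oldest-first until enough records are covered.
            long sum = 0, cutoffDay = 0;
            for (Long day : new TreeSet<>(hist.keySet())) {
                sum += hist.get(day);
                cutoffDay = day;
                if (sum >= excess) break;
            }
            long cutoffMs = (cutoffDay + 1) * 86400000L;   // delete everything older than this timestamp
            System.out.println("Delete records with EVENT_TIME < " + cutoffMs + " (" + new Date(cutoffMs) + ")");
        }
    }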

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/PurgeLogDirTask.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/PurgeLogDirTask.java
new file mode 100644
index 0000000..b705e6f
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/PurgeLogDirTask.java
@@ -0,0 +1,70 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.io.File;

+import java.util.Properties;

+import java.util.TimerTask;

+

+/**

+ * This class provides a {@link TimerTask} that purges old logfiles

+ * (older than the number of days specified by the com.att.research.datarouter.provserver.logretention property).

+ * @author Robert Eby

+ * @version $Id: PurgeLogDirTask.java,v 1.2 2013/07/05 13:48:05 eby Exp $

+ */

+public class PurgeLogDirTask extends TimerTask {

+	private static final long ONEDAY = 86400000L;

+

+	private final String logdir;

+	private final long interval;

+

+	public PurgeLogDirTask() {

+		Properties p = (new DB()).getProperties();

+		logdir   = p.getProperty("com.att.research.datarouter.provserver.accesslog.dir");

+		String s = p.getProperty("com.att.research.datarouter.provserver.logretention", "30");

+		long n = 30;

+		try {

+			n = Long.parseLong(s);

+		} catch (NumberFormatException e) {

+			// ignore

+		}

+		interval = n * ONEDAY;

+	}

+	@Override

+	public void run() {

+		try {

+			File dir = new File(logdir);

+			if (dir.exists()) {

+				long exptime = System.currentTimeMillis() - interval;

+				for (File logfile : dir.listFiles()) {

+					if (logfile.lastModified() < exptime)

+						logfile.delete();

+				}

+			}

+		} catch (Exception e) {

+			e.printStackTrace();

+		}

+	}

+}
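
PurgeLogDirTask is a plain java.util.TimerTask, so it has to be scheduled on a Timer elsewhere in the server; how and how often it is scheduled is not shown in this file. A minimal sketch of such wiring, assuming a once-a-day period and using a stand-in task so the snippet runs without the provisioning database:

    import java.util.Timer;
    import java.util.TimerTask;

    public class PurgeScheduleSketch {
        private static final long ONEDAY = 86400000L;    // same constant the task uses internally

        public static void main(String[] args) throws InterruptedException {
            Timer timer = new Timer("log-purge", true);  // daemon timer, as a server would typically use
            // In the provisioning server this would be a new PurgeLogDirTask(); a stand-in is used here.
            TimerTask purge = new TimerTask() {
                @Override
                public void run() {
                    System.out.println("purging logfiles older than the configured retention...");
                }
            };
            timer.scheduleAtFixedRate(purge, 0L, ONEDAY);  // run now, then once per day (assumed period)
            Thread.sleep(1000L);                           // let the first run fire, then exit
        }
    }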

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/RLEBitSet.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/RLEBitSet.java
new file mode 100644
index 0000000..5861741
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/RLEBitSet.java
@@ -0,0 +1,418 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.util.ArrayList;

+import java.util.Iterator;

+import java.util.List;

+import java.util.SortedSet;

+import java.util.TreeSet;

+

+/**

+ * This class provides operations similar to the standard Java {@link java.util.BitSet} class.

+ * It is designed for bit sets where there are long runs of 1s and 0s; it is not appropriate

+ * for sparsely populated bit sets.  In addition, this class uses <code>long</code>s rather

+ * than <code>int</code>s to represent the indices of the bits.

+ *

+ * @author Robert Eby

+ * @version $Id$

+ */

+public class RLEBitSet {

+	/**

+	 * Used to represent a contiguous run of <i>nbits</i> 1 bits starting at <i>start</i>.

+	 */

+	private class RLE implements Comparable<RLE> {

+		private final long start;

+		private long nbits;

+		public RLE(long from, long nbits) {

+			this.start = from;

+			this.nbits = (nbits > 0) ? nbits : 0;

+		}

+		/**

+		 * Returns the index of the first set bit in this RLE.

+		 * @return the index

+		 */

+		public long firstBit() {

+			return start;

+		}

+		/**

+		 * Returns the index of the last set bit in this RLE.

+		 * @return the index

+		 */

+		public long lastBit() {

+			return start+nbits-1;

+		}

+		public boolean intersects(RLE b2) {

+			if (b2.lastBit() < this.firstBit())

+				return false;

+			if (b2.firstBit() > this.lastBit())

+				return false;

+			return true;

+		}

+		public boolean isSubset(RLE b2) {

+			if (firstBit() < b2.firstBit())

+				return false;

+			if (firstBit() > b2.lastBit())

+				return false;

+			if (lastBit() < b2.firstBit())

+				return false;

+			if (lastBit() > b2.lastBit())

+				return false;

+			return true;

+		}

+		public RLE union(RLE b2) {

+			RLE b1 = this;

+			if (b1.firstBit() > b2.firstBit()) {

+				b1 = b2;

+				b2 = this;

+			}

+			long end = b1.lastBit();

+			if (b2.lastBit() > b1.lastBit())

+				end = b2.lastBit();

+			return new RLE(b1.firstBit(), end-b1.firstBit()+1);

+		}

+		/**

+		 * Returns the number of bits set to {@code true} in this {@code RLE}.

+		 * @return the number of bits set to {@code true} in this {@code RLE}.

+		 */

+		public int cardinality() {

+			return (int) nbits;

+		}

+		@Override

+		public int compareTo(RLE o) {

+			if (this.equals(o))

+				return 0;

+			return (start < o.start) ? -1 : 1;

+		}

+		@Override

+		public boolean equals(Object obj) {

+			if (obj instanceof RLE) {

+				RLE b = (RLE) obj;

+				return (start == b.start) && (nbits == b.nbits);

+			}

+			return false;

+		}

+		@Override

+		public int hashCode() {

+			return new Long(start ^ nbits).hashCode();

+		}

+		@Override

+		public String toString() {

+			return "["+firstBit()+".."+lastBit()+"]";

+		}

+	}

+	private SortedSet<RLE> bitsets;

+

+	/**

+	 * Creates a new bit set. All bits are initially <code>false</code>.

+	 */

+	public RLEBitSet() {

+		bitsets = new TreeSet<RLE>();

+	}

+	/**

+	 * Creates a new bit set, with bits set according to the value of <code>s</code>.

+	 * @param s the initialization String

+	 */

+	public RLEBitSet(String s) {

+		bitsets = new TreeSet<RLE>();

+		set(s);

+	}

+	/**

+	 * Returns the "logical size" of this {@code RLEBitSet}: the index of the highest set bit

+	 * in the {@code RLEBitSet} plus one. Returns zero if the {@code RLEBitSet} contains no set bits.

+	 * @return the logical size of this {@code RLEBitSet}

+	 */

+	public long length() {

+		if (isEmpty())

+			return 0;

+		return bitsets.last().lastBit()+1;

+	}

+	/**

+	 * Returns the value of the bit with the specified index. The value is {@code true} if the bit

+	 * with the index bit is currently set in this BitSet; otherwise, the result is {@code false}.

+	 * @param bit the bit index

+	 * @return the value of the bit with the specified index

+	 */

+	public boolean get(long bit) {

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				if (bit >= bs.firstBit() && bit <= bs.lastBit())

+					return true;

+			}

+		}

+		return false;

+	}

+	/**

+	 * Set one or more bits to true, based on the value of <code>s</code>.

+	 * @param s the initialization String, which consists of a comma or space separated list of

+	 * non-negative numbers and ranges.  An individual number represents the bit index to set.

+	 * A range (two numbers separated by a dash) causes all bit indexes between the two numbers

+	 * (inclusive) to be set.

+	 * @exception NumberFormatException - if a number is incorrectly formatted

+	 * @exception IndexOutOfBoundsException - if an index is negative

+	 */

+	public void set(String s) throws NumberFormatException {

+		s = s.trim();

+		if (!s.isEmpty()) {

+			for (String s2 : s.split("[, \n]+")) {

+				if (s2.indexOf('-') >= 0) {

+					String[] pp = s2.split("-");

+					long f = Long.parseLong(pp[0]);

+					long t = Long.parseLong(pp[1]);

+					set(f, t+1);

+				} else

+					set(Long.parseLong(s2));

+			}

+		}

+	}

+	/**

+	 * Sets the bit at the specified index to {@code true}.

+	 * @param bit a bit index

+	 */

+	public void set(long bit) {

+		set(bit, bit+1);

+	}

+	/**

+	 * Sets the bits from the specified {@code from} (inclusive) to the

+	 * specified {@code to} (exclusive) to {@code true}.

+	 * @param from index of the first bit to be set

+	 * @param to index after the last bit to be set

+	 * @throws IndexOutOfBoundsException if {@code from} is negative,

+	 *		or {@code to} is negative,

+	 *		or {@code from} is larger than {@code to}

+	 */

+	public void set(long from, long to) {

+		checkRange(from, to);

+		RLE newbits = new RLE(from, to-from);

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				if (bs.intersects(newbits)) {

+					if (!newbits.isSubset(bs)) {

+						bitsets.remove(bs);

+						bitsets.add(newbits.union(bs));

+						coalesce();

+					}

+					return;

+				}

+			}

+			bitsets.add(newbits);

+		}

+		coalesce();

+	}

+	/**

+	 * Sets all of the bits in this BitSet to {@code false}.

+	 */

+	public void clear() {

+		synchronized (bitsets) {

+			bitsets.clear();

+		}

+	}

+	/**

+	 * Sets the bit specified by the index to {@code false}.

+	 * @param bit the index of the bit to be cleared

+	 */

+	public void clear(long bit) {

+		clear(bit, bit+1);

+	}

+	/**

+	 * Sets the bits from the specified {@code from} (inclusive) to the

+	 * specified {@code to} (exclusive) to {@code false}.

+	 * @param from index of the first bit to be cleared

+	 * @param to index after the last bit to be cleared

+	 * @throws IndexOutOfBoundsException if {@code from} is negative,

+	 *		or {@code to} is negative,

+	 *		or {@code from} is larger than {@code to}

+	 */

+	public void clear(long from, long to) {

+		checkRange(from, to);

+		RLE newbits = new RLE(from, to-from);

+		List<RLE> newranges = new ArrayList<RLE>();

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				if (bs.intersects(newbits)) {

+					// preserve the bits that are not being cleared

+					long len = newbits.firstBit() - bs.firstBit();

+					if (len > 0)

+						newranges.add(new RLE(bs.firstBit(), len));

+					len = bs.lastBit() - newbits.lastBit();

+					if (len > 0)

+						newranges.add(new RLE(newbits.lastBit()+1, len));

+					bs.nbits = 0;

+				}

+			}

+			if (!newranges.isEmpty()) {

+				for (RLE bs : newranges) {

+					bitsets.add(bs);

+				}

+			}

+		}

+		coalesce();

+	}

+	/** Combine abutting RLE runs within this set, and remove zero-length runs. */

+	private void coalesce() {

+		RLE last = null;

+		synchronized (bitsets) {

+			Iterator<RLE> iter = bitsets.iterator();

+			while (iter.hasNext()) {

+				RLE bs = iter.next();

+				if (last != null && (last.lastBit()+1 == bs.firstBit())) {

+					last.nbits += bs.nbits;

+					iter.remove();

+				} else if (bs.nbits == 0) {

+					iter.remove();

+				} else {

+					last = bs;

+				}

+			}

+		}

+	}

+	/**

+	 * Checks that fromIndex ... toIndex is a valid range of bit indices.

+	 */

+	private static void checkRange(long from, long to) {

+		if (from < 0)

+			throw new IndexOutOfBoundsException("fromIndex < 0: " + from);

+		if (to < 0)

+			throw new IndexOutOfBoundsException("toIndex < 0: " + to);

+		if (from > to)

+			throw new IndexOutOfBoundsException("fromIndex: " + from + " > toIndex: " + to);

+	}

+	/**

+	 * Performs a logical <b>AND</b> of this target bit set with the argument bit set.

+	 * This bit set is modified so that each bit in it has the value {@code true} if and only if

+	 * it both initially had the value {@code true} and the corresponding bit in the bit set

+	 * argument also had the value {@code true}.

+	 * @param set a {@code RLEBitSet}

+	 */

+	public void and(RLEBitSet set) {

+		long last = 0;

+		synchronized (set.bitsets) {

+			for (RLE bs : set.bitsets) {

+				clear(last, bs.start);

+				last = bs.start + bs.nbits;

+			}

+		}

+		clear(last, Long.MAX_VALUE);

+	}

+	/**

+	 * Clears all of the bits in this {@code RLEBitSet} whose corresponding bit is set in

+	 * the specified {@code RLEBitSet}.

+	 * @param set the {@code RLEBitSet} with which to mask this {@code RLEBitSet}

+	 */

+	public void andNot(RLEBitSet set) {

+		synchronized (set.bitsets) {

+			for (RLE bs : set.bitsets) {

+				clear(bs.start, bs.start + bs.nbits);

+			}

+		}

+	}

+	/**

+	 * Returns true if this {@code RLEBitSet} contains no bits that are set

+	 * to {@code true}.

+	 *

+	 * @return boolean indicating whether this {@code BitSet} is empty

+	 */

+	public boolean isEmpty() {

+		return bitsets.isEmpty();

+	}

+	/**

+	 * Returns the number of bits set to {@code true} in this {@code RLEBitSet}.

+	 * @return the number of bits set to {@code true} in this {@code RLEBitSet}.

+	 */

+	public int cardinality() {

+		int n = 0;

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				n += bs.cardinality();

+			}

+		}

+		return n;

+	}

+	/**

+	 * Cloning this RLEBitSet produces a new RLEBitSet that is equal to it. The clone of the

+	 * bit set is another bit set that has exactly the same bits set to true as this bit set.

+	 * @return a clone of this bit set

+	 */

+	public Object clone() {

+		RLEBitSet rv = new RLEBitSet();

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				rv.bitsets.add(new RLE(bs.start, bs.nbits));

+			}

+		}

+		return rv;

+	}

+	/**

+	 * Returns a string representation of this bit set, using the same notation as is required for

+	 * the String constructor. For every index for which this {@code RLEBitSet} contains a bit in

+	 * the set state, the decimal representation of that index is included in the result. Such

+	 * indices are listed in order from lowest to highest, separated by ",". Ranges of set bits are

+	 * indicated by <i>lobit</i>-<i>hibit</i>.

+	 * @return the String

+	 */

+	@Override

+	public String toString() {

+		StringBuilder sb = new StringBuilder();

+		String prefix = "";

+		synchronized (bitsets) {

+			for (RLE bs : bitsets) {

+				sb.append(prefix);

+				prefix = ",";

+				long s = bs.firstBit();

+				long e = bs.lastBit();

+				sb.append(s);

+				if (s != e)

+					sb.append('-').append(e);

+			}

+		}

+		return sb.toString();

+	}

+	/**

+	 * Return an Iterator which provides pairs of {@code Long}s representing the beginning and

+	 * ending index of a range of set bits in this {@code RLEBitSet}.

+	 * @return the Iterator

+	 */

+	public Iterator<Long[]> getRangeIterator() {

+		return new Iterator<Long[]>() {

+			private Iterator<RLE> i = bitsets.iterator();

+

+			@Override

+			public boolean hasNext() {

+				return i.hasNext();

+			}

+

+			@Override

+			public Long[] next() {

+				RLE bs = i.next();

+				return new Long[] { bs.firstBit(), bs.lastBit() };

+			}

+

+			@Override

+			public void remove() {

+				throw new UnsupportedOperationException();

+			}

+		};

+	}

+}
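
A short usage sketch of the RLEBitSet API defined above, showing run coalescing, clearing a sub-range, and iterating the remaining runs (all index values are illustrative):

    import java.util.Iterator;
    import com.att.research.datarouter.provisioning.utils.RLEBitSet;

    public class RLEBitSetDemo {
        public static void main(String[] args) {
            RLEBitSet bs = new RLEBitSet();
            bs.set(0, 10);          // bits 0..9
            bs.set(10, 20);         // abuts the first run, so the two runs coalesce into 0..19
            bs.set(100);            // a single distant bit
            bs.clear(5, 7);         // punch a hole, leaving 0..4, 7..19, 100

            System.out.println(bs);               // "0-4,7-19,100"
            System.out.println(bs.cardinality()); // 19 set bits
            System.out.println(bs.length());      // 101 (highest set bit + 1)

            // Walk the runs of set bits as [first, last] pairs.
            Iterator<Long[]> it = bs.getRangeIterator();
            while (it.hasNext()) {
                Long[] range = it.next();
                System.out.println(range[0] + " .. " + range[1]);
            }
        }
    }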

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/ThrottleFilter.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/ThrottleFilter.java
new file mode 100644
index 0000000..6eb866c
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/ThrottleFilter.java
@@ -0,0 +1,316 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.io.IOException;

+import java.io.InputStream;

+import java.util.ArrayList;

+import java.util.HashMap;

+import java.util.List;

+import java.util.Map;

+import java.util.Timer;

+import java.util.TimerTask;

+import java.util.Vector;

+

+import javax.servlet.Filter;

+import javax.servlet.FilterChain;

+import javax.servlet.FilterConfig;

+import javax.servlet.ServletException;

+import javax.servlet.ServletRequest;

+import javax.servlet.ServletResponse;

+import javax.servlet.http.HttpServletRequest;

+import javax.servlet.http.HttpServletResponse;

+

+import com.att.research.datarouter.provisioning.beans.Parameters;

+

+import org.apache.log4j.Logger;

+import org.eclipse.jetty.continuation.Continuation;

+import org.eclipse.jetty.continuation.ContinuationSupport;

+import org.eclipse.jetty.server.AbstractHttpConnection;

+import org.eclipse.jetty.server.Request;

+

+/**

+ * This filter checks /publish requests to the provisioning server to allow ill-behaved publishers to be throttled.

+ * It is configured via the provisioning parameter THROTTLE_FILTER.

+ * The THROTTLE_FILTER provisioning parameter can have these values:

+ * <table>

+ * <tr><td>(no value)</td><td>filter disabled</td></tr>

+ * <tr><td>off</td><td>filter disabled</td></tr>

+ * <tr><td>N[,M[,action]]</td><td>set N, M, and action (used in the algorithm below).

+ *     Action is <i>drop</i> or <i>throttle</i>.

+ *     If M is missing, it defaults to 5 minutes.

+ *     If the action is missing, it defaults to <i>drop</i>.

+ * </td></tr>

+ * </table>

+ * <p>

+ * The <i>action</i> is triggered iff:

+ * <ol>

+ * <li>the filter is enabled, and</li>

+ * <li>N /publish requests come to the provisioning server in M minutes

+ *   <ol>

+ *   <li>from the same IP address</li>

+ *   <li>for the same feed</li>

+ *   <li>lacking the <i>Expect: 100-continue</i> header</li>

+ *   </ol>

+ * </li>

+ * </ol>

+ * The actions that can be performed (if triggered) are:

+ * <ol>

+ * <li><i>drop</i> - the connection is dropped immediately.</li>

+ * <li><i>throttle</i> - [not supported] the connection is put into a low priority queue with all other throttled connections.

+ *   These are then processed at a slower rate.  Note: this option does not work correctly, and is disabled.

+ *   The only action that is supported is <i>drop</i>.

+ * </li>

+ * </ol>

+ *

+ * @author Robert Eby

+ * @version $Id: ThrottleFilter.java,v 1.2 2014/03/12 19:45:41 eby Exp $

+ */

+public class ThrottleFilter extends TimerTask implements Filter {

+	public  static final int    DEFAULT_N       = 10;

+	public  static final int    DEFAULT_M       = 5;

+	public  static final String THROTTLE_MARKER = "com.att.research.datarouter.provisioning.THROTTLE_MARKER";

+	private static final String JETTY_REQUEST   = "org.eclipse.jetty.server.Request";

+	private static final long   ONE_MINUTE      = 60000L;

+	private static final int    ACTION_DROP     = 0;

+	private static final int    ACTION_THROTTLE = 1;

+

+	// Configuration

+	private static boolean enabled = false;		// enabled or not

+	private static int n_requests = 0;			// number of requests in M minutes

+	private static int m_minutes = 0;			// sampling period

+	private static int action = ACTION_DROP;	// action to take (throttle or drop)

+

+	private static Logger logger = Logger.getLogger("com.att.research.datarouter.provisioning.internal");

+	private static Map<String, Counter> map = new HashMap<String, Counter>();

+	private static final Timer rolex = new Timer();

+

+	@Override

+	public void init(FilterConfig arg0) throws ServletException {

+		configure();

+		rolex.scheduleAtFixedRate(this, 5*60000L, 5*60000L);	// Run once every 5 minutes to clean map

+	}

+

+	/**

+	 * Configure the throttle.  This should be called from BaseServlet.provisioningParametersChanged(), to make sure it stays up to date.

+	 */

+	public static void configure() {

+		Parameters p = Parameters.getParameter(Parameters.THROTTLE_FILTER);

+		if (p != null) {

+			try {

+				Class.forName(JETTY_REQUEST);

+				String v = p.getValue();

+				if (v != null && !v.equals("off")) {

+					String[] pp = v.split(",");

+					if (pp != null) {

+						n_requests = (pp.length > 0) ? getInt(pp[0], DEFAULT_N) : DEFAULT_N;

+						m_minutes  = (pp.length > 1) ? getInt(pp[1], DEFAULT_M) : DEFAULT_M;

+						action     = (pp.length > 2 && pp[2] != null && pp[2].equalsIgnoreCase("throttle")) ? ACTION_THROTTLE : ACTION_DROP;

+						enabled    = true;

+						// ACTION_THROTTLE is not currently working, so is not supported

+						if (action == ACTION_THROTTLE) {

+							action = ACTION_DROP;

+							logger.info("Throttling is not currently supported; action changed to DROP");

+						}

+						logger.info("ThrottleFilter is ENABLED for /publish requests; N="+n_requests+", M="+m_minutes+", Action="+action);

+						return;

+					}

+				}

+			} catch (ClassNotFoundException e) {

+				logger.warn("Class "+JETTY_REQUEST+" is not available; this filter requires Jetty.");

+			}

+		}

+		logger.info("ThrottleFilter is DISABLED for /publish requests.");

+		enabled = false;

+		map.clear();

+	}

+	private static int getInt(String s, int deflt) {

+		try {

+			return Integer.parseInt(s);

+		} catch (NumberFormatException x) {

+			return deflt;

+		}

+	}

+	@Override

+	public void destroy() {

+		rolex.cancel();

+		map.clear();

+	}

+

+	@Override

+	public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)

+		throws IOException, ServletException

+	{

+		if (enabled && action == ACTION_THROTTLE) {

+			throttleFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);

+		} else if (enabled) {

+			dropFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);

+		} else {

+			chain.doFilter(request, response);

+		}

+	}

+	public void dropFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)

+		throws IOException, ServletException

+	{

+		int rate = getRequestRate((HttpServletRequest) request);

+		if (rate >= n_requests) {

+			// drop request - only works under Jetty

+			String m = String.format("Dropping connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);

+			logger.info(m);

+			Request base_request = (request instanceof Request)

+				? (Request) request

+				: AbstractHttpConnection.getCurrentConnection().getRequest();

+			base_request.getConnection().getEndPoint().close();

+		} else {

+			chain.doFilter(request, response);

+		}

+	}

+	public void throttleFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)

+		throws IOException, ServletException

+	{

+		// throttle request

+		String id = getConnectionId((HttpServletRequest) request);

+		int rate = getRequestRate((HttpServletRequest) request);

+		Object results = request.getAttribute(THROTTLE_MARKER);

+		if (rate >= n_requests && results == null) {

+			String m = String.format("Throttling connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);

+			logger.info(m);

+			Continuation continuation = ContinuationSupport.getContinuation(request);

+			continuation.suspend();

+			register(id, continuation);

+			continuation.undispatch();

+		} else {

+			chain.doFilter(request, response);

+			@SuppressWarnings("resource")

+			InputStream is = request.getInputStream();

+			byte[] b = new byte[4096];

+			int n = is.read(b);

+			while (n > 0) {

+				n = is.read(b);

+			}

+			resume(id);

+		}

+	}

+	private Map<String, List<Continuation>> suspended_requests = new HashMap<String, List<Continuation>>();

+	private void register(String id, Continuation continuation) {

+		synchronized (suspended_requests) {

+			List<Continuation> list = suspended_requests.get(id);

+			if (list == null) {

+				list = new ArrayList<Continuation>();

+				suspended_requests.put(id,  list);

+			}

+			list.add(continuation);

+		}

+	}

+	private void resume(String id) {

+		synchronized (suspended_requests) {

+			List<Continuation> list = suspended_requests.get(id);

+			if (list != null) {

+				// when the waited for event happens

+				Continuation continuation = list.remove(0);

+				continuation.setAttribute(ThrottleFilter.THROTTLE_MARKER, new Object());

+				continuation.resume();

+			}

+		}

+	}

+

+	/**

+	 * Return a count of the number of requests in the last M minutes, iff this is a "bad" request.

+	 * If the request has been resumed (if it contains the THROTTLE_MARKER) it is considered good.

+	 * @param request the request

+	 * @return number of requests in the last M minutes, 0 means it is a "good" request

+	 */

+	private int getRequestRate(HttpServletRequest request) {

+		String expecthdr = request.getHeader("Expect");

+		if (expecthdr != null && expecthdr.equalsIgnoreCase("100-continue"))

+			return 0;

+

+		String key = getConnectionId(request);

+		synchronized (map) {

+			Counter cnt = map.get(key);

+			if (cnt == null) {

+				cnt = new Counter();

+				map.put(key, cnt);

+			}

+			int n = cnt.getRequestRate();

+			return n;

+		}

+	}

+

+	public class Counter {

+		private List<Long> times = new Vector<Long>();	// a record of request times

+		public int prune() {

+			try {

+				long n = System.currentTimeMillis() - (m_minutes * ONE_MINUTE);

+				long t = times.get(0);

+				while (t < n) {

+					times.remove(0);

+					t = times.get(0);

+				}

+			} catch (IndexOutOfBoundsException e) {

+				// ignore

+			}

+			return times.size();

+		}

+		public int getRequestRate() {

+			times.add(System.currentTimeMillis());

+			return prune();

+		}

+	}

+

+	/**

+	 *  Identify a connection by endpoint IP address, and feed ID.

+	 */

+	private String getConnectionId(HttpServletRequest req) {

+		return req.getRemoteAddr() + "/" + getFeedId(req);

+	}

+	private int getFeedId(HttpServletRequest req) {

+		String path = req.getPathInfo();

+		if (path == null || path.length() < 2)

+			return -1;

+		path = path.substring(1);

+		int ix = path.indexOf('/');

+		if (ix < 0 || ix == path.length()-1)

+			return -2;

+		try {

+			int feedid = Integer.parseInt(path.substring(0, ix));

+			return feedid;

+		} catch (NumberFormatException e) {

+			return -1;

+		}

+	}

+

+	@Override

+	public void run() {

+		// Once every 5 minutes, go through the map, and remove empty entries

+		for (Object s : map.keySet().toArray()) {

+			synchronized (map) {

+				Counter c = map.get(s);

+				if (c.prune() <= 0)

+					map.remove(s);

+			}

+		}

+	}

+}
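
The Counter class above implements a simple sliding-window rate check: each request appends a timestamp, timestamps older than M minutes are pruned, and the remaining count is compared against N. A standalone sketch of that logic, using the filter's defaults of N=10 and M=5 (the request loop is illustrative only):

    import java.util.ArrayList;
    import java.util.List;

    public class SlidingWindowDemo {
        static final long WINDOW_MS = 5 * 60000L;   // M = 5 minutes, the filter's default
        static final List<Long> times = new ArrayList<>();

        // Record a request now and return how many requests fall inside the window.
        static int requestRate() {
            long now = System.currentTimeMillis();
            times.add(now);
            // Drop timestamps that have aged out of the window.
            while (!times.isEmpty() && times.get(0) < now - WINDOW_MS) {
                times.remove(0);
            }
            return times.size();
        }

        public static void main(String[] args) {
            int n = 10;  // DEFAULT_N: the threshold that triggers the drop action
            for (int i = 0; i < 12; i++) {
                int rate = requestRate();
                System.out.println("request " + (i + 1) + ", rate=" + rate + (rate >= n ? "  -> would be dropped" : ""));
            }
        }
    }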

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/URLUtilities.java b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/URLUtilities.java
new file mode 100644
index 0000000..c1793e5
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/URLUtilities.java
@@ -0,0 +1,130 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.provisioning.utils;

+

+import java.net.InetAddress;

+import java.net.UnknownHostException;

+import java.util.Arrays;

+

+import com.att.research.datarouter.provisioning.BaseServlet;

+

+/**

+ * Utility functions used to generate the different URLs used by the Data Router.

+ *

+ * @author Robert Eby

+ * @version $Id: URLUtilities.java,v 1.2 2014/03/12 19:45:41 eby Exp $

+ */

+public class URLUtilities {

+	/**

+	 * Generate the URL used to access a feed.

+	 * @param feedid the feed id

+	 * @return the URL

+	 */

+	public static String generateFeedURL(int feedid) {

+		return "https://" + BaseServlet.prov_name + "/feed/" + feedid;

+	}

+	/**

+	 * Generate the URL used to publish to a feed.

+	 * @param feedid the feed id

+	 * @return the URL

+	 */

+	public static String generatePublishURL(int feedid) {

+		return "https://" + BaseServlet.prov_name + "/publish/" + feedid;

+	}

+	/**

+	 * Generate the URL used to subscribe to a feed.

+	 * @param feedid the feed id

+	 * @return the URL

+	 */

+	public static String generateSubscribeURL(int feedid) {

+		return "https://" + BaseServlet.prov_name + "/subscribe/" + feedid;

+	}

+	/**

+	 * Generate the URL used to access a feed's logs.

+	 * @param feedid the feed id

+	 * @return the URL

+	 */

+	public static String generateFeedLogURL(int feedid) {

+		return "https://" + BaseServlet.prov_name + "/feedlog/" + feedid;

+	}

+	/**

+	 * Generate the URL used to access a subscription.

+	 * @param subid the subscription id

+	 * @return the URL

+	 */

+	public static String generateSubscriptionURL(int subid) {

+		return "https://" + BaseServlet.prov_name + "/subs/" + subid;

+	}

+	/**

+	 * Generate the URL used to access a subscription's logs.

+	 * @param subid the subscription id

+	 * @return the URL

+	 */

+	public static String generateSubLogURL(int subid) {

+		return "https://" + BaseServlet.prov_name + "/sublog/" + subid;

+	}

+	/**

+	 * Generate the URL used to access the provisioning data on the peer POD.

+	 * @return the URL

+	 */

+	public static String generatePeerProvURL() {

+		return "https://" + getPeerPodName() + "/internal/prov";

+	}

+	/**

+	 * Generate the URL used to access the logfile data on the peer POD.

+	 * @return the URL

+	 */

+	public static String generatePeerLogsURL() {

+		// Fix for Itrack ticket DATARTR-4: if only one Prov is configured, return an empty string instead of throwing an exception when filling logs.

+		String peerPodUrl = getPeerPodName();

+		if (peerPodUrl == null || peerPodUrl.equals("")) {

+			return "";

+		}

+				

+		return "https://" + peerPodUrl + "/internal/drlogs/";

+	}

+	/**

+	 * Return the real (non CNAME) version of the peer POD's DNS name.

+	 * @return the name

+	 */

+	public static String getPeerPodName() {

+		if (other_pod == null) {

+			String this_pod = "";

+			try {

+				this_pod = InetAddress.getLocalHost().getHostName();

+				System.out.println("this_pod: "+this_pod);

+			} catch (UnknownHostException e) {

+				this_pod = "";

+			}

+			System.out.println("ALL PODS: "+Arrays.asList(BaseServlet.getPods()));

+			for (String pod : BaseServlet.getPods()) {

+				if (!pod.equals(this_pod))

+					other_pod = pod;

+			}

+		}

+		return other_pod;

+	}

+	private static String other_pod;

+}
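
For illustration, the generators above only differ in the path suffix appended to the active provisioning host; a hedged usage sketch (the IDs are invented, and the calls assume the class above is imported and BaseServlet.prov_name has been initialized by a running provisioning server):

// Hypothetical usage of the URL generators above; the IDs are illustrative only.
String feedUrl     = URLUtilities.generateFeedURL(42);         // https://<prov_name>/feed/42
String publishUrl  = URLUtilities.generatePublishURL(42);      // https://<prov_name>/publish/42
String subUrl      = URLUtilities.generateSubscriptionURL(7);  // https://<prov_name>/subs/7
String peerProvUrl = URLUtilities.generatePeerProvURL();       // https://<peer pod>/internal/prov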

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/package.html
new file mode 100644
index 0000000..7855bb4
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/provisioning/utils/package.html
@@ -0,0 +1,30 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<body>

+<p>

+This package provides various helper classes used by the provisioning server.

+</p>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/DailyLatencyReport.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/DailyLatencyReport.java
new file mode 100644
index 0000000..63d612f
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/DailyLatencyReport.java
@@ -0,0 +1,194 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.io.FileNotFoundException;

+import java.io.PrintWriter;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.text.SimpleDateFormat;

+import java.util.ArrayList;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.List;

+import java.util.Map;

+import java.util.TreeSet;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Generate a daily per-feed latency report.  The report is a .csv file containing the following columns:

+ * <table>

+ * <tr><td>date</td><td>the date for this record</td></tr>

+ * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>

+ * <tr><td>minsize</td><td>the minimum size of all files published on this feed and date</td></tr>

+ * <tr><td>maxsize</td><td>the maximum size of all files published on this feed and date</td></tr>

+ * <tr><td>avgsize</td><td>the average size of all files published on this feed and date</td></tr>

+ * <tr><td>minlat</td><td>the minimum latency in delivering this feed to all subscribers (in ms)</td></tr>

+ * <tr><td>maxlat</td><td>the maximum latency in delivering this feed to all subscribers (in ms)</td></tr>

+ * <tr><td>avglat</td><td>the average latency in delivering this feed to all subscribers (in ms)</td></tr>

+ * <tr><td>fanout</td><td>the average number of subscribers this feed was delivered to</td></tr>

+ * </table>

+ * <p>

+ * In the context of this report, latency is defined as the value

+ * <i>(D<sub>e</sub> - P<sub>s</sub>)</i>

+ * where:

+ * </p>

+ * <p>P<sub>s</sub> is the time that the publication of the file to the node starts.</p>

+ * <p>D<sub>e</sub> is the time that the delivery of the file to the subscriber ends.</p>

+ *

+ * @author Robert P. Eby

+ * @version $Id: DailyLatencyReport.java,v 1.2 2013/11/06 16:23:54 eby Exp $

+ */

+public class DailyLatencyReport extends ReportBase {

+	private static final String SELECT_SQL =

+		"select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +

+		" where EVENT_TIME >= ? and EVENT_TIME <= ?";

+

+	private class Job {

+		public long pubtime = 0;

+		public long clen = 0;

+		public List<Long> deltime = new ArrayList<Long>();

+		public long minLatency() {

+			long n = deltime.isEmpty() ? 0 : Long.MAX_VALUE;

+			for (Long l : deltime)

+				n = Math.min(n, l-pubtime);

+			return n;

+		}

+		public long maxLatency() {

+			long n = 0;

+			for (Long l : deltime)

+				n = Math.max(n, l-pubtime);

+			return n;

+		}

+		public long totalLatency() {

+			long n = 0;

+			for (Long l : deltime)

+				n += (l-pubtime);

+			return n;

+		}

+	}

+	private class Counters {

+		public final String date;

+		public final int feedid;

+		public final Map<String, Job> jobs;

+		public Counters(String d, int fid) {

+			date = d;

+			feedid = fid;

+			jobs = new HashMap<String, Job>();

+		}

+		public void addEvent(long etime, String type, String id, String fid, long clen) {

+			Job j = jobs.get(id);

+			if (j == null) {

+				j = new Job();

+				jobs.put(id, j);

+			}

+			if (type.equals("pub")) {

+				j.pubtime = getPstart(id);

+				j.clen = clen;

+			} else if (type.equals("del")) {

+				j.deltime.add(etime);

+			}

+		}

+		@Override

+		public String toString() {

+			long minsize = Long.MAX_VALUE, maxsize = 0, avgsize = 0;

+			long minl    = Long.MAX_VALUE, maxl    = 0;

+			long fanout  = 0, totall = 0, totaln = 0;

+			for (Job j : jobs.values()) {

+				minsize = Math.min(minsize, j.clen);

+				maxsize = Math.max(maxsize, j.clen);

+				avgsize += j.clen;

+				minl    = Math.min(minl, j.minLatency());

+				maxl    = Math.max(maxl, j.maxLatency());

+				totall  += j.totalLatency();

+				totaln  += j.deltime.size();

+				fanout  += j.deltime.size();

+			}

+			if (jobs.size() > 0) {

+				avgsize /= jobs.size();

+				fanout  /= jobs.size();

+			}

+			long avgl = (totaln > 0) ? (totall / totaln) : 0;

+			return date + "," + feedid + "," + minsize + "," + maxsize + "," + avgsize + "," + minl + "," + maxl + "," + avgl + "," + fanout;

+		}

+	}

+	private long getPstart(String t) {

+		if (t.indexOf('.') > 0)

+			t = t.substring(0, t.indexOf('.'));

+		return Long.parseLong(t);

+	}

+

+	@Override

+	public void run() {

+		Map<String, Counters> map = new HashMap<String, Counters>();

+		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");

+		long start = System.currentTimeMillis();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(SELECT_SQL);

+			ps.setLong(1, from);

+			ps.setLong(2, to);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				String id   = rs.getString("PUBLISH_ID");

+				int feed    = rs.getInt("FEEDID");

+				long etime  = rs.getLong("EVENT_TIME");

+				String type = rs.getString("TYPE");

+				String fid  = rs.getString("FEED_FILEID");

+				long clen   = rs.getLong("CONTENT_LENGTH");

+				String date = sdf.format(new Date(getPstart(id)));

+				String key  = date + "," + feed;

+				Counters c = map.get(key);

+				if (c == null) {

+					c = new Counters(date, feed);

+					map.put(key, c);

+				}

+				c.addEvent(etime, type, id, fid, clen);

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+		try {

+			PrintWriter os = new PrintWriter(outfile);

+			os.println("date,feedid,minsize,maxsize,avgsize,minlat,maxlat,avglat,fanout");

+			for (String key : new TreeSet<String>(map.keySet())) {

+				Counters c = map.get(key);

+				os.println(c.toString());

+			}

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		}

+	}

+}
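
The Javadoc above defines latency as De - Ps, where Ps is recovered from the epoch-millisecond prefix of the publish ID (see getPstart). A small worked sketch of that computation, using invented sample values:

// Hypothetical worked example of the latency definition (De - Ps) used above.
public class LatencyDefinitionExample {
    // Ps: the epoch-millis prefix of a publish ID such as "1500000000000.datanode01".
    static long publishStart(String publishId) {
        int dot = publishId.indexOf('.');
        return Long.parseLong(dot > 0 ? publishId.substring(0, dot) : publishId);
    }

    public static void main(String[] args) {
        String publishId = "1500000000000.datanode01"; // invented publish ID
        long deliveryEnd = 1500000004500L;             // invented De, in ms
        long latencyMs = deliveryEnd - publishStart(publishId);
        System.out.println("latency = " + latencyMs + " ms");   // prints: latency = 4500 ms
    }
}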

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/FeedReport.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/FeedReport.java
new file mode 100644
index 0000000..9fe7e27
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/FeedReport.java
@@ -0,0 +1,395 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.io.FileNotFoundException;

+import java.io.FileReader;

+import java.io.LineNumberReader;

+import java.io.PrintWriter;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.text.SimpleDateFormat;

+import java.util.Arrays;

+import java.util.Calendar;

+import java.util.Date;

+import java.util.GregorianCalendar;

+

+import org.json.JSONException;

+import org.json.JSONObject;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Generate a feeds report.  The report is a .CSV file.

+ *

+ * @author Robert P. Eby

+ * @version $Id: FeedReport.java,v 1.2 2013/11/06 16:23:55 eby Exp $

+ */

+public class FeedReport extends ReportBase {

+	private static final String SELECT_SQL =

+		// Note to use the time in the publish_id, use date(from_unixtime(substring(publish_id, 1, 10)))

+		// To just use month, substring(from_unixtime(event_time div 1000), 1, 7)

+		"select date(from_unixtime(event_time div 1000)) as date, type, feedid, delivery_subid, count(*) as count" +

+		" from LOG_RECORDS" +

+		" where type = 'pub' or type = 'del'" +

+		" group by date, type, feedid, delivery_subid";

+	private static final String SELECT_SQL_OLD =

+		"select PUBLISH_ID, TYPE, FEEDID, DELIVERY_SUBID from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ?";

+

+	@Override

+	public void run() {

+		boolean alg1 = true;

+		JSONObject jo = new JSONObject();

+		long start = System.currentTimeMillis();

+		StringBuilder sb = new StringBuilder();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(SELECT_SQL);

+//			ps.setLong(1, from);

+//			ps.setLong(2, to);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				if (alg1) {

+					String date = rs.getString("date");

+					String type = rs.getString("type");

+					int feedid  = rs.getInt("feedid");

+					int subid   = type.equals("del") ? rs.getInt("delivery_subid") : 0;

+					int count   = rs.getInt("count");

+					sb.append(date + "," + type + "," + feedid + "," + subid + "," + count + "\n");

+				} else {

+					String date = rs.getString("date");

+					JSONObject datemap = jo.optJSONObject(date);

+					if (datemap == null) {

+						datemap = new JSONObject();

+						jo.put(date, datemap);

+					}

+					int feed = rs.getInt("FEEDID");

+					JSONObject feedmap = datemap.optJSONObject(""+feed);

+					if (feedmap == null) {

+						feedmap = new JSONObject();

+						feedmap.put("pubcount", 0);

+						datemap.put(""+feed, feedmap);

+					}

+					String type = rs.getString("TYPE");

+					int count   = rs.getInt("count");

+					if (type.equals("pub")) {

+						feedmap.put("pubcount", count);

+					} else if (type.equals("del")) {

+						String subid = ""+rs.getInt("DELIVERY_SUBID");

+						feedmap.put(subid, count);

+					}

+				}

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+		try {

+			PrintWriter os = new PrintWriter(outfile);

+			if (alg1) {

+				os.print("date,type,feedid,subid,count\n");

+				os.print(sb.toString());

+			} else {

+				os.println(toHTML(jo));

+			}

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		}

+	}

+

+	public void run2() {

+		JSONObject jo = new JSONObject();

+		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");

+		long start = System.currentTimeMillis();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(SELECT_SQL_OLD);

+			ps.setLong(1, from);

+			ps.setLong(2, to);

+			ps.setFetchSize(100000);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				String id   = rs.getString("PUBLISH_ID");

+				String date = sdf.format(new Date(getPstart(id)));

+				JSONObject datemap = jo.optJSONObject(date);

+				if (datemap == null) {

+					datemap = new JSONObject();

+					jo.put(date, datemap);

+				}

+				int feed = rs.getInt("FEEDID");

+				JSONObject feedmap = datemap.optJSONObject(""+feed);

+				if (feedmap == null) {

+					feedmap = new JSONObject();

+					feedmap.put("pubcount", 0);

+					datemap.put(""+feed, feedmap);

+				}

+				String type = rs.getString("TYPE");

+				if (type.equals("pub")) {

+					try {

+						int n = feedmap.getInt("pubcount");

+						feedmap.put("pubcount", n+1);

+					} catch (JSONException e) {

+						feedmap.put("pubcount", 1);

+					}

+				} else if (type.equals("del")) {

+					String subid = ""+rs.getInt("DELIVERY_SUBID");

+					try {

+						int n = feedmap.getInt(subid);

+						feedmap.put(subid, n+1);

+					} catch (JSONException e) {

+						feedmap.put(subid, 1);

+					}

+				}

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+		try {

+			PrintWriter os = new PrintWriter(outfile);

+			os.println(toHTML(jo));

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		}

+	}

+	private long getPstart(String t) {

+		if (t.indexOf('.') > 0)

+			t = t.substring(0, t.indexOf('.'));

+		return Long.parseLong(t);

+	}

+	@SuppressWarnings("unused")

+	private static String toHTMLNested(JSONObject jo) {

+		StringBuilder s = new StringBuilder();

+		s.append("<table>\n");

+		s.append("<tr><th>Date</th><th>Feeds</th></tr>\n");

+		String[] dates = JSONObject.getNames(jo);

+		Arrays.sort(dates);

+		for (int i = dates.length-1; i >= 0; i--) {

+			String date = dates[i];

+			JSONObject j2 = jo.getJSONObject(date);

+			String[] feeds = JSONObject.getNames(j2);

+			Arrays.sort(feeds);

+			s.append("<tr><td>"+date+"</td><td>");

+			s.append(feeds.length).append(feeds.length > 1 ? " Feeds\n" : " Feed\n");

+			s.append("<table>\n");

+			s.append("<tr><th>Feed ID</th><th>Publish Count</th><th>Subscriptions</th></tr>\n");

+			for (String feed : feeds) {

+				JSONObject j3 = j2.getJSONObject(feed);

+				String[] subs = JSONObject.getNames(j3);

+				Arrays.sort(subs);

+				s.append("<tr><td>"+feed+"</td>");

+				s.append("<td>"+j3.getInt("pubcount")+"</td>");

+				int scnt = j3.length()-1;

+				s.append("<td>").append(scnt).append(" Subcription");

+				if (scnt > 1)

+					s.append("s");

+				s.append("<table>\n");

+				s.append("<tr><th>Sub ID</th><th>Delivery Count</th></tr>\n");

+				for (String sub : subs) {

+					if (!sub.equals("pubcount")) {

+						s.append("<tr><td>"+sub+"</td>");

+						s.append("<td>"+j3.getInt(sub)+"</td>");

+						s.append("</td></tr>\n");

+					}

+				}

+				s.append("</table>\n");

+

+				s.append("</td></tr>\n");

+			}

+			s.append("</table>\n");

+			s.append("</td></tr>\n");

+		}

+		s.append("</table>\n");

+		return s.toString();

+	}

+	private static String toHTML(JSONObject jo) {

+		StringBuilder s = new StringBuilder();

+		s.append("<table>\n");

+		s.append("<tr><th>Date</th><th>Feeds</th><th>Feed ID</th><th>Publish Count</th><th>Subs</th><th>Sub ID</th><th>Delivery Count</th></tr>\n");

+		String[] dates = JSONObject.getNames(jo);

+		Arrays.sort(dates);

+		for (int i = dates.length-1; i >= 0; i--) {

+			String date = dates[i];

+			JSONObject j2 = jo.getJSONObject(date);

+			int rc1 = countrows(j2);

+			String[] feeds = JSONObject.getNames(j2);

+			Arrays.sort(feeds);

+			s.append("<tr><td rowspan=\"" + rc1 + "\">")

+			 .append(date)

+			 .append("</td>");

+			s.append("<td rowspan=\"" + rc1 + "\">")

+			 .append(feeds.length)

+			 .append("</td>");

+			String px1 = "";

+			for (String feed : feeds) {

+				JSONObject j3 = j2.getJSONObject(feed);

+				int pubcount = j3.getInt("pubcount");

+				int subcnt = j3.length()-1;

+				int rc2 = (subcnt < 1) ? 1 : subcnt;

+				String[] subs = JSONObject.getNames(j3);

+				Arrays.sort(subs);

+				s.append(px1)

+				 .append("<td rowspan=\"" + rc2 + "\">")

+				 .append(feed)

+				 .append("</td>");

+				s.append("<td rowspan=\"" + rc2 + "\">")

+				 .append(pubcount)

+				 .append("</td>");

+				s.append("<td rowspan=\"" + rc2 + "\">")

+				 .append(subcnt)

+				 .append("</td>");

+				String px2 = "";

+				for (String sub : subs) {

+					if (!sub.equals("pubcount")) {

+						s.append(px2);

+						s.append("<td>"+sub+"</td>");

+						s.append("<td>"+j3.getInt(sub)+"</td>");

+						s.append("</tr>\n");

+						px2 = "<tr>";

+					}

+				}

+				if (px2.equals(""))

+					s.append("<td></td><td></td></tr>\n");

+				px1 = "<tr>";

+			}

+		}

+		s.append("</table>\n");

+		return s.toString();

+	}

+	private static int countrows(JSONObject x) {

+		int n = 0;

+		for (String feed : JSONObject.getNames(x)) {

+			JSONObject j3 = x.getJSONObject(feed);

+			int subcnt = j3.length()-1;

+			int rc2 = (subcnt < 1) ? 1 : subcnt;

+			n += rc2;

+		}

+		return (n > 0) ? n : 1;

+	}

+

+	/**

+	 * Convert a .CSV file (as generated by the normal FeedReport mechanism) to an HTML table.

+	 * @param args

+	 */

+	public static void main(String[] args) {

+		int rtype = 0;	// 0 -> day, 1 -> week, 2 -> month, 3 -> year

+		String infile  = null;

+		String outfile = null;

+		for (int i = 0; i < args.length; i++) {

+			if (args[i].equals("-t")) {

+				switch (args[++i].charAt(0)) {

+				case 'w':	rtype = 1; break;

+				case 'm':	rtype = 2; break;

+				case 'y':	rtype = 3; break;

+				default:	rtype = 0; break;

+				}

+			} else if (infile == null) {

+				infile = args[i];

+			} else if (outfile == null) {

+				outfile = args[i];

+			}

+		}

+		if (infile == null) {

+			System.err.println("usage: FeedReport [ -t <reporttype> ] [ <input .csv> ] [ <output .html> ]");

+			System.exit(1);

+		}

+		try {

+			JSONObject jo = new JSONObject();

+			LineNumberReader lr = new LineNumberReader(new FileReader(infile));

+			String line = lr.readLine();

+			while (line != null) {

+				String[] tt = line.split(",");

+				if (tt[0].startsWith("2")) {

+					String date = tt[0];

+					switch (rtype) {

+					case 1:

+						String[] xx = date.split("-");

+						Calendar cal = new GregorianCalendar(Integer.parseInt(xx[0]), Integer.parseInt(xx[1])-1, Integer.parseInt(xx[2]));

+						date = xx[0] + "-W" + cal.get(Calendar.WEEK_OF_YEAR);

+						break;

+					case 2:	date = date.substring(0, 7); break;

+					case 3:	date = date.substring(0, 4); break;

+					}

+					JSONObject datemap = jo.optJSONObject(date);

+					if (datemap == null) {

+						datemap = new JSONObject();

+						jo.put(date, datemap);

+					}

+					int feed = Integer.parseInt(tt[2]);

+					JSONObject feedmap = datemap.optJSONObject(""+feed);

+					if (feedmap == null) {

+						feedmap = new JSONObject();

+						feedmap.put("pubcount", 0);

+						datemap.put(""+feed, feedmap);

+					}

+					String type = tt[1];

+					int count   = Integer.parseInt(tt[4]);

+					if (type.equals("pub")) {

+						try {

+							int n = feedmap.getInt("pubcount");

+							feedmap.put("pubcount", n+count);

+						} catch (JSONException e) {

+							feedmap.put("pubcount", count);

+						}

+					} else if (type.equals("del")) {

+						String subid = tt[3];

+						try {

+							int n = feedmap.getInt(subid);

+							feedmap.put(subid, n+count);

+						} catch (JSONException e) {

+							feedmap.put(subid, count);

+						}

+					}

+				}

+				line = lr.readLine();

+			}

+			lr.close();

+			String t = toHTML(jo);

+			switch (rtype) {

+			case 1:	t = t.replaceAll("<th>Date</th>", "<th>Week</th>"); break;

+			case 2:	t = t.replaceAll("<th>Date</th>", "<th>Month</th>"); break;

+			case 3:	t = t.replaceAll("<th>Date</th>", "<th>Year</th>"); break;

+			}

+			System.out.println(t);

+		} catch (Exception e) {

+			System.err.println(e);

+			e.printStackTrace();

+		}

+	}

+}
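
A hedged usage sketch of the CSV-to-HTML converter in main() above; the input file name is invented, and the rolled-up HTML table is written to standard output:

// Hypothetical invocation of the converter above; "feeds.csv" is an invented path.
public class FeedReportRollupExample {
    public static void main(String[] args) {
        // "-t m" rolls the daily rows up by month; the HTML table goes to stdout.
        com.att.research.datarouter.reports.FeedReport.main(
                new String[] { "-t", "m", "feeds.csv" });
    }
}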

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/LatencyReport.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/LatencyReport.java
new file mode 100644
index 0000000..96e096e
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/LatencyReport.java
@@ -0,0 +1,179 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.io.FileNotFoundException;

+import java.io.PrintWriter;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.util.ArrayList;

+import java.util.List;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Generate a per-file latency report.  It reports on the details related to one file published

+ * on one feed. This report can be further reduced in order to generate more specific reports

+ * based on feed ID or node name. The report is a .csv file containing the following columns:

+ * <table>

+ * <tr><td>recordid</td><td>the unique record ID assigned to a particular incoming feed</td></tr>

+ * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>

+ * <tr><td>uri</td><td>the URI of the file delivered</td></tr>

+ * <tr><td>size</td><td>the size of the file delivered</td></tr>

+ * <tr><td>min</td><td>the minimum latency in delivering this feed to a subscriber (in ms)</td></tr>

+ * <tr><td>max</td><td>the maximum latency in delivering this feed to a subscriber (in ms)</td></tr>

+ * <tr><td>avg</td><td>the average latency in delivering this feed to all subscribers (in ms)</td></tr>

+ * <tr><td>fanout</td><td>the number of subscribers this feed was delivered to</td></tr>

+ * </table>

+ *

+ * @author Robert P. Eby

+ * @version $Id: LatencyReport.java,v 1.1 2013/10/28 18:06:53 eby Exp $

+ */

+public class LatencyReport extends ReportBase {

+	private static final String SELECT_SQL =

+		"select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +

+		" where EVENT_TIME >= ? and EVENT_TIME <= ? order by PUBLISH_ID, EVENT_TIME";

+

+	private class Event {

+		public final String type;

+		public final long time;

+		public Event(String t, long tm) {

+			type = t;

+			time = tm;

+		}

+	}

+	private class Counters {

+		public final String id;

+		public final int feedid;

+		public final long clen;

+		public final String fileid;

+		public final List<Event> events;

+		public Counters(String i, int fid, long c, String s) {

+			id = i;

+			feedid = fid;

+			clen = c;

+			fileid = s;

+			events = new ArrayList<Event>();

+		}

+		private long pubtime;

+		public void addEvent(String t, long tm) {

+			events.add(new Event(t, tm));

+			if (t.equals("pub"))

+				pubtime = tm;

+		}

+		public long min() {

+			long min = Long.MAX_VALUE;

+			for (Event e : events) {

+				if (e.type.equals("del")) {

+					min = Math.min(min, e.time - pubtime);

+				}

+			}

+			return min;

+		}

+		public long max() {

+			long max = 0;

+			for (Event e : events) {

+				if (e.type.equals("del")) {

+					max = Math.max(max, e.time - pubtime);

+				}

+			}

+			return max;

+		}

+		public long avg() {

+			long total = 0, c = 0;

+			for (Event e : events) {

+				if (e.type.equals("del")) {

+					total += e.time - pubtime;

+					c++;

+				}

+			}

+			return (c == 0) ? 0 : total/c;

+		}

+		public int fanout() {

+			int n = 0;

+			for (Event e : events) {

+				if (e.type.equals("del")) {

+					n++;

+				}

+			}

+			return n;

+		}

+		@Override

+		public String toString() {

+			return feedid + "," + fileid + "," + clen + "," + min() + "," + max() + "," + avg() + "," + fanout();

+		}

+	}

+

+	@Override

+	public void run() {

+		long start = System.currentTimeMillis();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(SELECT_SQL);

+			ps.setLong(1, from);

+			ps.setLong(2, to);

+			ResultSet rs = ps.executeQuery();

+			PrintWriter os = new PrintWriter(outfile);

+			os.println("recordid,feedid,uri,size,min,max,avg,fanout");

+			Counters c = null;

+			while (rs.next()) {

+				long etime  = rs.getLong("EVENT_TIME");

+				String type = rs.getString("TYPE");

+				String id   = rs.getString("PUBLISH_ID");

+				String fid  = rs.getString("FEED_FILEID");

+				int feed    = rs.getInt("FEEDID");

+				long clen   = rs.getLong("CONTENT_LENGTH");

+				if (c != null && !id.equals(c.id)) {

+					String line = c.id + "," + c.toString();

+					os.println(line);

+					c = null;

+				}

+				if (c == null) {

+					c = new Counters(id, feed, clen, fid);

+				}

+				if (feed != c.feedid)

+					System.err.println("Feed ID mismatch, "+feed+" <=> "+c.feedid);

+				if (clen != c.clen)

+					System.err.println("Cont Len mismatch, "+clen+" <=> "+c.clen);

+//				if (fid != c.fileid)

+//					System.err.println("File ID mismatch, "+fid+" <=> "+c.fileid);

+				c.addEvent(type, etime);

+			}

+			rs.close();

+			ps.close();

+			db.release(conn);

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+	}

+}
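
A minimal driver sketch for the report above using the ReportBase setters; the time window and output path are assumptions, and run() requires a reachable provisioning database:

// Hypothetical driver for the per-file latency report; the values are illustrative.
public class LatencyReportDriver {
    public static void main(String[] args) {
        com.att.research.datarouter.reports.LatencyReport report =
                new com.att.research.datarouter.reports.LatencyReport();
        long now = System.currentTimeMillis();
        report.setFrom(now - 24 * 60 * 60 * 1000L);   // last 24 hours (assumed window)
        report.setTo(now);
        report.setOutputFile("/tmp/latency.csv");     // invented output path
        report.run();                                 // queries LOG_RECORDS and writes the CSV
    }
}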

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/Report.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/Report.java
new file mode 100644
index 0000000..bd64e0e
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/Report.java
@@ -0,0 +1,155 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.lang.reflect.Constructor;

+import java.util.Calendar;

+import java.util.GregorianCalendar;

+import java.util.TimeZone;

+

+/**

+ * This class provides a CLI to generate any of the reports defined in this package.

+ *

+ * @author Robert P. Eby

+ * @version $Id: Report.java,v 1.2 2013/11/06 16:23:55 eby Exp $

+ */

+public class Report {

+	/**

+	 * Generate .csv report files from the database.  Usage:

+	 * <pre>

+	 * java com.att.research.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]

+	 * </pre>

+	 * <i>type</i> should be <b>volume</b> for a {@link VolumeReport},

+	 * <b>feed</b> for a {@link FeedReport},

+	 * <b>latency</b> for a {@link LatencyReport}, or

+	 * <b>dailyLatency</b> for a {@link DailyLatencyReport}.

+	 * If <i>outfile</i> is not specified, the report goes into a file <i>/tmp/nnnnnnnnnnnnn.csv</i>,

+	 * where nnnnnnnnnnnnn is the current time in milliseconds.

+	 * If <i>from</i> and <i>to</i> are not specified, then the report is limited to the last week's worth of data.

+	 * <i>from</i> can be the keyword <b>ALL</b> to specify all data in the DB, or the keyword <b>yesterday</b>.

+	 * Otherwise, <i>from</i> and <i>to</i> should match the pattern YYYY-MM-DD.

+	 * @param args the command line arguments

+	 */

+	public static void main(String[] args) {

+		ReportBase report = new VolumeReport();

+		String outfile = "/tmp/" + System.currentTimeMillis() + ".csv";

+		String from = null, to = null;

+

+		for (int i = 0; i < args.length; i++) {

+			if (args[i].equals("-?")) {

+				System.err.println("usage: java com.att.research.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]");

+				System.exit(0);

+			} else if (args[i].equals("-o")) {

+				if (++i < args.length) {

+					outfile = args[i];

+				}

+			} else if (args[i].equals("-t")) {

+				if (++i < args.length) {

+					String base = args[i];

+					base = Character.toUpperCase(base.charAt(0)) + base.substring(1);

+					base = "com.att.research.datarouter.reports."+base+"Report";

+					try {

+						@SuppressWarnings("unchecked")

+						Class<? extends ReportBase> cl = (Class<? extends ReportBase>) Class.forName(base);

+						Constructor<? extends ReportBase> con = cl.getConstructor();

+						report = con.newInstance();

+					} catch (Exception e) {

+						System.err.println("Unknown report type: "+args[i]);

+						System.exit(1);

+					}

+				}

+			} else if (from == null) {

+				from = args[i];

+			} else {

+				to = args[i];

+			}

+		}

+		long lfrom = 0, lto = 0;

+		if (from == null) {

+			// last 7 days

+			TimeZone utc = TimeZone.getTimeZone("UTC");

+			Calendar cal = new GregorianCalendar(utc);

+			cal.set(Calendar.HOUR_OF_DAY, 0);

+			cal.set(Calendar.MINUTE, 0);

+			cal.set(Calendar.SECOND, 0);

+			cal.set(Calendar.MILLISECOND, 0);

+			lfrom = cal.getTimeInMillis() - (7 * 24 * 60 * 60 * 1000L);	// 1 week

+			lto   = cal.getTimeInMillis() - 1;

+		} else if (to == null) {

+			try {

+				String[] dates = getDates(from);

+				lfrom = Long.parseLong(dates[0]);

+				lto   = Long.parseLong(dates[1]);

+			} catch (Exception e) {

+				System.err.println("Invalid date: "+from);

+				System.exit(1);

+			}

+		} else {

+			String[] dates;

+			try {

+				dates = getDates(from);

+				lfrom = Long.parseLong(dates[0]);

+			} catch (Exception e) {

+				System.err.println("Invalid date: "+from);

+				System.exit(1);

+			}

+			try {

+				dates = getDates(to);

+				lto   = Long.parseLong(dates[0]);

+			} catch (Exception e) {

+				System.err.println("Invalid date: "+to);

+				System.exit(1);

+			}

+		}

+

+		report.setFrom(lfrom);

+		report.setTo(lto);

+		report.setOutputFile(outfile);

+		report.run();

+	}

+

+	private static String[] getDates(String d) throws Exception {

+		if (d.equals("ALL"))

+			return new String[] { "1", ""+System.currentTimeMillis() };

+

+		TimeZone utc = TimeZone.getTimeZone("UTC");

+		Calendar cal = new GregorianCalendar(utc);

+		if (d.matches("20\\d\\d-\\d\\d-\\d\\d")) {

+			cal.set(Calendar.YEAR,         Integer.parseInt(d.substring(0, 4)));

+			cal.set(Calendar.MONTH,        Integer.parseInt(d.substring(5, 7))-1);

+			cal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(d.substring(8, 10)));

+		} else if (d.equals("yesterday")) {

+			cal.add(Calendar.DAY_OF_YEAR, -1);

+		} else

+			throw new Exception("wa?");

+		cal.set(Calendar.HOUR_OF_DAY, 0);

+		cal.set(Calendar.MINUTE, 0);

+		cal.set(Calendar.SECOND, 0);

+		cal.set(Calendar.MILLISECOND, 0);

+		long start = cal.getTimeInMillis();

+		long end   = start + (24 * 60 * 60 * 1000L) - 1;

+		return new String[] { ""+start, ""+end };

+	}

+}
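
A hedged example of driving the CLI described above; the output path is invented, "-t volume" resolves to the VolumeReport class, and "yesterday" selects the previous UTC day:

// Hypothetical invocation of the Report CLI; the output path is illustrative only.
public class ReportCliExample {
    public static void main(String[] args) {
        com.att.research.datarouter.reports.Report.main(
                new String[] { "-t", "volume", "-o", "/tmp/volume.csv", "yesterday" });
    }
}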

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/ReportBase.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/ReportBase.java
new file mode 100644
index 0000000..2bdabf1
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/ReportBase.java
@@ -0,0 +1,63 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+package com.att.research.datarouter.reports;

+

+import org.apache.log4j.Logger;

+

+/**

+ * Base class for all the report generating classes.

+ *

+ * @author Robert P. Eby

+ * @version $Id: ReportBase.java,v 1.1 2013/10/28 18:06:53 eby Exp $

+ */

+abstract public class ReportBase implements Runnable {

+	protected long from, to;

+	protected String outfile;

+	protected Logger logger;

+

+	public ReportBase() {

+		this.from = 0;

+		this.to = System.currentTimeMillis();

+		this.logger = Logger.getLogger("com.att.research.datarouter.reports");

+	}

+

+	public void setFrom(long from) {

+		this.from = from;

+	}

+

+	public void setTo(long to) {

+		this.to = to;

+	}

+

+	public String getOutfile() {

+		return outfile;

+	}

+

+	public void setOutputFile(String s) {

+		this.outfile = s;

+	}

+

+	@Override

+	abstract public void run();

+}
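
The base class above defines the contract every report honours: a from/to window, an output file, and a run() method. A hedged sketch of a custom report built on that contract (the class and its trivial output are invented for illustration):

// Hypothetical custom report extending ReportBase; it only demonstrates the contract
// (window + output file + run) and writes a single line rather than querying the DB.
public class EmptyReport extends com.att.research.datarouter.reports.ReportBase {
    @Override
    public void run() {
        try {
            java.io.PrintWriter os = new java.io.PrintWriter(outfile);
            os.println("from,to");
            os.println(from + "," + to);   // the window set via setFrom()/setTo()
            os.close();
        } catch (java.io.FileNotFoundException e) {
            System.err.println("File cannot be written: " + outfile);
        }
    }

    public static void main(String[] args) {
        EmptyReport report = new EmptyReport();
        report.setOutputFile("/tmp/empty.csv");   // invented output path
        report.run();
    }
}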

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/SubscriberReport.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/SubscriberReport.java
new file mode 100644
index 0000000..b003ab1
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/SubscriberReport.java
@@ -0,0 +1,157 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.io.FileNotFoundException;

+import java.io.PrintWriter;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.util.HashMap;

+import java.util.Map;

+import java.util.TreeSet;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Generate a subscribers report.  The report is a .CSV file.  It contains information per-day and per-subscriber,

+ * on the status codes returned from each delivery attempt (1XX, 2XX, etc.), as well as a count of deliveries that received a 4XX response instead of a 100.

+ *

+ * @author Robert P. Eby

+ * @version $Id: SubscriberReport.java,v 1.2 2013/11/06 16:23:55 eby Exp $

+ */

+public class SubscriberReport extends ReportBase {

+	private static final String SELECT_SQL =

+		"select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, RESULT, COUNT(RESULT) as COUNT" +

+		" from LOG_RECORDS" +

+		" where TYPE = 'del' and EVENT_TIME >= ? and EVENT_TIME <= ?" +

+		" group by DATE, DELIVERY_SUBID, RESULT";

+	private static final String SELECT_SQL2 =

+		"select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, COUNT(CONTENT_LENGTH_2) as COUNT" +

+		" from LOG_RECORDS" +

+		" where TYPE = 'dlx' and CONTENT_LENGTH_2 = -1 and EVENT_TIME >= ? and EVENT_TIME <= ?" +

+		" group by DATE, DELIVERY_SUBID";

+

+	private class Counters {

+		private String date;

+		private int sub;

+		private int c100, c200, c300, c400, c500, cm1, cdlx;

+		public Counters(String date, int sub) {

+			this.date = date;

+			this.sub = sub;

+			c100 = c200 = c300 = c400 = c500 = cm1 = cdlx = 0;

+		}

+		public void addCounts(int status, int n) {

+			if (status < 0) {

+				cm1 += n;

+			} else if (status >= 100 && status <= 199) {

+				c100 += n;

+			} else if (status >= 200 && status <= 299) {

+				c200 += n;

+			} else if (status >= 300 && status <= 399) {

+				c300 += n;

+			} else if (status >= 400 && status <= 499) {

+				c400 += n;

+			} else if (status >= 500 && status <= 599) {

+				c500 += n;

+			}

+		}

+		public void addDlxCount(int n) {

+			cdlx += n;

+		}

+		@Override

+		public String toString() {

+			return date + "," + sub + "," +

+				c100 + "," + c200 + "," + c300 + "," + c400 + "," + c500 + "," +

+				cm1 + "," + cdlx;

+		}

+	}

+

+	@Override

+	public void run() {

+		Map<String, Counters> map = new HashMap<String, Counters>();

+		long start = System.currentTimeMillis();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			PreparedStatement ps = conn.prepareStatement(SELECT_SQL);

+			ps.setLong(1, from);

+			ps.setLong(2, to);

+			ResultSet rs = ps.executeQuery();

+			while (rs.next()) {

+				String date = rs.getString("DATE");

+				int sub     = rs.getInt("DELIVERY_SUBID");

+				int res     = rs.getInt("RESULT");

+				int count   = rs.getInt("COUNT");

+				String key  = date + "," + sub;

+				Counters c = map.get(key);

+				if (c == null) {

+					c = new Counters(date, sub);

+					map.put(key, c);

+				}

+				c.addCounts(res, count);

+			}

+			rs.close();

+			ps.close();

+

+			ps = conn.prepareStatement(SELECT_SQL2);

+			ps.setLong(1, from);

+			ps.setLong(2, to);

+			rs = ps.executeQuery();

+			while (rs.next()) {

+				String date = rs.getString("DATE");

+				int sub     = rs.getInt("DELIVERY_SUBID");

+				int count   = rs.getInt("COUNT");

+				String key  = date + "," + sub;

+				Counters c = map.get(key);

+				if (c == null) {

+					c = new Counters(date, sub);

+					map.put(key, c);

+				}

+				c.addDlxCount(count);

+			}

+			rs.close();

+			ps.close();

+

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+		try {

+			PrintWriter os = new PrintWriter(outfile);

+			os.println("date,subid,count100,count200,count300,count400,count500,countminus1,countdlx");

+			for (String key : new TreeSet<String>(map.keySet())) {

+				Counters c = map.get(key);

+				os.println(c.toString());

+			}

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		}

+	}

+}
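
A small stand-alone sketch of the status-code bucketing performed by Counters.addCounts above; the sample delivery results are invented:

// Hypothetical illustration of the bucketing used above: each delivery result
// increments exactly one of the 1XX..5XX buckets, or the "minus one" bucket.
public class StatusBucketExample {
    public static void main(String[] args) {
        int[] results = { 204, 404, 503, -1, 201 };   // invented delivery results
        int c100 = 0, c200 = 0, c300 = 0, c400 = 0, c500 = 0, cm1 = 0;
        for (int status : results) {
            if (status < 0)                            cm1++;
            else if (status >= 100 && status <= 199)   c100++;
            else if (status >= 200 && status <= 299)   c200++;
            else if (status >= 300 && status <= 399)   c300++;
            else if (status >= 400 && status <= 499)   c400++;
            else if (status >= 500 && status <= 599)   c500++;
        }
        // Expected output: 2xx=2 4xx=1 5xx=1 neg=1
        System.out.println("2xx=" + c200 + " 4xx=" + c400 + " 5xx=" + c500 + " neg=" + cm1);
    }
}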

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/VolumeReport.java b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/VolumeReport.java
new file mode 100644
index 0000000..92a85e2
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/VolumeReport.java
@@ -0,0 +1,140 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+

+

+package com.att.research.datarouter.reports;

+

+import java.io.FileNotFoundException;

+import java.io.PrintWriter;

+import java.sql.Connection;

+import java.sql.PreparedStatement;

+import java.sql.ResultSet;

+import java.sql.SQLException;

+import java.text.SimpleDateFormat;

+import java.util.Date;

+import java.util.HashMap;

+import java.util.Map;

+import java.util.TreeSet;

+

+import com.att.research.datarouter.provisioning.utils.DB;

+

+/**

+ * Generate a traffic volume report. The report is a .csv file containing the following columns:

+ * <table>

+ * <tr><td>date</td><td>the date for this record</td></tr>

+ * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>

+ * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>

+ * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>

+ * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>

+ * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>

+ * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>

+ * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>

+ * </table>

+ *

+ * @author Robert P. Eby

+ * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $

+ */

+public class VolumeReport extends ReportBase {

+	private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +

+		" from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";

+

+	private class Counters {

+		public int  filespublished, filesdelivered, filesexpired;

+		public long bytespublished, bytesdelivered, bytesexpired;

+		@Override

+		public String toString() {

+			return String.format("%d,%d,%d,%d,%d,%d",

+				filespublished, bytespublished, filesdelivered,

+				bytesdelivered, filesexpired, bytesexpired);

+		}

+	}

+

+	@Override

+	public void run() {

+		Map<String, Counters> map = new HashMap<String, Counters>();

+		SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");

+		long start = System.currentTimeMillis();

+		try {

+			DB db = new DB();

+			@SuppressWarnings("resource")

+			Connection conn = db.getConnection();

+			// We need to run this SELECT in stages, because otherwise we run out of memory!

+			final long stepsize = 6000000L;

+			boolean go_again = true;

+			for (long i = 0; go_again; i += stepsize) {

+				PreparedStatement ps = conn.prepareStatement(SELECT_SQL);

+				ps.setLong(1, from);

+				ps.setLong(2, to);

+				ps.setLong(3, i);

+				ps.setLong(4, stepsize);

+				ResultSet rs = ps.executeQuery();

+				go_again = false;

+				while (rs.next()) {

+					go_again = true;

+					long etime  = rs.getLong("EVENT_TIME");

+					String type = rs.getString("TYPE");

+					int feed    = rs.getInt("FEEDID");

+					long clen   = rs.getLong("CONTENT_LENGTH");

+					String key  = sdf.format(new Date(etime)) + ":" + feed;

+					Counters c = map.get(key);

+					if (c == null) {

+						c = new Counters();

+						map.put(key, c);

+					}

+					if (type.equalsIgnoreCase("pub")) {

+						c.filespublished++;

+						c.bytespublished += clen;

+					} else if (type.equalsIgnoreCase("del")) {

+						// Only count successful deliveries

+						int statusCode = rs.getInt("RESULT");

+						if (statusCode >= 200 && statusCode < 300) {

+							c.filesdelivered++;

+							c.bytesdelivered += clen;

+						}

+					} else if (type.equalsIgnoreCase("exp")) {

+						c.filesexpired++;

+						c.bytesexpired += clen;

+					}

+				}

+				rs.close();

+				ps.close();

+			}

+			db.release(conn);

+		} catch (SQLException e) {

+			e.printStackTrace();

+		}

+		logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");

+		try {

+			PrintWriter os = new PrintWriter(outfile);

+			os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");

+			for (String key : new TreeSet<String>(map.keySet())) {

+				Counters c = map.get(key);

+				String[] p = key.split(":");

+				os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));

+			}

+			os.close();

+		} catch (FileNotFoundException e) {

+			System.err.println("File cannot be written: "+outfile);

+		}

+	}

+}

diff --git a/datarouter-prov/src/main/java/com/att/research/datarouter/reports/package.html b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/package.html
new file mode 100644
index 0000000..2c2d26b
--- /dev/null
+++ b/datarouter-prov/src/main/java/com/att/research/datarouter/reports/package.html
@@ -0,0 +1,43 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+<html>

+<body>

+<p>

+This package provides various classes which are used to generate .CSV files from the logs

+in the database.

+The .CSV files can then be used to generate reports on another web server external to the DR network.

+</p>

+<p>

+The classes in this package, and the reports they generate, are:

+</p>

+<table>

+<tr><th>Class</th><th>Report</th></tr>

+<tr><td>{@link com.att.research.datarouter.reports.DailyLatencyReport}</td><td>dailylatency.csv</td></tr>

+<tr><td>{@link com.att.research.datarouter.reports.FeedReport}</td><td>NOT CURRENTLY USED</td></tr>

+<tr><td>{@link com.att.research.datarouter.reports.LatencyReport}</td><td></td></tr>

+<tr><td>{@link com.att.research.datarouter.reports.SubscriberReport}</td><td>subscriber.csv</td></tr>

+<tr><td>{@link com.att.research.datarouter.reports.VolumeReport}</td><td>volumes.csv</td></tr>

+</table>

+</body>

+</html>
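For orientation, a minimal sketch of consuming one of these files on such an external server, assuming volumes.csv keeps the header written by VolumeReport above (date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired); the class name and file path are illustrative only.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

public class VolumeCsvReader {
    // Sums bytespublished per day from a volumes.csv produced by VolumeReport.
    public static void main(String[] args) throws IOException {
        String file = args.length > 0 ? args[0] : "volumes.csv"; // path is a placeholder
        Map<String, Long> bytesPerDay = new TreeMap<String, Long>();
        try (BufferedReader br = new BufferedReader(new FileReader(file))) {
            br.readLine(); // skip the header row
            String line;
            while ((line = br.readLine()) != null) {
                String[] f = line.split(",");
                // columns: date,feedid,filespublished,bytespublished,...
                bytesPerDay.merge(f[0], Long.parseLong(f[3]), Long::sum);
            }
        }
        for (Map.Entry<String, Long> e : bytesPerDay.entrySet()) {
            System.out.println(e.getKey() + " " + e.getValue());
        }
    }
}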

diff --git a/datarouter-prov/src/main/java/org/json/CDL.java b/datarouter-prov/src/main/java/org/json/CDL.java
new file mode 100644
index 0000000..7e489a9
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/CDL.java
@@ -0,0 +1,301 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * This provides static methods to convert comma delimited text into a

+ * JSONArray, and to covert a JSONArray into comma delimited text. Comma

+ * delimited text is a very popular format for data interchange. It is

+ * understood by most database, spreadsheet, and organizer programs.

+ * <p>

+ * Each row of text represents a row in a table or a data record. Each row

+ * ends with a NEWLINE character. Each row contains one or more values.

+ * Values are separated by commas. A value can contain any character except

+ * for comma, unless it is wrapped in single quotes or double quotes.

+ * <p>

+ * The first row usually contains the names of the columns.

+ * <p>

+ * A comma delimited list can be converted into a JSONArray of JSONObjects.

+ * The names for the elements in the JSONObjects can be taken from the names

+ * in the first row.

+ * @author JSON.org

+ * @version 2012-11-13

+ */

+public class CDL {

+

+    /**

+     * Get the next value. The value can be wrapped in quotes. The value can

+     * be empty.

+     * @param x A JSONTokener of the source text.

+     * @return The value string, or null if empty.

+     * @throws JSONException if the quoted string is badly formed.

+     */

+    private static String getValue(JSONTokener x) throws JSONException {

+        char c;

+        char q;

+        StringBuffer sb;

+        do {

+            c = x.next();

+        } while (c == ' ' || c == '\t');

+        switch (c) {

+        case 0:

+            return null;

+        case '"':

+        case '\'':

+            q = c;

+            sb = new StringBuffer();

+            for (;;) {

+                c = x.next();

+                if (c == q) {

+                    break;

+                }

+                if (c == 0 || c == '\n' || c == '\r') {

+                    throw x.syntaxError("Missing close quote '" + q + "'.");

+                }

+                sb.append(c);

+            }

+            return sb.toString();

+        case ',':

+            x.back();

+            return "";

+        default:

+            x.back();

+            return x.nextTo(',');

+        }

+    }

+

+    /**

+     * Produce a JSONArray of strings from a row of comma delimited values.

+     * @param x A JSONTokener of the source text.

+     * @return A JSONArray of strings.

+     * @throws JSONException

+     */

+    public static JSONArray rowToJSONArray(JSONTokener x) throws JSONException {

+        JSONArray ja = new JSONArray();

+        for (;;) {

+            String value = getValue(x);

+            char c = x.next();

+            if (value == null ||

+                    (ja.length() == 0 && value.length() == 0 && c != ',')) {

+                return null;

+            }

+            ja.put(value);

+            for (;;) {

+                if (c == ',') {

+                    break;

+                }

+                if (c != ' ') {

+                    if (c == '\n' || c == '\r' || c == 0) {

+                        return ja;

+                    }

+                    throw x.syntaxError("Bad character '" + c + "' (" +

+                            (int)c + ").");

+                }

+                c = x.next();

+            }

+        }

+    }

+

+    /**

+     * Produce a JSONObject from a row of comma delimited text, using a

+     * parallel JSONArray of strings to provide the names of the elements.

+     * @param names A JSONArray of names. This is commonly obtained from the

+     *  first row of a comma delimited text file using the rowToJSONArray

+     *  method.

+     * @param x A JSONTokener of the source text.

+     * @return A JSONObject combining the names and values.

+     * @throws JSONException

+     */

+    public static JSONObject rowToJSONObject(JSONArray names, JSONTokener x)

+            throws JSONException {

+        JSONArray ja = rowToJSONArray(x);

+        return ja != null ? ja.toJSONObject(names) :  null;

+    }

+

+    /**

+     * Produce a comma delimited text row from a JSONArray. Values containing

+     * the comma character will be quoted. Troublesome characters may be

+     * removed.

+     * @param ja A JSONArray of strings.

+     * @return A string ending in NEWLINE.

+     */

+    public static String rowToString(JSONArray ja) {

+        StringBuffer sb = new StringBuffer();

+        for (int i = 0; i < ja.length(); i += 1) {

+            if (i > 0) {

+                sb.append(',');

+            }

+            Object object = ja.opt(i);

+            if (object != null) {

+                String string = object.toString();

+                if (string.length() > 0 && (string.indexOf(',') >= 0 ||

+                        string.indexOf('\n') >= 0 || string.indexOf('\r') >= 0 ||

+                        string.indexOf(0) >= 0 || string.charAt(0) == '"')) {

+                    sb.append('"');

+                    int length = string.length();

+                    for (int j = 0; j < length; j += 1) {

+                        char c = string.charAt(j);

+                        if (c >= ' ' && c != '"') {

+                            sb.append(c);

+                        }

+                    }

+                    sb.append('"');

+                } else {

+                    sb.append(string);

+                }

+            }

+        }

+        sb.append('\n');

+        return sb.toString();

+    }

+

+    /**

+     * Produce a JSONArray of JSONObjects from a comma delimited text string,

+     * using the first row as a source of names.

+     * @param string The comma delimited text.

+     * @return A JSONArray of JSONObjects.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(String string) throws JSONException {

+        return toJSONArray(new JSONTokener(string));

+    }

+

+    /**

+     * Produce a JSONArray of JSONObjects from a comma delimited text string,

+     * using the first row as a source of names.

+     * @param x The JSONTokener containing the comma delimited text.

+     * @return A JSONArray of JSONObjects.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(JSONTokener x) throws JSONException {

+        return toJSONArray(rowToJSONArray(x), x);

+    }

+

+    /**

+     * Produce a JSONArray of JSONObjects from a comma delimited text string

+     * using a supplied JSONArray as the source of element names.

+     * @param names A JSONArray of strings.

+     * @param string The comma delimited text.

+     * @return A JSONArray of JSONObjects.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(JSONArray names, String string)

+            throws JSONException {

+        return toJSONArray(names, new JSONTokener(string));

+    }

+

+    /**

+     * Produce a JSONArray of JSONObjects from a comma delimited text string

+     * using a supplied JSONArray as the source of element names.

+     * @param names A JSONArray of strings.

+     * @param x A JSONTokener of the source text.

+     * @return A JSONArray of JSONObjects.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(JSONArray names, JSONTokener x)

+            throws JSONException {

+        if (names == null || names.length() == 0) {

+            return null;

+        }

+        JSONArray ja = new JSONArray();

+        for (;;) {

+            JSONObject jo = rowToJSONObject(names, x);

+            if (jo == null) {

+                break;

+            }

+            ja.put(jo);

+        }

+        if (ja.length() == 0) {

+            return null;

+        }

+        return ja;

+    }

+

+

+    /**

+     * Produce a comma delimited text from a JSONArray of JSONObjects. The

+     * first row will be a list of names obtained by inspecting the first

+     * JSONObject.

+     * @param ja A JSONArray of JSONObjects.

+     * @return A comma delimited text.

+     * @throws JSONException

+     */

+    public static String toString(JSONArray ja) throws JSONException {

+        JSONObject jo = ja.optJSONObject(0);

+        if (jo != null) {

+            JSONArray names = jo.names();

+            if (names != null) {

+                return rowToString(names) + toString(names, ja);

+            }

+        }

+        return null;

+    }

+

+    /**

+     * Produce a comma delimited text from a JSONArray of JSONObjects using

+     * a provided list of names. The list of names is not included in the

+     * output.

+     * @param names A JSONArray of strings.

+     * @param ja A JSONArray of JSONObjects.

+     * @return A comma delimited text.

+     * @throws JSONException

+     */

+    public static String toString(JSONArray names, JSONArray ja)

+            throws JSONException {

+        if (names == null || names.length() == 0) {

+            return null;

+        }

+        StringBuffer sb = new StringBuffer();

+        for (int i = 0; i < ja.length(); i += 1) {

+            JSONObject jo = ja.optJSONObject(i);

+            if (jo != null) {

+                sb.append(rowToString(jo.toJSONArray(names)));

+            }

+        }

+        return sb.toString();

+    }

+}
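For reference, a hedged usage sketch of the CDL round trip documented above: the first CSV row supplies the member names, each later row becomes a JSONObject, and toString() converts back to comma delimited text. The data values are made up for illustration.

import org.json.CDL;
import org.json.JSONArray;
import org.json.JSONException;

public class CdlDemo {
    public static void main(String[] args) throws JSONException {
        String csv = "feedid,filespublished\n10,42\n11,7\n";
        JSONArray rows = CDL.toJSONArray(csv);               // first row becomes the names
        System.out.println(rows.length());                   // 2
        System.out.println(rows.getJSONObject(0).getString("filespublished")); // 42
        System.out.println(CDL.toString(rows));              // back to comma delimited text
    }
}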

diff --git a/datarouter-prov/src/main/java/org/json/Cookie.java b/datarouter-prov/src/main/java/org/json/Cookie.java
new file mode 100644
index 0000000..67e4f17
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/Cookie.java
@@ -0,0 +1,191 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * Convert a web browser cookie specification to a JSONObject and back.

+ * JSON and Cookies are both notations for name/value pairs.

+ * @author JSON.org

+ * @version 2010-12-24

+ */

+public class Cookie {

+

+    /**

+     * Produce a copy of a string in which the characters '+', '%', '=', ';'

+     * and control characters are replaced with "%hh". This is a gentle form

+     * of URL encoding, attempting to cause as little distortion to the

+     * string as possible. The characters '=' and ';' are meta characters in

+     * cookies. By convention, they are escaped using the URL-encoding. This is

+     * only a convention, not a standard. Often, cookies are expected to have

+     * encoded values. We encode '=' and ';' because we must. We encode '%' and

+     * '+' because they are meta characters in URL encoding.

+     * @param string The source string.

+     * @return       The escaped result.

+     */

+    public static String escape(String string) {

+        char         c;

+        String       s = string.trim();

+        StringBuffer sb = new StringBuffer();

+        int          length = s.length();

+        for (int i = 0; i < length; i += 1) {

+            c = s.charAt(i);

+            if (c < ' ' || c == '+' || c == '%' || c == '=' || c == ';') {

+                sb.append('%');

+                sb.append(Character.forDigit((char)((c >>> 4) & 0x0f), 16));

+                sb.append(Character.forDigit((char)(c & 0x0f), 16));

+            } else {

+                sb.append(c);

+            }

+        }

+        return sb.toString();

+    }

+

+

+    /**

+     * Convert a cookie specification string into a JSONObject. The string

+     * will contain a name value pair separated by '='. The name and the value

+     * will be unescaped, possibly converting '+' and '%' sequences. The

+     * cookie properties may follow, separated by ';', also represented as

+     * name=value (except the secure property, which does not have a value).

+     * The name will be stored under the key "name", and the value will be

+     * stored under the key "value". This method does not do checking or

+     * validation of the parameters. It only converts the cookie string into

+     * a JSONObject.

+     * @param string The cookie specification string.

+     * @return A JSONObject containing "name", "value", and possibly other

+     *  members.

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(String string) throws JSONException {

+        String         name;

+        JSONObject     jo = new JSONObject();

+        Object         value;

+        JSONTokener x = new JSONTokener(string);

+        jo.put("name", x.nextTo('='));

+        x.next('=');

+        jo.put("value", x.nextTo(';'));

+        x.next();

+        while (x.more()) {

+            name = unescape(x.nextTo("=;"));

+            if (x.next() != '=') {

+                if (name.equals("secure")) {

+                    value = Boolean.TRUE;

+                } else {

+                    throw x.syntaxError("Missing '=' in cookie parameter.");

+                }

+            } else {

+                value = unescape(x.nextTo(';'));

+                x.next();

+            }

+            jo.put(name, value);

+        }

+        return jo;

+    }

+

+

+    /**

+     * Convert a JSONObject into a cookie specification string. The JSONObject

+     * must contain "name" and "value" members.

+     * If the JSONObject contains "expires", "domain", "path", or "secure"

+     * members, they will be appended to the cookie specification string.

+     * All other members are ignored.

+     * @param jo A JSONObject

+     * @return A cookie specification string

+     * @throws JSONException

+     */

+    public static String toString(JSONObject jo) throws JSONException {

+        StringBuffer sb = new StringBuffer();

+

+        sb.append(escape(jo.getString("name")));

+        sb.append("=");

+        sb.append(escape(jo.getString("value")));

+        if (jo.has("expires")) {

+            sb.append(";expires=");

+            sb.append(jo.getString("expires"));

+        }

+        if (jo.has("domain")) {

+            sb.append(";domain=");

+            sb.append(escape(jo.getString("domain")));

+        }

+        if (jo.has("path")) {

+            sb.append(";path=");

+            sb.append(escape(jo.getString("path")));

+        }

+        if (jo.optBoolean("secure")) {

+            sb.append(";secure");

+        }

+        return sb.toString();

+    }

+

+    /**

+     * Convert <code>%</code><i>hh</i> sequences to single characters, and

+     * convert plus to space.

+     * @param string A string that may contain

+     *      <code>+</code>&nbsp;<small>(plus)</small> and

+     *      <code>%</code><i>hh</i> sequences.

+     * @return The unescaped string.

+     */

+    public static String unescape(String string) {

+        int length = string.length();

+        StringBuffer sb = new StringBuffer();

+        for (int i = 0; i < length; ++i) {

+            char c = string.charAt(i);

+            if (c == '+') {

+                c = ' ';

+            } else if (c == '%' && i + 2 < length) {

+                int d = JSONTokener.dehexchar(string.charAt(i + 1));

+                int e = JSONTokener.dehexchar(string.charAt(i + 2));

+                if (d >= 0 && e >= 0) {

+                    c = (char)(d * 16 + e);

+                    i += 2;

+                }

+            }

+            sb.append(c);

+        }

+        return sb.toString();

+    }

+}
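A small, hedged sketch of the cookie round trip described above; the cookie string here is only an example. Note that the attribute names and values after the first pair are unescaped, and toString() re-escapes '=', ';', '%', and '+' as %hh.

import org.json.Cookie;
import org.json.JSONException;
import org.json.JSONObject;

public class CookieDemo {
    public static void main(String[] args) throws JSONException {
        // Parse a cookie specification string into name/value plus any attributes.
        JSONObject jo = Cookie.toJSONObject("SESSION=abc123; path=/; secure");
        System.out.println(jo.getString("name"));     // SESSION
        System.out.println(jo.getString("value"));    // abc123
        System.out.println(jo.optBoolean("secure"));  // true: "secure" carries no value
        // Rebuild a cookie specification string from the JSONObject.
        System.out.println(Cookie.toString(jo));      // SESSION=abc123;path=/;secure
    }
}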

diff --git a/datarouter-prov/src/main/java/org/json/CookieList.java b/datarouter-prov/src/main/java/org/json/CookieList.java
new file mode 100644
index 0000000..89b7816
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/CookieList.java
@@ -0,0 +1,112 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.util.Iterator;

+

+/**

+ * Convert a web browser cookie list string to a JSONObject and back.

+ * @author JSON.org

+ * @version 2010-12-24

+ */

+public class CookieList {

+

+    /**

+     * Convert a cookie list into a JSONObject. A cookie list is a sequence

+     * of name/value pairs. The names are separated from the values by '='.

+     * The pairs are separated by ';'. The names and the values

+     * will be unescaped, possibly converting '+' and '%' sequences.

+     *

+     * To add a cookie to a cookie list,

+     * cookielistJSONObject.put(cookieJSONObject.getString("name"),

+     *     cookieJSONObject.getString("value"));

+     * @param string  A cookie list string

+     * @return A JSONObject

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(String string) throws JSONException {

+        JSONObject jo = new JSONObject();

+        JSONTokener x = new JSONTokener(string);

+        while (x.more()) {

+            String name = Cookie.unescape(x.nextTo('='));

+            x.next('=');

+            jo.put(name, Cookie.unescape(x.nextTo(';')));

+            x.next();

+        }

+        return jo;

+    }

+

+

+    /**

+     * Convert a JSONObject into a cookie list. A cookie list is a sequence

+     * of name/value pairs. The names are separated from the values by '='.

+     * The pairs are separated by ';'. The characters '%', '+', '=', and ';'

+     * in the names and values are replaced by "%hh".

+     * @param jo A JSONObject

+     * @return A cookie list string

+     * @throws JSONException

+     */

+    public static String toString(JSONObject jo) throws JSONException {

+        boolean      b = false;

+        Iterator<String> keys = jo.keys();

+        String       string;

+        StringBuffer sb = new StringBuffer();

+        while (keys.hasNext()) {

+            string = keys.next().toString();

+            if (!jo.isNull(string)) {

+                if (b) {

+                    sb.append(';');

+                }

+                sb.append(Cookie.escape(string));

+                sb.append("=");

+                sb.append(Cookie.escape(jo.getString(string)));

+                b = true;

+            }

+        }

+        return sb.toString();

+    }

+}
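A hedged usage sketch of the cookie-list conversion described above; the list contents are illustrative, and the output order of toString() follows the JSONObject's key iteration order.

import org.json.CookieList;
import org.json.JSONException;
import org.json.JSONObject;

public class CookieListDemo {
    public static void main(String[] args) throws JSONException {
        // A cookie list is a sequence of name=value pairs separated by ';'.
        JSONObject jo = CookieList.toJSONObject("theme=dark; lang=en");
        System.out.println(jo.getString("theme"));    // dark
        // Convert back; '%', '+', '=' and ';' in names or values are escaped as %hh.
        System.out.println(CookieList.toString(jo));  // e.g. theme=dark;lang=en
    }
}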

diff --git a/datarouter-prov/src/main/java/org/json/HTTP.java b/datarouter-prov/src/main/java/org/json/HTTP.java
new file mode 100644
index 0000000..34ad3f5
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/HTTP.java
@@ -0,0 +1,185 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.util.Iterator;

+

+/**

+ * Convert an HTTP header to a JSONObject and back.

+ * @author JSON.org

+ * @version 2010-12-24

+ */

+public class HTTP {

+

+    /** Carriage return/line feed. */

+    public static final String CRLF = "\r\n";

+

+    /**

+     * Convert an HTTP header string into a JSONObject. It can be a request

+     * header or a response header. A request header will contain

+     * <pre>{

+     *    Method: "POST" (for example),

+     *    "Request-URI": "/" (for example),

+     *    "HTTP-Version": "HTTP/1.1" (for example)

+     * }</pre>

+     * A response header will contain

+     * <pre>{

+     *    "HTTP-Version": "HTTP/1.1" (for example),

+     *    "Status-Code": "200" (for example),

+     *    "Reason-Phrase": "OK" (for example)

+     * }</pre>

+     * In addition, the other parameters in the header will be captured, using

+     * the HTTP field names as JSON names, so that <pre>

+     *    Date: Sun, 26 May 2002 18:06:04 GMT

+     *    Cookie: Q=q2=PPEAsg--; B=677gi6ouf29bn&b=2&f=s

+     *    Cache-Control: no-cache</pre>

+     * become

+     * <pre>{...

+     *    Date: "Sun, 26 May 2002 18:06:04 GMT",

+     *    Cookie: "Q=q2=PPEAsg--; B=677gi6ouf29bn&b=2&f=s",

+     *    "Cache-Control": "no-cache",

+     * ...}</pre>

+     * It does no further checking or conversion. It does not parse dates.

+     * It does not do '%' transforms on URLs.

+     * @param string An HTTP header string.

+     * @return A JSONObject containing the elements and attributes

+     * of the HTTP header string.

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(String string) throws JSONException {

+        JSONObject     jo = new JSONObject();

+        HTTPTokener    x = new HTTPTokener(string);

+        String         token;

+

+        token = x.nextToken();

+        if (token.toUpperCase().startsWith("HTTP")) {

+

+// Response

+

+            jo.put("HTTP-Version", token);

+            jo.put("Status-Code", x.nextToken());

+            jo.put("Reason-Phrase", x.nextTo('\0'));

+            x.next();

+

+        } else {

+

+// Request

+

+            jo.put("Method", token);

+            jo.put("Request-URI", x.nextToken());

+            jo.put("HTTP-Version", x.nextToken());

+        }

+

+// Fields

+

+        while (x.more()) {

+            String name = x.nextTo(':');

+            x.next(':');

+            jo.put(name, x.nextTo('\0'));

+            x.next();

+        }

+        return jo;

+    }

+

+

+    /**

+     * Convert a JSONObject into an HTTP header. A request header must contain

+     * <pre>{

+     *    Method: "POST" (for example),

+     *    "Request-URI": "/" (for example),

+     *    "HTTP-Version": "HTTP/1.1" (for example)

+     * }</pre>

+     * A response header must contain

+     * <pre>{

+     *    "HTTP-Version": "HTTP/1.1" (for example),

+     *    "Status-Code": "200" (for example),

+     *    "Reason-Phrase": "OK" (for example)

+     * }</pre>

+     * Any other members of the JSONObject will be output as HTTP fields.

+     * The result will end with two CRLF pairs.

+     * @param jo A JSONObject

+     * @return An HTTP header string.

+     * @throws JSONException if the object does not contain enough

+     *  information.

+     */

+    public static String toString(JSONObject jo) throws JSONException {

+        Iterator<String> keys = jo.keys();

+        String       string;

+        StringBuffer sb = new StringBuffer();

+        if (jo.has("Status-Code") && jo.has("Reason-Phrase")) {

+            sb.append(jo.getString("HTTP-Version"));

+            sb.append(' ');

+            sb.append(jo.getString("Status-Code"));

+            sb.append(' ');

+            sb.append(jo.getString("Reason-Phrase"));

+        } else if (jo.has("Method") && jo.has("Request-URI")) {

+            sb.append(jo.getString("Method"));

+            sb.append(' ');

+            sb.append('"');

+            sb.append(jo.getString("Request-URI"));

+            sb.append('"');

+            sb.append(' ');

+            sb.append(jo.getString("HTTP-Version"));

+        } else {

+            throw new JSONException("Not enough material for an HTTP header.");

+        }

+        sb.append(CRLF);

+        while (keys.hasNext()) {

+            string = keys.next().toString();

+            if (!"HTTP-Version".equals(string)      && !"Status-Code".equals(string) &&

+                    !"Reason-Phrase".equals(string) && !"Method".equals(string) &&

+                    !"Request-URI".equals(string)   && !jo.isNull(string)) {

+                sb.append(string);

+                sb.append(": ");

+                sb.append(jo.getString(string));

+                sb.append(CRLF);

+            }

+        }

+        sb.append(CRLF);

+        return sb.toString();

+    }

+}
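For orientation, a hedged sketch of the header round trip described above; the header text is a simplified example using '\n' line separators, and the exact field order of toString() depends on the JSONObject's key iteration order.

import org.json.HTTP;
import org.json.JSONException;
import org.json.JSONObject;

public class HttpHeaderDemo {
    public static void main(String[] args) throws JSONException {
        String header = "HTTP/1.1 200 OK\nContent-Type: text/plain\nCache-Control: no-cache\n";
        JSONObject jo = HTTP.toJSONObject(header);
        System.out.println(jo.getString("Status-Code"));   // 200
        System.out.println(jo.getString("Content-Type"));  // text/plain
        // Convert back; the result is CRLF separated and ends with two CRLF pairs.
        System.out.println(HTTP.toString(jo));
    }
}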

diff --git a/datarouter-prov/src/main/java/org/json/HTTPTokener.java b/datarouter-prov/src/main/java/org/json/HTTPTokener.java
new file mode 100644
index 0000000..0594e74
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/HTTPTokener.java
@@ -0,0 +1,99 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * The HTTPTokener extends the JSONTokener to provide additional methods

+ * for the parsing of HTTP headers.

+ * @author JSON.org

+ * @version 2012-11-13

+ */

+public class HTTPTokener extends JSONTokener {

+

+    /**

+     * Construct an HTTPTokener from a string.

+     * @param string A source string.

+     */

+    public HTTPTokener(String string) {

+        super(string);

+    }

+

+

+    /**

+     * Get the next token or string. This is used in parsing HTTP headers.

+     * @throws JSONException

+     * @return A String.

+     */

+    public String nextToken() throws JSONException {

+        char c;

+        char q;

+        StringBuffer sb = new StringBuffer();

+        do {

+            c = next();

+        } while (Character.isWhitespace(c));

+        if (c == '"' || c == '\'') {

+            q = c;

+            for (;;) {

+                c = next();

+                if (c < ' ') {

+                    throw syntaxError("Unterminated string.");

+                }

+                if (c == q) {

+                    return sb.toString();

+                }

+                sb.append(c);

+            }

+        }

+        for (;;) {

+            if (c == 0 || Character.isWhitespace(c)) {

+                return sb.toString();

+            }

+            sb.append(c);

+            c = next();

+        }

+    }

+}
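A brief, hedged sketch of how nextToken() splits a status line into whitespace-delimited (or quoted) tokens; HTTP.toJSONObject above consumes headers in the same way.

import org.json.HTTPTokener;
import org.json.JSONException;

public class HttpTokenerDemo {
    public static void main(String[] args) throws JSONException {
        HTTPTokener x = new HTTPTokener("HTTP/1.1 200 OK");
        System.out.println(x.nextToken()); // HTTP/1.1
        System.out.println(x.nextToken()); // 200
        System.out.println(x.nextToken()); // OK
    }
}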

diff --git a/datarouter-prov/src/main/java/org/json/JSONArray.java b/datarouter-prov/src/main/java/org/json/JSONArray.java
new file mode 100644
index 0000000..c9e7c42
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONArray.java
@@ -0,0 +1,970 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+ Copyright (c) 2002 JSON.org

+

+ Permission is hereby granted, free of charge, to any person obtaining a copy

+ of this software and associated documentation files (the "Software"), to deal

+ in the Software without restriction, including without limitation the rights

+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+ copies of the Software, and to permit persons to whom the Software is

+ furnished to do so, subject to the following conditions:

+

+ The above copyright notice and this permission notice shall be included in all

+ copies or substantial portions of the Software.

+

+ The Software shall be used for Good, not Evil.

+

+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+ SOFTWARE.

+ */

+

+import java.io.IOException;

+import java.io.StringWriter;

+import java.io.Writer;

+import java.lang.reflect.Array;

+import java.util.ArrayList;

+import java.util.Collection;

+import java.util.Iterator;

+import java.util.List;

+import java.util.Map;

+

+/**

+ * A JSONArray is an ordered sequence of values. Its external text form is a

+ * string wrapped in square brackets with commas separating the values. The

+ * internal form is an object having <code>get</code> and <code>opt</code>

+ * methods for accessing the values by index, and <code>put</code> methods for

+ * adding or replacing values. The values can be any of these types:

+ * <code>Boolean</code>, <code>JSONArray</code>, <code>JSONObject</code>,

+ * <code>Number</code>, <code>String</code>, or the

+ * <code>JSONObject.NULL object</code>.

+ * <p>

+ * The constructor can convert a JSON text into a Java object. The

+ * <code>toString</code> method converts to JSON text.

+ * <p>

+ * A <code>get</code> method returns a value if one can be found, and throws an

+ * exception if one cannot be found. An <code>opt</code> method returns a

+ * default value instead of throwing an exception, and so is useful for

+ * obtaining optional values.

+ * <p>

+ * The generic <code>get()</code> and <code>opt()</code> methods return an

+ * object which you can cast or query for type. There are also typed

+ * <code>get</code> and <code>opt</code> methods that do type checking and type

+ * coercion for you.

+ * <p>

+ * The texts produced by the <code>toString</code> methods strictly conform to

+ * JSON syntax rules. The constructors are more forgiving in the texts they will

+ * accept:

+ * <ul>

+ * <li>An extra <code>,</code>&nbsp;<small>(comma)</small> may appear just

+ * before the closing bracket.</li>

+ * <li>The <code>null</code> value will be inserted when there is <code>,</code>

+ * &nbsp;<small>(comma)</small> elision.</li>

+ * <li>Strings may be quoted with <code>'</code>&nbsp;<small>(single

+ * quote)</small>.</li>

+ * <li>Strings do not need to be quoted at all if they do not begin with a quote

+ * or single quote, and if they do not contain leading or trailing spaces, and

+ * if they do not contain any of these characters:

+ * <code>{ } [ ] / \ : , = ; #</code> and if they do not look like numbers and

+ * if they are not the reserved words <code>true</code>, <code>false</code>, or

+ * <code>null</code>.</li>

+ * <li>Values can be separated by <code>;</code> <small>(semicolon)</small> as

+ * well as by <code>,</code> <small>(comma)</small>.</li>

+ * </ul>

+ *

+ * @author JSON.org

+ * @version 2012-11-13

+ */

+public class JSONArray {

+

+    /**

+     * The arrayList where the JSONArray's properties are kept.

+     */

+    private final List<Object> myArrayList;

+

+    /**

+     * Construct an empty JSONArray.

+     */

+    public JSONArray() {

+        this.myArrayList = new ArrayList<Object>();

+    }

+

+    /**

+     * Construct a JSONArray from a JSONTokener.

+     *

+     * @param x

+     *            A JSONTokener

+     * @throws JSONException

+     *             If there is a syntax error.

+     */

+    public JSONArray(JSONTokener x) throws JSONException {

+        this();

+        if (x.nextClean() != '[') {

+            throw x.syntaxError("A JSONArray text must start with '['");

+        }

+        if (x.nextClean() != ']') {

+            x.back();

+            for (;;) {

+                if (x.nextClean() == ',') {

+                    x.back();

+                    this.myArrayList.add(JSONObject.NULL);

+                } else {

+                    x.back();

+                    this.myArrayList.add(x.nextValue());

+                }

+                switch (x.nextClean()) {

+                case ';':

+                case ',':

+                    if (x.nextClean() == ']') {

+                        return;

+                    }

+                    x.back();

+                    break;

+                case ']':

+                    return;

+                default:

+                    throw x.syntaxError("Expected a ',' or ']'");

+                }

+            }

+        }

+    }

+

+    /**

+     * Construct a JSONArray from a source JSON text.

+     *

+     * @param source

+     *            A string that begins with <code>[</code>&nbsp;<small>(left

+     *            bracket)</small> and ends with <code>]</code>

+     *            &nbsp;<small>(right bracket)</small>.

+     * @throws JSONException

+     *             If there is a syntax error.

+     */

+    public JSONArray(String source) throws JSONException {

+        this(new JSONTokener(source));

+    }

+

+    /**

+     * Construct a JSONArray from a Collection.

+     *

+     * @param collection

+     *            A Collection.

+     */

+    public JSONArray(Collection<Object> collection) {

+        this.myArrayList = new ArrayList<Object>();

+        if (collection != null) {

+            Iterator<Object> iter = collection.iterator();

+            while (iter.hasNext()) {

+                this.myArrayList.add(JSONObject.wrap(iter.next()));

+            }

+        }

+    }

+

+    /**

+     * Construct a JSONArray from an array

+     *

+     * @throws JSONException

+     *             If not an array.

+     */

+    public JSONArray(Object array) throws JSONException {

+        this();

+        if (array.getClass().isArray()) {

+            int length = Array.getLength(array);

+            for (int i = 0; i < length; i += 1) {

+                this.put(JSONObject.wrap(Array.get(array, i)));

+            }

+        } else {

+            throw new JSONException(

+                    "JSONArray initial value should be a string or collection or array.");

+        }

+    }

+

+    /**

+     * Get the object value associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return An object value.

+     * @throws JSONException

+     *             If there is no value for the index.

+     */

+    public Object get(int index) throws JSONException {

+        Object object = this.opt(index);

+        if (object == null) {

+            throw new JSONException("JSONArray[" + index + "] not found.");

+        }

+        return object;

+    }

+

+    /**

+     * Get the boolean value associated with an index. The string values "true"

+     * and "false" are converted to boolean.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The truth.

+     * @throws JSONException

+     *             If there is no value for the index or if the value is not

+     *             convertible to boolean.

+     */

+    public boolean getBoolean(int index) throws JSONException {

+        Object object = this.get(index);

+        if (object.equals(Boolean.FALSE)

+                || (object instanceof String && ((String) object)

+                        .equalsIgnoreCase("false"))) {

+            return false;

+        } else if (object.equals(Boolean.TRUE)

+                || (object instanceof String && ((String) object)

+                        .equalsIgnoreCase("true"))) {

+            return true;

+        }

+        throw new JSONException("JSONArray[" + index + "] is not a boolean.");

+    }

+

+    /**

+     * Get the double value associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     * @throws JSONException

+     *             If the key is not found or if the value cannot be converted

+     *             to a number.

+     */

+    public double getDouble(int index) throws JSONException {

+        Object object = this.get(index);

+        try {

+            return object instanceof Number ? ((Number) object).doubleValue()

+                    : Double.parseDouble((String) object);

+        } catch (Exception e) {

+            throw new JSONException("JSONArray[" + index + "] is not a number.");

+        }

+    }

+

+    /**

+     * Get the int value associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     * @throws JSONException

+     *             If the key is not found or if the value is not a number.

+     */

+    public int getInt(int index) throws JSONException {

+        Object object = this.get(index);

+        try {

+            return object instanceof Number ? ((Number) object).intValue()

+                    : Integer.parseInt((String) object);

+        } catch (Exception e) {

+            throw new JSONException("JSONArray[" + index + "] is not a number.");

+        }

+    }

+

+    /**

+     * Get the JSONArray associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return A JSONArray value.

+     * @throws JSONException

+     *             If there is no value for the index, or if the value is not a

+     *             JSONArray

+     */

+    public JSONArray getJSONArray(int index) throws JSONException {

+        Object object = this.get(index);

+        if (object instanceof JSONArray) {

+            return (JSONArray) object;

+        }

+        throw new JSONException("JSONArray[" + index + "] is not a JSONArray.");

+    }

+

+    /**

+     * Get the JSONObject associated with an index.

+     *

+     * @param index

+     *            subscript

+     * @return A JSONObject value.

+     * @throws JSONException

+     *             If there is no value for the index or if the value is not a

+     *             JSONObject

+     */

+    public JSONObject getJSONObject(int index) throws JSONException {

+        Object object = this.get(index);

+        if (object instanceof JSONObject) {

+            return (JSONObject) object;

+        }

+        throw new JSONException("JSONArray[" + index + "] is not a JSONObject.");

+    }

+

+    /**

+     * Get the long value associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     * @throws JSONException

+     *             If the key is not found or if the value cannot be converted

+     *             to a number.

+     */

+    public long getLong(int index) throws JSONException {

+        Object object = this.get(index);

+        try {

+            return object instanceof Number ? ((Number) object).longValue()

+                    : Long.parseLong((String) object);

+        } catch (Exception e) {

+            throw new JSONException("JSONArray[" + index + "] is not a number.");

+        }

+    }

+

+    /**

+     * Get the string associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return A string value.

+     * @throws JSONException

+     *             If there is no string value for the index.

+     */

+    public String getString(int index) throws JSONException {

+        Object object = this.get(index);

+        if (object instanceof String) {

+            return (String) object;

+        }

+        throw new JSONException("JSONArray[" + index + "] not a string.");

+    }

+

+    /**

+     * Determine if the value is null.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return true if the value at the index is null, or if there is no value.

+     */

+    public boolean isNull(int index) {

+        return JSONObject.NULL.equals(this.opt(index));

+    }

+

+    /**

+     * Make a string from the contents of this JSONArray. The

+     * <code>separator</code> string is inserted between each element. Warning:

+     * This method assumes that the data structure is acyclical.

+     *

+     * @param separator

+     *            A string that will be inserted between the elements.

+     * @return a string.

+     * @throws JSONException

+     *             If the array contains an invalid number.

+     */

+    public String join(String separator) throws JSONException {

+        int len = this.length();

+        StringBuffer sb = new StringBuffer();

+

+        for (int i = 0; i < len; i += 1) {

+            if (i > 0) {

+                sb.append(separator);

+            }

+            sb.append(JSONObject.valueToString(this.myArrayList.get(i)));

+        }

+        return sb.toString();

+    }

+

+    /**

+     * Get the number of elements in the JSONArray, including nulls.

+     *

+     * @return The length (or size).

+     */

+    public int length() {

+        return this.myArrayList.size();

+    }

+

+    /**

+     * Get the optional object value associated with an index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return An object value, or null if there is no object at that index.

+     */

+    public Object opt(int index) {

+        return (index < 0 || index >= this.length()) ? null : this.myArrayList

+                .get(index);

+    }

+

+    /**

+     * Get the optional boolean value associated with an index. It returns false

+     * if there is no value at that index, or if the value is not Boolean.TRUE

+     * or the String "true".

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The truth.

+     */

+    public boolean optBoolean(int index) {

+        return this.optBoolean(index, false);

+    }

+

+    /**

+     * Get the optional boolean value associated with an index. It returns the

+     * defaultValue if there is no value at that index or if it is not a Boolean

+     * or the String "true" or "false" (case insensitive).

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @param defaultValue

+     *            A boolean default.

+     * @return The truth.

+     */

+    public boolean optBoolean(int index, boolean defaultValue) {

+        try {

+            return this.getBoolean(index);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+    /**

+     * Get the optional double value associated with an index. NaN is returned

+     * if there is no value for the index, or if the value is not a number and

+     * cannot be converted to a number.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     */

+    public double optDouble(int index) {

+        return this.optDouble(index, Double.NaN);

+    }

+

+    /**

+     * Get the optional double value associated with an index. The defaultValue

+     * is returned if there is no value for the index, or if the value is not a

+     * number and cannot be converted to a number.

+     *

+     * @param index

+     *            subscript

+     * @param defaultValue

+     *            The default value.

+     * @return The value.

+     */

+    public double optDouble(int index, double defaultValue) {

+        try {

+            return this.getDouble(index);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+    /**

+     * Get the optional int value associated with an index. Zero is returned if

+     * there is no value for the index, or if the value is not a number and

+     * cannot be converted to a number.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     */

+    public int optInt(int index) {

+        return this.optInt(index, 0);

+    }

+

+    /**

+     * Get the optional int value associated with an index. The defaultValue is

+     * returned if there is no value for the index, or if the value is not a

+     * number and cannot be converted to a number.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @param defaultValue

+     *            The default value.

+     * @return The value.

+     */

+    public int optInt(int index, int defaultValue) {

+        try {

+            return this.getInt(index);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+    /**

+     * Get the optional JSONArray associated with an index.

+     *

+     * @param index

+     *            subscript

+     * @return A JSONArray value, or null if the index has no value, or if the

+     *         value is not a JSONArray.

+     */

+    public JSONArray optJSONArray(int index) {

+        Object o = this.opt(index);

+        return o instanceof JSONArray ? (JSONArray) o : null;

+    }

+

+    /**

+     * Get the optional JSONObject associated with an index. Null is returned

+     * if the index has no value, or if the value is not a JSONObject.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return A JSONObject value.

+     */

+    public JSONObject optJSONObject(int index) {

+        Object o = this.opt(index);

+        return o instanceof JSONObject ? (JSONObject) o : null;

+    }

+

+    /**

+     * Get the optional long value associated with an index. Zero is returned if

+     * there is no value for the index, or if the value is not a number and

+     * cannot be converted to a number.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return The value.

+     */

+    public long optLong(int index) {

+        return this.optLong(index, 0);

+    }

+

+    /**

+     * Get the optional long value associated with an index. The defaultValue is

+     * returned if there is no value for the index, or if the value is not a

+     * number and cannot be converted to a number.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @param defaultValue

+     *            The default value.

+     * @return The value.

+     */

+    public long optLong(int index, long defaultValue) {

+        try {

+            return this.getLong(index);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+    /**

+     * Get the optional string value associated with an index. It returns an

+     * empty string if there is no value at that index. If the value is not a

+     * string and is not null, then it is converted to a string.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @return A String value.

+     */

+    public String optString(int index) {

+        return this.optString(index, "");

+    }

+

+    /**

+     * Get the optional string associated with an index. The defaultValue is

+     * returned if there is no value at that index.

+     *

+     * @param index

+     *            The index must be between 0 and length() - 1.

+     * @param defaultValue

+     *            The default value.

+     * @return A String value.

+     */

+    public String optString(int index, String defaultValue) {

+        Object object = this.opt(index);

+        return JSONObject.NULL.equals(object) ? defaultValue : object

+                .toString();

+    }

+
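The opt* accessors above return a fallback instead of throwing, in contrast to the get* methods. A minimal usage sketch, assuming the JSONArray(String source) constructor declared earlier in this file; the class and variable names are illustrative only:

    import org.json.JSONArray;

    public class OptAccessorSketch {
        public static void main(String[] args) {
            JSONArray ja = new JSONArray("[true, 42, \"hello\"]");
            boolean flag   = ja.optBoolean(0);       // true
            int missing    = ja.optInt(10, -1);      // -1: no value at index 10
            String text    = ja.optString(2);        // "hello"
            double notANum = ja.optDouble(2);        // NaN: "hello" is not a number
            System.out.println(flag + " " + missing + " " + text + " " + notANum);
        }
    }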

+    /**

+     * Append a boolean value. This increases the array's length by one.

+     *

+     * @param value

+     *            A boolean value.

+     * @return this.

+     */

+    public JSONArray put(boolean value) {

+        this.put(value ? Boolean.TRUE : Boolean.FALSE);

+        return this;

+    }

+

+    /**

+     * Put a value in the JSONArray, where the value will be a JSONArray which

+     * is produced from a Collection.

+     *

+     * @param value

+     *            A Collection value.

+     * @return this.

+     */

+    public JSONArray put(Collection<Object> value) {

+        this.put(new JSONArray(value));

+        return this;

+    }

+

+    /**

+     * Append a double value. This increases the array's length by one.

+     *

+     * @param value

+     *            A double value.

+     * @throws JSONException

+     *             if the value is not finite.

+     * @return this.

+     */

+    public JSONArray put(double value) throws JSONException {

+        Double d = new Double(value);

+        JSONObject.testValidity(d);

+        this.put(d);

+        return this;

+    }

+

+    /**

+     * Append an int value. This increases the array's length by one.

+     *

+     * @param value

+     *            An int value.

+     * @return this.

+     */

+    public JSONArray put(int value) {

+        this.put(new Integer(value));

+        return this;

+    }

+

+    /**

+     * Append a long value. This increases the array's length by one.

+     *

+     * @param value

+     *            A long value.

+     * @return this.

+     */

+    public JSONArray put(long value) {

+        this.put(new Long(value));

+        return this;

+    }

+

+    /**

+     * Put a value in the JSONArray, where the value will be a JSONObject which

+     * is produced from a Map.

+     *

+     * @param value

+     *            A Map value.

+     * @return this.

+     */

+    public JSONArray put(Map<String,Object> value) {

+        this.put(new JSONObject(value));

+        return this;

+    }

+

+    /**

+     * Append an object value. This increases the array's length by one.

+     *

+     * @param value

+     *            An object value. The value should be a Boolean, Double,

+     *            Integer, JSONArray, JSONObject, Long, or String, or the

+     *            JSONObject.NULL object.

+     * @return this.

+     */

+    public JSONArray put(Object value) {

+        this.myArrayList.add(value);

+        return this;

+    }

+

+    /**

+     * Put or replace a boolean value in the JSONArray. If the index is greater

+     * than the length of the JSONArray, then null elements will be added as

+     * necessary to pad it out.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            A boolean value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative.

+     */

+    public JSONArray put(int index, boolean value) throws JSONException {

+        this.put(index, value ? Boolean.TRUE : Boolean.FALSE);

+        return this;

+    }

+

+    /**

+     * Put a value in the JSONArray, where the value will be a JSONArray which

+     * is produced from a Collection.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            A Collection value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative or if the value is not finite.

+     */

+    public JSONArray put(int index, Collection<Object> value) throws JSONException {

+        this.put(index, new JSONArray(value));

+        return this;

+    }

+

+    /**

+     * Put or replace a double value. If the index is greater than the length of

+     * the JSONArray, then null elements will be added as necessary to pad it

+     * out.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            A double value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative or if the value is not finite.

+     */

+    public JSONArray put(int index, double value) throws JSONException {

+        this.put(index, new Double(value));

+        return this;

+    }

+

+    /**

+     * Put or replace an int value. If the index is greater than the length of

+     * the JSONArray, then null elements will be added as necessary to pad it

+     * out.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            An int value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative.

+     */

+    public JSONArray put(int index, int value) throws JSONException {

+        this.put(index, new Integer(value));

+        return this;

+    }

+

+    /**

+     * Put or replace a long value. If the index is greater than the length of

+     * the JSONArray, then null elements will be added as necessary to pad it

+     * out.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            A long value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative.

+     */

+    public JSONArray put(int index, long value) throws JSONException {

+        this.put(index, new Long(value));

+        return this;

+    }

+

+    /**

+     * Put a value in the JSONArray, where the value will be a JSONObject that

+     * is produced from a Map.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            The Map value.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative or if the value is an invalid

+     *             number.

+     */

+    public JSONArray put(int index, Map<String,Object> value) throws JSONException {

+        this.put(index, new JSONObject(value));

+        return this;

+    }

+

+    /**

+     * Put or replace an object value in the JSONArray. If the index is greater

+     * than the length of the JSONArray, then null elements will be added as

+     * necessary to pad it out.

+     *

+     * @param index

+     *            The subscript.

+     * @param value

+     *            The value to put into the array. The value should be a

+     *            Boolean, Double, Integer, JSONArray, JSONObject, Long, or

+     *            String, or the JSONObject.NULL object.

+     * @return this.

+     * @throws JSONException

+     *             If the index is negative or if the value is an invalid

+     *             number.

+     */

+    public JSONArray put(int index, Object value) throws JSONException {

+        JSONObject.testValidity(value);

+        if (index < 0) {

+            throw new JSONException("JSONArray[" + index + "] not found.");

+        }

+        if (index < this.length()) {

+            this.myArrayList.set(index, value);

+        } else {

+            while (index != this.length()) {

+                this.put(JSONObject.NULL);

+            }

+            this.put(value);

+        }

+        return this;

+    }

+
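The index-based put above pads with JSONObject.NULL rather than throwing when the index is past the end. A short sketch using only methods defined in this class; the class name is illustrative:

    import org.json.JSONArray;

    public class PutPaddingSketch {
        public static void main(String[] args) {
            JSONArray ja = new JSONArray();
            ja.put(0, "a");
            ja.put(3, "d");                  // indexes 1 and 2 are padded with JSONObject.NULL
            System.out.println(ja.length()); // 4
            System.out.println(ja);          // ["a",null,null,"d"]
        }
    }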

+    /**

+     * Remove an index and close the hole.

+     *

+     * @param index

+     *            The index of the element to be removed.

+     * @return The value that was associated with the index, or null if there

+     *         was no value.

+     */

+    public Object remove(int index) {

+        Object o = this.opt(index);

+        this.myArrayList.remove(index);

+        return o;

+    }

+

+    /**

+     * Produce a JSONObject by combining a JSONArray of names with the values of

+     * this JSONArray.

+     *

+     * @param names

+     *            A JSONArray containing a list of key strings. These will be

+     *            paired with the values.

+     * @return A JSONObject, or null if there are no names or if this JSONArray

+     *         has no values.

+     * @throws JSONException

+     *             If any of the names are null.

+     */

+    public JSONObject toJSONObject(JSONArray names) throws JSONException {

+        if (names == null || names.length() == 0 || this.length() == 0) {

+            return null;

+        }

+        JSONObject jo = new JSONObject();

+        for (int i = 0; i < names.length(); i += 1) {

+            jo.put(names.getString(i), this.opt(i));

+        }

+        return jo;

+    }

+
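A small sketch of toJSONObject(JSONArray names), pairing a names array with this array's values; names here are illustrative and key order in the result is not guaranteed:

    import org.json.JSONArray;
    import org.json.JSONObject;

    public class ToJSONObjectSketch {
        public static void main(String[] args) {
            JSONArray names  = new JSONArray().put("id").put("name");
            JSONArray values = new JSONArray().put(7).put("feed-1");
            JSONObject jo = values.toJSONObject(names);
            System.out.println(jo);  // {"id":7,"name":"feed-1"} (key order may vary)
        }
    }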

+    /**

+     * Make a JSON text of this JSONArray. For compactness, no unnecessary

+     * whitespace is added. If it is not possible to produce a syntactically

+     * correct JSON text then null will be returned instead. This could occur if

+     * the array contains an invalid number.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return a printable, displayable, transmittable representation of the

+     *         array.

+     */

+    public String toString() {

+        try {

+            return this.toString(0);

+        } catch (Exception e) {

+            return null;

+        }

+    }

+

+    /**

+     * Make a prettyprinted JSON text of this JSONArray. Warning: This method

+     * assumes that the data structure is acyclical.

+     *

+     * @param indentFactor

+     *            The number of spaces to add to each level of indentation.

+     * @return a printable, displayable, transmittable representation of the

+     *         object, beginning with <code>[</code>&nbsp;<small>(left

+     *         bracket)</small> and ending with <code>]</code>

+     *         &nbsp;<small>(right bracket)</small>.

+     * @throws JSONException

+     */

+    public String toString(int indentFactor) throws JSONException {

+        StringWriter sw = new StringWriter();

+        synchronized (sw.getBuffer()) {

+            return this.write(sw, indentFactor, 0).toString();

+        }

+    }

+
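A quick comparison of the compact and pretty-printed forms produced by toString() and toString(indentFactor); the class name is illustrative:

    import org.json.JSONArray;

    public class ToStringSketch {
        public static void main(String[] args) {
            JSONArray ja = new JSONArray().put(1).put(2).put(new JSONArray().put("x"));
            System.out.println(ja.toString());  // compact: [1,2,["x"]]
            System.out.println(ja.toString(4)); // one element per line, indented in steps of 4 spaces
        }
    }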

+    /**

+     * Write the contents of the JSONArray as JSON text to a writer. For

+     * compactness, no whitespace is added.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return The writer.

+     * @throws JSONException

+     */

+    public Writer write(Writer writer) throws JSONException {

+        return this.write(writer, 0, 0);

+    }

+
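The Writer overload streams the JSON text directly instead of building an intermediate String; a minimal sketch with an illustrative class name:

    import java.io.StringWriter;
    import org.json.JSONArray;

    public class WriteSketch {
        public static void main(String[] args) {
            JSONArray ja = new JSONArray().put(1).put("two");
            StringWriter sw = new StringWriter();
            ja.write(sw);             // streams compact JSON into the writer
            System.out.println(sw);   // [1,"two"]
        }
    }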

+    /**

+     * Write the contents of the JSONArray as JSON text to a writer. For

+     * compactness, no whitespace is added.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @param indentFactor

+     *            The number of spaces to add to each level of indentation.

+     * @param indent

+     *            The indention of the top level.

+     * @return The writer.

+     * @throws JSONException

+     */

+    Writer write(Writer writer, int indentFactor, int indent)

+            throws JSONException {

+        try {

+            boolean commanate = false;

+            int length = this.length();

+            writer.write('[');

+

+            if (length == 1) {

+                JSONObject.writeValue(writer, this.myArrayList.get(0),

+                        indentFactor, indent);

+            } else if (length != 0) {

+                final int newindent = indent + indentFactor;

+

+                for (int i = 0; i < length; i += 1) {

+                    if (commanate) {

+                        writer.write(',');

+                    }

+                    if (indentFactor > 0) {

+                        writer.write('\n');

+                    }

+                    JSONObject.indent(writer, newindent);

+                    JSONObject.writeValue(writer, this.myArrayList.get(i),

+                            indentFactor, newindent);

+                    commanate = true;

+                }

+                if (indentFactor > 0) {

+                    writer.write('\n');

+                }

+                JSONObject.indent(writer, indent);

+            }

+            writer.write(']');

+            return writer;

+        } catch (IOException e) {

+            throw new JSONException(e);

+        }

+    }

+}

diff --git a/datarouter-prov/src/main/java/org/json/JSONException.java b/datarouter-prov/src/main/java/org/json/JSONException.java
new file mode 100644
index 0000000..2308eb2
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONException.java
@@ -0,0 +1,63 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/**

+ * The JSONException is thrown by the JSON.org classes when things are amiss.

+ *

+ * @author JSON.org

+ * @version 2013-02-10

+ */

+public class JSONException extends RuntimeException {

+    private static final long serialVersionUID = 0;

+    private Throwable cause;

+

+    /**

+     * Constructs a JSONException with an explanatory message.

+     *

+     * @param message

+     *            Detail about the reason for the exception.

+     */

+    public JSONException(String message) {

+        super(message);

+    }

+

+    /**

+     * Constructs a new JSONException with the specified cause.

+     */

+    public JSONException(Throwable cause) {

+        super(cause.getMessage());

+        this.cause = cause;

+    }

+

+    /**

+     * Returns the cause of this exception or null if the cause is nonexistent

+     * or unknown.

+     *

+     * @return the cause of this exception or null if the cause is nonexistent

+     *          or unknown.

+     */

+    public Throwable getCause() {

+        return this.cause;

+    }

+}
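Because JSONException extends RuntimeException here, callers may catch it but are not forced to declare it. A small sketch, assuming the JSONArray(String source) constructor defined in JSONArray.java; the names are illustrative:

    import org.json.JSONArray;
    import org.json.JSONException;

    public class JSONExceptionSketch {
        public static void main(String[] args) {
            try {
                new JSONArray("[1, 2,");              // truncated source text
            } catch (JSONException e) {
                System.out.println(e.getMessage());   // syntax error description
                System.out.println(e.getCause());     // null unless wrapping another Throwable
            }
        }
    }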

diff --git a/datarouter-prov/src/main/java/org/json/JSONML.java b/datarouter-prov/src/main/java/org/json/JSONML.java
new file mode 100644
index 0000000..5afb599
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONML.java
@@ -0,0 +1,489 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2008 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.util.Iterator;

+

+

+/**

+ * This provides static methods to convert an XML text into a JSONArray or

+ * JSONObject, and to convert a JSONArray or JSONObject into an XML text using

+ * the JsonML transform.

+ *

+ * @author JSON.org

+ * @version 2012-03-28

+ */

+public class JSONML {

+

+    /**

+     * Parse XML values and store them in a JSONArray.

+     * @param x       The XMLTokener containing the source string.

+     * @param arrayForm true if array form, false if object form.

+     * @param ja      The JSONArray containing the current tag, or null
+
+     *     if we are at the outermost level.

+     * @return A JSONArray if the value is the outermost tag, otherwise null.

+     * @throws JSONException

+     */

+    private static Object parse(

+        XMLTokener x,

+        boolean    arrayForm,

+        JSONArray  ja

+    ) throws JSONException {

+        String     attribute;

+        char       c;

+        String       closeTag = null;

+        int        i;

+        JSONArray  newja = null;

+        JSONObject newjo = null;

+        Object     token;

+        String       tagName = null;

+

+// Test for and skip past these forms:

+//      <!-- ... -->

+//      <![  ... ]]>

+//      <!   ...   >

+//      <?   ...  ?>

+

+        while (true) {

+            if (!x.more()) {

+                throw x.syntaxError("Bad XML");

+            }

+            token = x.nextContent();

+            if (token == XML.LT) {

+                token = x.nextToken();

+                if (token instanceof Character) {

+                    if (token == XML.SLASH) {

+

+// Close tag </

+

+                        token = x.nextToken();

+                        if (!(token instanceof String)) {

+                            throw new JSONException(

+                                    "Expected a closing name instead of '" +

+                                    token + "'.");

+                        }

+                        if (x.nextToken() != XML.GT) {

+                            throw x.syntaxError("Misshaped close tag");

+                        }

+                        return token;

+                    } else if (token == XML.BANG) {

+

+// <!

+

+                        c = x.next();

+                        if (c == '-') {

+                            if (x.next() == '-') {

+                                x.skipPast("-->");

+                            } else {

+                                x.back();

+                            }

+                        } else if (c == '[') {

+                            token = x.nextToken();

+                            if (token.equals("CDATA") && x.next() == '[') {

+                                if (ja != null) {

+                                    ja.put(x.nextCDATA());

+                                }

+                            } else {

+                                throw x.syntaxError("Expected 'CDATA['");

+                            }

+                        } else {

+                            i = 1;

+                            do {

+                                token = x.nextMeta();

+                                if (token == null) {

+                                    throw x.syntaxError("Missing '>' after '<!'.");

+                                } else if (token == XML.LT) {

+                                    i += 1;

+                                } else if (token == XML.GT) {

+                                    i -= 1;

+                                }

+                            } while (i > 0);

+                        }

+                    } else if (token == XML.QUEST) {

+

+// <?

+

+                        x.skipPast("?>");

+                    } else {

+                        throw x.syntaxError("Misshaped tag");

+                    }

+

+// Open tag <

+

+                } else {

+                    if (!(token instanceof String)) {

+                        throw x.syntaxError("Bad tagName '" + token + "'.");

+                    }

+                    tagName = (String)token;

+                    newja = new JSONArray();

+                    newjo = new JSONObject();

+                    if (arrayForm) {

+                        newja.put(tagName);

+                        if (ja != null) {

+                            ja.put(newja);

+                        }

+                    } else {

+                        newjo.put("tagName", tagName);

+                        if (ja != null) {

+                            ja.put(newjo);

+                        }

+                    }

+                    token = null;

+                    for (;;) {

+                        if (token == null) {

+                            token = x.nextToken();

+                        }

+                        if (token == null) {

+                            throw x.syntaxError("Misshaped tag");

+                        }

+                        if (!(token instanceof String)) {

+                            break;

+                        }

+

+// attribute = value

+

+                        attribute = (String)token;

+                        if (!arrayForm && ("tagName".equals(attribute) || "childNode".equals(attribute))) {

+                            throw x.syntaxError("Reserved attribute.");

+                        }

+                        token = x.nextToken();

+                        if (token == XML.EQ) {

+                            token = x.nextToken();

+                            if (!(token instanceof String)) {

+                                throw x.syntaxError("Missing value");

+                            }

+                            newjo.accumulate(attribute, XML.stringToValue((String)token));

+                            token = null;

+                        } else {

+                            newjo.accumulate(attribute, "");

+                        }

+                    }

+                    if (arrayForm && newjo.length() > 0) {

+                        newja.put(newjo);

+                    }

+

+// Empty tag <.../>

+

+                    if (token == XML.SLASH) {

+                        if (x.nextToken() != XML.GT) {

+                            throw x.syntaxError("Misshaped tag");

+                        }

+                        if (ja == null) {

+                            if (arrayForm) {

+                                return newja;

+                            } else {

+                                return newjo;

+                            }

+                        }

+

+// Content, between <...> and </...>

+

+                    } else {

+                        if (token != XML.GT) {

+                            throw x.syntaxError("Misshaped tag");

+                        }

+                        closeTag = (String)parse(x, arrayForm, newja);

+                        if (closeTag != null) {

+                            if (!closeTag.equals(tagName)) {

+                                throw x.syntaxError("Mismatched '" + tagName +

+                                        "' and '" + closeTag + "'");

+                            }

+                            tagName = null;

+                            if (!arrayForm && newja.length() > 0) {

+                                newjo.put("childNodes", newja);

+                            }

+                            if (ja == null) {

+                                if (arrayForm) {

+                                    return newja;

+                                } else {

+                                    return newjo;

+                                }

+                            }

+                        }

+                    }

+                }

+            } else {

+                if (ja != null) {

+                    ja.put(token instanceof String

+                        ? XML.stringToValue((String)token)

+                        : token);

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Convert a well-formed (but not necessarily valid) XML string into a

+     * JSONArray using the JsonML transform. Each XML tag is represented as

+     * a JSONArray in which the first element is the tag name. If the tag has

+     * attributes, then the second element will be a JSONObject containing the

+     * name/value pairs. If the tag contains children, then strings and

+     * JSONArrays will represent the child tags.

+     * Comments, prologs, DTDs, and <code>&lt;[ [ ]]></code> are ignored.

+     * @param string The source string.

+     * @return A JSONArray containing the structured data from the XML string.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(String string) throws JSONException {

+        return toJSONArray(new XMLTokener(string));

+    }

+

+

+    /**

+     * Convert a well-formed (but not necessarily valid) XML string into a

+     * JSONArray using the JsonML transform. Each XML tag is represented as

+     * a JSONArray in which the first element is the tag name. If the tag has

+     * attributes, then the second element will be a JSONObject containing the

+     * name/value pairs. If the tag contains children, then strings and

+     * JSONArrays will represent the child content and tags.

+     * Comments, prologs, DTDs, and <code>&lt;[ [ ]]></code> are ignored.

+     * @param x An XMLTokener.

+     * @return A JSONArray containing the structured data from the XML string.

+     * @throws JSONException

+     */

+    public static JSONArray toJSONArray(XMLTokener x) throws JSONException {

+        return (JSONArray)parse(x, true, null);

+    }

+

+

+    /**

+     * Convert a well-formed (but not necessarily valid) XML string into a

+     * JSONObject using the JsonML transform. Each XML tag is represented as

+     * a JSONObject with a "tagName" property. If the tag has attributes, then

+     * the attributes will be in the JSONObject as properties. If the tag

+     * contains children, the object will have a "childNodes" property which

+     * will be an array of strings and JsonML JSONObjects.

+

+     * Comments, prologs, DTDs, and <code>&lt;[ [ ]]></code> are ignored.

+     * @param x An XMLTokener of the XML source text.

+     * @return A JSONObject containing the structured data from the XML string.

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(XMLTokener x) throws JSONException {

+           return (JSONObject)parse(x, false, null);

+    }

+

+

+    /**

+     * Convert a well-formed (but not necessarily valid) XML string into a

+     * JSONObject using the JsonML transform. Each XML tag is represented as

+     * a JSONObject with a "tagName" property. If the tag has attributes, then

+     * the attributes will be in the JSONObject as properties. If the tag

+     * contains children, the object will have a "childNodes" property which

+     * will be an array of strings and JsonML JSONObjects.

+

+     * Comments, prologs, DTDs, and <code>&lt;[ [ ]]></code> are ignored.

+     * @param string The XML source text.

+     * @return A JSONObject containing the structured data from the XML string.

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(String string) throws JSONException {

+        return toJSONObject(new XMLTokener(string));

+    }

+

+

+    /**

+     * Reverse the JSONML transformation, making an XML text from a JSONArray.

+     * @param ja A JSONArray.

+     * @return An XML string.

+     * @throws JSONException

+     */

+    public static String toString(JSONArray ja) throws JSONException {

+        int             i;

+        JSONObject   jo;

+        String       key;

+        Iterator<String> keys;

+        int             length;

+        Object         object;

+        StringBuffer sb = new StringBuffer();

+        String       tagName;

+        String       value;

+

+// Emit <tagName

+

+        tagName = ja.getString(0);

+        XML.noSpace(tagName);

+        tagName = XML.escape(tagName);

+        sb.append('<');

+        sb.append(tagName);

+

+        object = ja.opt(1);

+        if (object instanceof JSONObject) {

+            i = 2;

+            jo = (JSONObject)object;

+

+// Emit the attributes

+

+            keys = jo.keys();

+            while (keys.hasNext()) {

+                key = keys.next().toString();

+                XML.noSpace(key);

+                value = jo.optString(key);

+                if (value != null) {

+                    sb.append(' ');

+                    sb.append(XML.escape(key));

+                    sb.append('=');

+                    sb.append('"');

+                    sb.append(XML.escape(value));

+                    sb.append('"');

+                }

+            }

+        } else {

+            i = 1;

+        }

+

+//Emit content in body

+

+        length = ja.length();

+        if (i >= length) {

+            sb.append('/');

+            sb.append('>');

+        } else {

+            sb.append('>');

+            do {

+                object = ja.get(i);

+                i += 1;

+                if (object != null) {

+                    if (object instanceof String) {

+                        sb.append(XML.escape(object.toString()));

+                    } else if (object instanceof JSONObject) {

+                        sb.append(toString((JSONObject)object));

+                    } else if (object instanceof JSONArray) {

+                        sb.append(toString((JSONArray)object));

+                    }

+                }

+            } while (i < length);

+            sb.append('<');

+            sb.append('/');

+            sb.append(tagName);

+            sb.append('>');

+        }

+        return sb.toString();

+    }

+

+    /**

+     * Reverse the JSONML transformation, making an XML text from a JSONObject.

+     * The JSONObject must contain a "tagName" property. If it has children,

+     * then it must have a "childNodes" property containing an array of objects.

+     * The other properties are attributes with string values.

+     * @param jo A JSONObject.

+     * @return An XML string.

+     * @throws JSONException

+     */

+    public static String toString(JSONObject jo) throws JSONException {

+        StringBuffer sb = new StringBuffer();

+        int          i;

+        JSONArray    ja;

+        String       key;

+        Iterator<String> keys;

+        int          length;

+        Object         object;

+        String       tagName;

+        String       value;

+

+//Emit <tagName

+

+        tagName = jo.optString("tagName");

+        if (tagName == null) {

+            return XML.escape(jo.toString());

+        }

+        XML.noSpace(tagName);

+        tagName = XML.escape(tagName);

+        sb.append('<');

+        sb.append(tagName);

+

+//Emit the attributes

+

+        keys = jo.keys();

+        while (keys.hasNext()) {

+            key = keys.next().toString();

+            if (!"tagName".equals(key) && !"childNodes".equals(key)) {

+                XML.noSpace(key);

+                value = jo.optString(key);

+                if (value != null) {

+                    sb.append(' ');

+                    sb.append(XML.escape(key));

+                    sb.append('=');

+                    sb.append('"');

+                    sb.append(XML.escape(value));

+                    sb.append('"');

+                }

+            }

+        }

+

+//Emit content in body

+

+        ja = jo.optJSONArray("childNodes");

+        if (ja == null) {

+            sb.append('/');

+            sb.append('>');

+        } else {

+            sb.append('>');

+            length = ja.length();

+            for (i = 0; i < length; i += 1) {

+                object = ja.get(i);

+                if (object != null) {

+                    if (object instanceof String) {

+                        sb.append(XML.escape(object.toString()));

+                    } else if (object instanceof JSONObject) {

+                        sb.append(toString((JSONObject)object));

+                    } else if (object instanceof JSONArray) {

+                        sb.append(toString((JSONArray)object));

+                    } else {

+                        sb.append(object.toString());

+                    }

+                }

+            }

+            sb.append('<');

+            sb.append('/');

+            sb.append(tagName);

+            sb.append('>');

+        }

+        return sb.toString();

+    }

+}
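A short round-trip sketch of the JsonML transform in object form; exact key and attribute order in the output may vary, and numeric-looking attribute values are converted by XML.stringToValue. Names here are illustrative:

    import org.json.JSONML;
    import org.json.JSONObject;

    public class JSONMLSketch {
        public static void main(String[] args) {
            String xml = "<feed id=\"7\"><name>feed-1</name></feed>";
            JSONObject jo = JSONML.toJSONObject(xml);
            // roughly {"tagName":"feed","id":7,"childNodes":[{"tagName":"name","childNodes":["feed-1"]}]}
            System.out.println(jo);
            System.out.println(JSONML.toString(jo)); // back to an equivalent XML string
        }
    }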

diff --git a/datarouter-prov/src/main/java/org/json/JSONObject.java b/datarouter-prov/src/main/java/org/json/JSONObject.java
new file mode 100644
index 0000000..b4b0fe5
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONObject.java
@@ -0,0 +1,1653 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.io.IOException;

+import java.io.StringWriter;

+import java.io.Writer;

+import java.lang.reflect.Field;

+import java.lang.reflect.Method;

+import java.lang.reflect.Modifier;

+import java.util.Collection;

+import java.util.Enumeration;

+import java.util.HashMap;

+import java.util.Iterator;

+import java.util.Locale;

+import java.util.Map;

+import java.util.ResourceBundle;

+import java.util.Set;

+

+/**

+ * A JSONObject is an unordered collection of name/value pairs. Its external

+ * form is a string wrapped in curly braces with colons between the names and

+ * values, and commas between the values and names. The internal form is an

+ * object having <code>get</code> and <code>opt</code> methods for accessing the

+ * values by name, and <code>put</code> methods for adding or replacing values

+ * by name. The values can be any of these types: <code>Boolean</code>,

+ * <code>JSONArray</code>, <code>JSONObject</code>, <code>Number</code>,

+ * <code>String</code>, or the <code>JSONObject.NULL</code> object. A JSONObject

+ * constructor can be used to convert an external form JSON text into an

+ * internal form whose values can be retrieved with the <code>get</code> and

+ * <code>opt</code> methods, or to convert values into a JSON text using the

+ * <code>put</code> and <code>toString</code> methods. A <code>get</code> method

+ * returns a value if one can be found, and throws an exception if one cannot be

+ * found. An <code>opt</code> method returns a default value instead of throwing

+ * an exception, and so is useful for obtaining optional values.

+ * <p>

+ * The generic <code>get()</code> and <code>opt()</code> methods return an

+ * object, which you can cast or query for type. There are also typed

+ * <code>get</code> and <code>opt</code> methods that do type checking and type

+ * coercion for you. The opt methods differ from the get methods in that they do

+ * not throw. Instead, they return a specified value, such as null.

+ * <p>

+ * The <code>put</code> methods add or replace values in an object. For example,

+ *

+ * <pre>

+ * myString = new JSONObject().put(&quot;JSON&quot;, &quot;Hello, World!&quot;).toString();

+ * </pre>

+ *

+ * produces the string <code>{"JSON": "Hello, World!"}</code>.

+ * <p>

+ * The texts produced by the <code>toString</code> methods strictly conform to

+ * the JSON syntax rules. The constructors are more forgiving in the texts they

+ * will accept:

+ * <ul>

+ * <li>An extra <code>,</code>&nbsp;<small>(comma)</small> may appear just

+ * before the closing brace.</li>

+ * <li>Strings may be quoted with <code>'</code>&nbsp;<small>(single

+ * quote)</small>.</li>

+ * <li>Strings do not need to be quoted at all if they do not begin with a quote

+ * or single quote, and if they do not contain leading or trailing spaces, and

+ * if they do not contain any of these characters:

+ * <code>{ } [ ] / \ : , = ; #</code> and if they do not look like numbers and

+ * if they are not the reserved words <code>true</code>, <code>false</code>, or

+ * <code>null</code>.</li>

+ * <li>Keys can be followed by <code>=</code> or <code>=></code> as well as by

+ * <code>:</code>.</li>

+ * <li>Values can be followed by <code>;</code> <small>(semicolon)</small> as

+ * well as by <code>,</code> <small>(comma)</small>.</li>

+ * </ul>

+ *

+ * @author JSON.org

+ * @version 2012-12-01

+ */

+public class JSONObject {

+    /**

+     * The maximum number of keys in the key pool.

+     */

+     private static final int keyPoolSize = 100;

+

+   /**

+     * Key pooling is like string interning, but without permanently tying up

+     * memory. To help conserve memory, storage of duplicated key strings in

+     * JSONObjects will be avoided by using a key pool to manage unique key

+     * string objects. This is used by JSONObject.put(string, object).

+     */

+     private static Map<String,Object> keyPool = new HashMap<String,Object>(keyPoolSize);

+

+    /**

+     * JSONObject.NULL is equivalent to the value that JavaScript calls null,

+     * whilst Java's null is equivalent to the value that JavaScript calls

+     * undefined.

+     */

+     private static final class Null {

+

+        /**

+         * There is only intended to be a single instance of the NULL object,

+         * so the clone method returns itself.

+         * @return     NULL.

+         */

+        protected final Object clone() {

+            return this;

+        }

+

+        /**

+         * A Null object is equal to the null value and to itself.

+         * @param object    An object to test for nullness.

+         * @return true if the object parameter is the JSONObject.NULL object

+         *  or null.

+         */

+        public boolean equals(Object object) {

+            return object == null || object == this;

+        }

+

+        /**

+         * Get the "null" string value.

+         * @return The string "null".

+         */

+        public String toString() {

+            return "null";

+        }

+    }

+

+

+    /**

+     * The map where the JSONObject's properties are kept.

+     */

+    private final Map<String,Object> map;

+

+

+    /**

+     * It is sometimes more convenient and less ambiguous to have a

+     * <code>NULL</code> object than to use Java's <code>null</code> value.

+     * <code>JSONObject.NULL.equals(null)</code> returns <code>true</code>.

+     * <code>JSONObject.NULL.toString()</code> returns <code>"null"</code>.

+     */

+    public static final Object NULL = new Null();

+

+

+    /**

+     * Construct an empty JSONObject.

+     */

+    public JSONObject() {

+        this.map = new HashMap<String,Object>();

+    }

+

+

+    /**

+     * Construct a JSONObject from a subset of another JSONObject.

+     * An array of strings is used to identify the keys that should be copied.

+     * Missing keys are ignored.

+     * @param jo A JSONObject.

+     * @param names An array of strings.

+     * @throws JSONException

+     * @exception JSONException If a value is a non-finite number or if a name is duplicated.

+     */

+    public JSONObject(JSONObject jo, String[] names) {

+        this();

+        for (int i = 0; i < names.length; i += 1) {

+            try {

+                this.putOnce(names[i], jo.opt(names[i]));

+            } catch (Exception ignore) {

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a JSONTokener.

+     * @param x A JSONTokener object containing the source string.

+     * @throws JSONException If there is a syntax error in the source string

+     *  or a duplicated key.

+     */

+    public JSONObject(JSONTokener x) throws JSONException {

+        this();

+        char c;

+        String key;

+

+        if (x.nextClean() != '{') {

+            throw x.syntaxError("A JSONObject text must begin with '{'");

+        }

+        for (;;) {

+            c = x.nextClean();

+            switch (c) {

+            case 0:

+                throw x.syntaxError("A JSONObject text must end with '}'");

+            case '}':

+                return;

+            default:

+                x.back();

+                key = x.nextValue().toString();

+            }

+

+// The key is followed by ':'. We will also tolerate '=' or '=>'.

+

+            c = x.nextClean();

+            if (c == '=') {

+                if (x.next() != '>') {

+                    x.back();

+                }

+            } else if (c != ':') {

+                throw x.syntaxError("Expected a ':' after a key");

+            }

+            this.putOnce(key, x.nextValue());

+

+// Pairs are separated by ','. We will also tolerate ';'.

+

+            switch (x.nextClean()) {

+            case ';':

+            case ',':

+                if (x.nextClean() == '}') {

+                    return;

+                }

+                x.back();

+                break;

+            case '}':

+                return;

+            default:

+                throw x.syntaxError("Expected a ',' or '}'");

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a Map.

+     *

+     * @param map A map object that can be used to initialize the contents of

+     *  the JSONObject.

+     * @throws JSONException

+     */

+    public JSONObject(Map<String,Object> map) {

+        this.map = new HashMap<String,Object>();

+        if (map != null) {

+            Iterator<Map.Entry<String,Object>> i = map.entrySet().iterator();

+            while (i.hasNext()) {

+                Map.Entry<String,Object> e = i.next();

+                Object value = e.getValue();

+                if (value != null) {

+                    this.map.put(e.getKey(), wrap(value));

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from an Object using bean getters.

+     * It reflects on all of the public methods of the object.

+     * For each of the methods with no parameters and a name starting

+     * with <code>"get"</code> or <code>"is"</code> followed by an uppercase letter,

+     * the method is invoked, and a key and the value returned from the getter method

+     * are put into the new JSONObject.

+     *

+     * The key is formed by removing the <code>"get"</code> or <code>"is"</code> prefix.

+     * If the second remaining character is not upper case, then the first

+     * character is converted to lower case.

+     *

+     * For example, if an object has a method named <code>"getName"</code>, and

+     * if the result of calling <code>object.getName()</code> is <code>"Larry Fine"</code>,

+     * then the JSONObject will contain <code>"name": "Larry Fine"</code>.

+     *

+     * @param bean An object that has getter methods that should be used

+     * to make a JSONObject.

+     */

+    public JSONObject(Object bean) {

+        this();

+        this.populateMap(bean);

+    }

+
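A sketch of the bean constructor, assuming the standard JSON.org populateMap helper defined later in this file (which skips getClass and lower-cases the derived key); the bean and class names are illustrative:

    import org.json.JSONObject;

    public class BeanSketch {
        public static class Feed {
            public String getName() { return "feed-1"; }
            public boolean isSuspended() { return false; }
        }

        public static void main(String[] args) {
            // Expected roughly: {"name":"feed-1","suspended":false} (key order may vary)
            System.out.println(new JSONObject(new Feed()));
        }
    }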

+

+    /**

+     * Construct a JSONObject from an Object, using reflection to find the

+     * public members. The resulting JSONObject's keys will be the strings

+     * from the names array, and the values will be the field values associated

+     * with those keys in the object. If a key is not found or not visible,

+     * then it will not be copied into the new JSONObject.

+     * @param object An object that has fields that should be used to make a

+     * JSONObject.

+     * @param names An array of strings, the names of the fields to be obtained

+     * from the object.

+     */

+    public JSONObject(Object object, String names[]) {

+        this();

+        Class<? extends Object> c = object.getClass();

+        for (int i = 0; i < names.length; i += 1) {

+            String name = names[i];

+            try {

+                this.putOpt(name, c.getField(name).get(object));

+            } catch (Exception ignore) {

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a source JSON text string.

+     * This is the most commonly used JSONObject constructor.

+     * @param source    A string beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @exception JSONException If there is a syntax error in the source

+     *  string or a duplicated key.

+     */

+    public JSONObject(String source) throws JSONException {

+        this(new JSONTokener(source));

+    }

+
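The string constructor accepts the lenient forms listed in the class comment (single quotes, unquoted keys, '=' and ';'), while toString() always emits strict JSON. A small sketch with illustrative names; key order may vary:

    import org.json.JSONObject;

    public class LenientParseSketch {
        public static void main(String[] args) {
            JSONObject jo = new JSONObject("{name: 'feed-1'; enabled = true,}");
            System.out.println(jo); // {"enabled":true,"name":"feed-1"}
        }
    }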

+

+    /**

+     * Construct a JSONObject from a ResourceBundle.

+     * @param baseName The ResourceBundle base name.

+     * @param locale The Locale to load the ResourceBundle for.

+     * @throws JSONException If any JSONExceptions are detected.

+     */

+    public JSONObject(String baseName, Locale locale) throws JSONException {

+        this();

+        ResourceBundle bundle = ResourceBundle.getBundle(baseName, locale,

+                Thread.currentThread().getContextClassLoader());

+

+// Iterate through the keys in the bundle.

+

+        Enumeration<?> keys = bundle.getKeys();

+        while (keys.hasMoreElements()) {

+            Object key = keys.nextElement();

+            if (key instanceof String) {

+

+// Go through the path, ensuring that there is a nested JSONObject for each

+// segment except the last. Add the value using the last segment's name into

+// the deepest nested JSONObject.

+

+                String[] path = ((String)key).split("\\.");

+                int last = path.length - 1;

+                JSONObject target = this;

+                for (int i = 0; i < last; i += 1) {

+                    String segment = path[i];

+                    JSONObject nextTarget = target.optJSONObject(segment);

+                    if (nextTarget == null) {

+                        nextTarget = new JSONObject();

+                        target.put(segment, nextTarget);

+                    }

+                    target = nextTarget;

+                }

+                target.put(path[last], bundle.getString((String)key));

+            }

+        }

+    }

+

+

+    /**

+     * Accumulate values under a key. It is similar to the put method except

+     * that if there is already an object stored under the key then a

+     * JSONArray is stored under the key to hold all of the accumulated values.

+     * If there is already a JSONArray, then the new value is appended to it.

+     * In contrast, the put method replaces the previous value.

+     *

+     * If only one value is accumulated that is not a JSONArray, then the

+     * result will be the same as using put. But if multiple values are

+     * accumulated, then the result will be like append.

+     * @param key   A key string.

+     * @param value An object to be accumulated under the key.

+     * @return this.

+     * @throws JSONException If the value is an invalid number

+     *  or if the key is null.

+     */

+    public JSONObject accumulate(

+        String key,

+        Object value

+    ) throws JSONException {

+        testValidity(value);

+        Object object = this.opt(key);

+        if (object == null) {

+            this.put(key, value instanceof JSONArray

+                    ? new JSONArray().put(value)

+                    : value);

+        } else if (object instanceof JSONArray) {

+            ((JSONArray)object).put(value);

+        } else {

+            this.put(key, new JSONArray().put(object).put(value));

+        }

+        return this;

+    }

+

+

+    /**

+     * Append values to the array under a key. If the key does not exist in the

+     * JSONObject, then the key is put in the JSONObject with its value being a

+     * JSONArray containing the value parameter. If the key was already

+     * associated with a JSONArray, then the value parameter is appended to it.

+     * @param key   A key string.

+     * @param value An object to be accumulated under the key.

+     * @return this.

+     * @throws JSONException If the key is null or if the current value

+     *  associated with the key is not a JSONArray.

+     */

+    public JSONObject append(String key, Object value) throws JSONException {

+        testValidity(value);

+        Object object = this.opt(key);

+        if (object == null) {

+            this.put(key, new JSONArray().put(value));

+        } else if (object instanceof JSONArray) {

+            this.put(key, ((JSONArray)object).put(value));

+        } else {

+            throw new JSONException("JSONObject[" + key +

+                    "] is not a JSONArray.");

+        }

+        return this;

+    }
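A sketch contrasting accumulate, append, and put, assuming the put(String, Object) overloads defined later in this file; names are illustrative and key order may vary:

    import org.json.JSONObject;

    public class AccumulateSketch {
        public static void main(String[] args) {
            JSONObject jo = new JSONObject();
            jo.accumulate("tag", "a");            // one value: behaves like put
            jo.accumulate("tag", "b");            // second value: promotes to ["a","b"]
            jo.append("ids", 1).append("ids", 2); // append always builds a JSONArray
            jo.put("owner", "x");
            jo.put("owner", "y");                 // put replaces: "owner" is just "y"
            System.out.println(jo);               // {"tag":["a","b"],"ids":[1,2],"owner":"y"}
        }
    }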

+

+

+    /**

+     * Produce a string from a double. The string "null" will be returned if

+     * the number is not finite.

+     * @param  d A double.

+     * @return A String.

+     */

+    public static String doubleToString(double d) {

+        if (Double.isInfinite(d) || Double.isNaN(d)) {

+            return "null";

+        }

+

+// Shave off trailing zeros and decimal point, if possible.

+

+        String string = Double.toString(d);

+        if (string.indexOf('.') > 0 && string.indexOf('e') < 0 &&

+                string.indexOf('E') < 0) {

+            while (string.endsWith("0")) {

+                string = string.substring(0, string.length() - 1);

+            }

+            if (string.endsWith(".")) {

+                string = string.substring(0, string.length() - 1);

+            }

+        }

+        return string;

+    }
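+

+    // Illustrative sketch (editor's note, not part of the imported source): how the

+    // trailing-zero trimming above behaves for a few inputs.

+    //   doubleToString(3.0);           // "3"

+    //   doubleToString(2.5);           // "2.5"

+    //   doubleToString(1.0E20);        // "1.0E20"  (exponent forms are left untouched)

+    //   doubleToString(Double.NaN);    // "null"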

+

+

+    /**

+     * Get the value object associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The object associated with the key.

+     * @throws      JSONException if the key is not found.

+     */

+    public Object get(String key) throws JSONException {

+        if (key == null) {

+            throw new JSONException("Null key.");

+        }

+        Object object = this.opt(key);

+        if (object == null) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                    "] not found.");

+        }

+        return object;

+    }

+

+

+    /**

+     * Get the boolean value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The truth.

+     * @throws      JSONException

+     *  if the value is not a Boolean or the String "true" or "false".

+     */

+    public boolean getBoolean(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object.equals(Boolean.FALSE) ||

+                (object instanceof String &&

+                ((String)object).equalsIgnoreCase("false"))) {

+            return false;

+        } else if (object.equals(Boolean.TRUE) ||

+                (object instanceof String &&

+                ((String)object).equalsIgnoreCase("true"))) {

+            return true;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a Boolean.");

+    }

+

+

+    /**

+     * Get the double value associated with a key.

+     * @param key   A key string.

+     * @return      The numeric value.

+     * @throws JSONException if the key is not found or

+     *  if the value is not a Number object and cannot be converted to a number.

+     */

+    public double getDouble(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).doubleValue()

+                : Double.parseDouble((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a number.");

+        }

+    }

+

+

+    /**

+     * Get the int value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The integer value.

+     * @throws   JSONException if the key is not found or if the value cannot

+     *  be converted to an integer.

+     */

+    public int getInt(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).intValue()

+                : Integer.parseInt((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not an int.");

+        }

+    }

+

+

+    /**

+     * Get the JSONArray value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A JSONArray which is the value.

+     * @throws      JSONException if the key is not found or

+     *  if the value is not a JSONArray.

+     */

+    public JSONArray getJSONArray(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof JSONArray) {

+            return (JSONArray)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a JSONArray.");

+    }

+

+

+    /**

+     * Get the JSONObject value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A JSONObject which is the value.

+     * @throws      JSONException if the key is not found or

+     *  if the value is not a JSONObject.

+     */

+    public JSONObject getJSONObject(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof JSONObject) {

+            return (JSONObject)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a JSONObject.");

+    }

+

+

+    /**

+     * Get the long value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The long value.

+     * @throws   JSONException if the key is not found or if the value cannot

+     *  be converted to a long.

+     */

+    public long getLong(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).longValue()

+                : Long.parseLong((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a long.");

+        }

+    }

+

+

+    /**

+     * Get an array of field names from a JSONObject.

+     *

+     * @return An array of field names, or null if there are no names.

+     */

+    public static String[] getNames(JSONObject jo) {

+        int length = jo.length();

+        if (length == 0) {

+            return null;

+        }

+        Iterator<String> iterator = jo.keys();

+        String[] names = new String[length];

+        int i = 0;

+        while (iterator.hasNext()) {

+            names[i] = iterator.next();

+            i += 1;

+        }

+        return names;

+    }

+

+

+    /**

+     * Get an array of field names from an Object.

+     *

+     * @return An array of field names, or null if there are no names.

+     */

+    public static String[] getNames(Object object) {

+        if (object == null) {

+            return null;

+        }

+        Class<? extends Object> klass = object.getClass();

+        Field[] fields = klass.getFields();

+        int length = fields.length;

+        if (length == 0) {

+            return null;

+        }

+        String[] names = new String[length];

+        for (int i = 0; i < length; i += 1) {

+            names[i] = fields[i].getName();

+        }

+        return names;

+    }

+

+

+    /**

+     * Get the string associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A string which is the value.

+     * @throws   JSONException if there is no string value for the key.

+     */

+    public String getString(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof String) {

+            return (String)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+            "] not a string.");

+    }

+

+

+    /**

+     * Determine if the JSONObject contains a specific key.

+     * @param key   A key string.

+     * @return      true if the key exists in the JSONObject.

+     */

+    public boolean has(String key) {

+        return this.map.containsKey(key);

+    }

+

+

+    /**

+     * Increment a property of a JSONObject. If there is no such property,

+     * create one with a value of 1. If there is such a property, and if

+     * it is an Integer, Long, Double, or Float, then add one to it.

+     * @param key  A key string.

+     * @return this.

+     * @throws JSONException If there is already a property with this name

+     * that is not an Integer, Long, Double, or Float.

+     */

+    public JSONObject increment(String key) throws JSONException {

+        Object value = this.opt(key);

+        if (value == null) {

+            this.put(key, 1);

+        } else if (value instanceof Integer) {

+            this.put(key, ((Integer)value).intValue() + 1);

+        } else if (value instanceof Long) {

+            this.put(key, ((Long)value).longValue() + 1);

+        } else if (value instanceof Double) {

+            this.put(key, ((Double)value).doubleValue() + 1);

+        } else if (value instanceof Float) {

+            this.put(key, ((Float)value).floatValue() + 1);

+        } else {

+            throw new JSONException("Unable to increment [" + quote(key) + "].");

+        }

+        return this;

+    }
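+

+    // Illustrative sketch (editor's note, not part of the imported source): increment as a counter.

+    //   JSONObject counts = new JSONObject();

+    //   counts.increment("hits");    // {"hits":1}

+    //   counts.increment("hits");    // {"hits":2}

+    //   counts.put("hits", "n/a");

+    //   counts.increment("hits");    // throws JSONException: value is not a numeric type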

+

+

+    /**

+     * Determine if the value associated with the key is null or if there is

+     *  no value.

+     * @param key   A key string.

+     * @return      true if there is no value associated with the key or if

+     *  the value is the JSONObject.NULL object.

+     */

+    public boolean isNull(String key) {

+        return JSONObject.NULL.equals(this.opt(key));

+    }

+

+

+    /**

+     * Get an enumeration of the keys of the JSONObject.

+     *

+     * @return An iterator of the keys.

+     */

+    public Iterator<String> keys() {

+        return this.keySet().iterator();

+    }

+

+

+    /**

+     * Get a set of keys of the JSONObject.

+     *

+     * @return A keySet.

+     */

+    public Set<String> keySet() {

+        return this.map.keySet();

+    }

+

+

+    /**

+     * Get the number of keys stored in the JSONObject.

+     *

+     * @return The number of keys in the JSONObject.

+     */

+    public int length() {

+        return this.map.size();

+    }

+

+

+    /**

+     * Produce a JSONArray containing the names of the elements of this

+     * JSONObject.

+     * @return A JSONArray containing the key strings, or null if the JSONObject

+     * is empty.

+     */

+    public JSONArray names() {

+        JSONArray ja = new JSONArray();

+        Iterator<String> keys = this.keys();

+        while (keys.hasNext()) {

+            ja.put(keys.next());

+        }

+        return ja.length() == 0 ? null : ja;

+    }

+

+    /**

+     * Produce a string from a Number.

+     * @param  number A Number

+     * @return A String.

+     * @throws JSONException If number is null or a non-finite number.

+     */

+    public static String numberToString(Number number)

+            throws JSONException {

+        if (number == null) {

+            throw new JSONException("Null pointer");

+        }

+        testValidity(number);

+

+// Shave off trailing zeros and decimal point, if possible.

+

+        String string = number.toString();

+        if (string.indexOf('.') > 0 && string.indexOf('e') < 0 &&

+                string.indexOf('E') < 0) {

+            while (string.endsWith("0")) {

+                string = string.substring(0, string.length() - 1);

+            }

+            if (string.endsWith(".")) {

+                string = string.substring(0, string.length() - 1);

+            }

+        }

+        return string;

+    }

+

+

+    /**

+     * Get an optional value associated with a key.

+     * @param key   A key string.

+     * @return      An object which is the value, or null if there is no value.

+     */

+    public Object opt(String key) {

+        return key == null ? null : this.map.get(key);

+    }

+

+

+    /**

+     * Get an optional boolean associated with a key.

+     * It returns false if there is no such key, or if the value is not

+     * Boolean.TRUE or the String "true".

+     *

+     * @param key   A key string.

+     * @return      The truth.

+     */

+    public boolean optBoolean(String key) {

+        return this.optBoolean(key, false);

+    }

+

+

+    /**

+     * Get an optional boolean associated with a key.

+     * It returns the defaultValue if there is no such key, or if it is not

+     * a Boolean or the String "true" or "false" (case insensitive).

+     *

+     * @param key              A key string.

+     * @param defaultValue     The default.

+     * @return      The truth.

+     */

+    public boolean optBoolean(String key, boolean defaultValue) {

+        try {

+            return this.getBoolean(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional double associated with a key,

+     * or NaN if there is no such key or if its value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A string which is the key.

+     * @return      An object which is the value.

+     */

+    public double optDouble(String key) {

+        return this.optDouble(key, Double.NaN);

+    }

+

+

+    /**

+     * Get an optional double associated with a key, or the

+     * defaultValue if there is no such key or if its value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      An object which is the value.

+     */

+    public double optDouble(String key, double defaultValue) {

+        try {

+            return this.getDouble(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional int value associated with a key,

+     * or zero if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @return      An object which is the value.

+     */

+    public int optInt(String key) {

+        return this.optInt(key, 0);

+    }

+

+

+    /**

+     * Get an optional int value associated with a key,

+     * or the default if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      An object which is the value.

+     */

+    public int optInt(String key, int defaultValue) {

+        try {

+            return this.getInt(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional JSONArray associated with a key.

+     * It returns null if there is no such key, or if its value is not a

+     * JSONArray.

+     *

+     * @param key   A key string.

+     * @return      A JSONArray which is the value.

+     */

+    public JSONArray optJSONArray(String key) {

+        Object o = this.opt(key);

+        return o instanceof JSONArray ? (JSONArray)o : null;

+    }

+

+

+    /**

+     * Get an optional JSONObject associated with a key.

+     * It returns null if there is no such key, or if its value is not a

+     * JSONObject.

+     *

+     * @param key   A key string.

+     * @return      A JSONObject which is the value.

+     */

+    public JSONObject optJSONObject(String key) {

+        Object object = this.opt(key);

+        return object instanceof JSONObject ? (JSONObject)object : null;

+    }

+

+

+    /**

+     * Get an optional long value associated with a key,

+     * or zero if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @return      An object which is the value.

+     */

+    public long optLong(String key) {

+        return this.optLong(key, 0);

+    }

+

+

+    /**

+     * Get an optional long value associated with a key,

+     * or the default if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key          A key string.

+     * @param defaultValue The default.

+     * @return             An object which is the value.

+     */

+    public long optLong(String key, long defaultValue) {

+        try {

+            return this.getLong(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional string associated with a key.

+     * It returns an empty string if there is no such key. If the value is not

+     * a string and is not null, then it is converted to a string.

+     *

+     * @param key   A key string.

+     * @return      A string which is the value.

+     */

+    public String optString(String key) {

+        return this.optString(key, "");

+    }

+

+

+    /**

+     * Get an optional string associated with a key.

+     * It returns the defaultValue if there is no such key.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      A string which is the value.

+     */

+    public String optString(String key, String defaultValue) {

+        Object object = this.opt(key);

+        return NULL.equals(object) ? defaultValue : object.toString();

+    }
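+

+    // Illustrative sketch (editor's note, not part of the imported source): the get* accessors

+    // throw when a key is missing, while the opt* accessors fall back to a default.

+    //   JSONObject jo = new JSONObject().put("port", "8080");

+    //   jo.getInt("port");           // 8080 (the string value is parsed)

+    //   jo.optInt("missing");        // 0

+    //   jo.optInt("missing", 443);   // 443

+    //   jo.getInt("missing");        // throws JSONException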

+

+

+    private void populateMap(Object bean) {

+        Class<? extends Object> klass = bean.getClass();

+

+// If klass is a System class then set includeSuperClass to false.

+

+        boolean includeSuperClass = klass.getClassLoader() != null;

+

+        Method[] methods = includeSuperClass

+                ? klass.getMethods()

+                : klass.getDeclaredMethods();

+        for (int i = 0; i < methods.length; i += 1) {

+            try {

+                Method method = methods[i];

+                if (Modifier.isPublic(method.getModifiers())) {

+                    String name = method.getName();

+                    String key = "";

+                    if (name.startsWith("get")) {

+                        if ("getClass".equals(name) ||

+                                "getDeclaringClass".equals(name)) {

+                            key = "";

+                        } else {

+                            key = name.substring(3);

+                        }

+                    } else if (name.startsWith("is")) {

+                        key = name.substring(2);

+                    }

+                    if (key.length() > 0 &&

+                            Character.isUpperCase(key.charAt(0)) &&

+                            method.getParameterTypes().length == 0) {

+                        if (key.length() == 1) {

+                            key = key.toLowerCase();

+                        } else if (!Character.isUpperCase(key.charAt(1))) {

+                            key = key.substring(0, 1).toLowerCase() +

+                                key.substring(1);

+                        }

+

+                        Object result = method.invoke(bean, (Object[])null);

+                        if (result != null) {

+                            this.map.put(key, wrap(result));

+                        }

+                    }

+                }

+            } catch (Exception ignore) {

+            }

+        }

+    }
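+

+    // Illustrative sketch (editor's note, not part of the imported source): populateMap backs the

+    // bean constructor (JSONObject(Object bean), defined earlier in this class). Public zero-argument

+    // getX()/isX() methods become lower-cased keys, e.g. for a hypothetical public bean:

+    //   public class Point { public int getX() { return 3; } public boolean isVisible() { return true; } }

+    //   new JSONObject(new Point());   // {"x":3,"visible":true} (key order not guaranteed)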

+

+

+    /**

+     * Put a key/boolean pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A boolean which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public JSONObject put(String key, boolean value) throws JSONException {

+        this.put(key, value ? Boolean.TRUE : Boolean.FALSE);

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject, where the value will be a

+     * JSONArray which is produced from a Collection.

+     * @param key   A key string.

+     * @param value A Collection value.

+     * @return      this.

+     * @throws JSONException If the key is null.

+     */

+    public JSONObject put(String key, Collection<Object> value) throws JSONException {

+        this.put(key, new JSONArray(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/double pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A double which is the value.

+     * @return this.

+     * @throws JSONException If the key is null or if the number is invalid.

+     */

+    public JSONObject put(String key, double value) throws JSONException {

+        this.put(key, new Double(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/int pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value An int which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public JSONObject put(String key, int value) throws JSONException {

+        this.put(key, new Integer(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/long pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A long which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public JSONObject put(String key, long value) throws JSONException {

+        this.put(key, new Long(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject, where the value will be a

+     * JSONObject which is produced from a Map.

+     * @param key   A key string.

+     * @param value A Map value.

+     * @return      this.

+     * @throws JSONException If the key is null.

+     */

+    public JSONObject put(String key, Map<String, Object> value) throws JSONException {

+        this.put(key, new JSONObject(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject. If the value is null,

+     * then the key will be removed from the JSONObject if it is present.

+     * @param key   A key string.

+     * @param value An object which is the value. It should be of one of these

+     *  types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,

+     *  or the JSONObject.NULL object.

+     * @return this.

+     * @throws JSONException If the value is a non-finite number

+     *  or if the key is null.

+     */

+    public JSONObject put(String key, Object value) throws JSONException {

+        String pooled;

+        if (key == null) {

+            throw new JSONException("Null key.");

+        }

+        if (value != null) {

+            testValidity(value);

+            pooled = (String)keyPool.get(key);

+            if (pooled == null) {

+                if (keyPool.size() >= keyPoolSize) {

+                    keyPool = new HashMap<String, Object>(keyPoolSize);

+                }

+                keyPool.put(key, key);

+            } else {

+                key = pooled;

+            }

+            this.map.put(key, value);

+        } else {

+            this.remove(key);

+        }

+        return this;

+    }
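+

+    // Illustrative sketch (editor's note, not part of the imported source): a null value removes

+    // the entry, while JSONObject.NULL stores an explicit JSON null.

+    //   JSONObject jo = new JSONObject();

+    //   jo.put("a", JSONObject.NULL);   // {"a":null}

+    //   jo.put("a", (Object) null);     // {}  (the entry is removed)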

+

+

+    /**

+     * Put a key/value pair in the JSONObject, but only if the key and the

+     * value are both non-null, and only if there is not already a member

+     * with that name.

+     * @param key   A key string.

+     * @param value An object which is the value.

+     * @return this.

+     * @throws JSONException if the key is a duplicate

+     */

+    public JSONObject putOnce(String key, Object value) throws JSONException {

+        if (key != null && value != null) {

+            if (this.opt(key) != null) {

+                throw new JSONException("Duplicate key \"" + key + "\"");

+            }

+            this.put(key, value);

+        }

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject, but only if the

+     * key and the value are both non-null.

+     * @param key   A key string.

+     * @param value An object which is the value. It should be of one of these

+     *  types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,

+     *  or the JSONObject.NULL object.

+     * @return this.

+     * @throws JSONException If the value is a non-finite number.

+     */

+    public JSONObject putOpt(String key, Object value) throws JSONException {

+        if (key != null && value != null) {

+            this.put(key, value);

+        }

+        return this;

+    }

+

+

+    /**

+     * Produce a string in double quotes with backslash sequences in all the

+     * right places. A backslash will be inserted within </, producing <\/,

+     * allowing JSON text to be delivered in HTML. In JSON text, a string

+     * cannot contain a control character or an unescaped quote or backslash.

+     * @param string A String

+     * @return  A String correctly formatted for insertion in a JSON text.

+     */

+    public static String quote(String string) {

+        StringWriter sw = new StringWriter();

+        synchronized (sw.getBuffer()) {

+            try {

+                return quote(string, sw).toString();

+            } catch (IOException ignored) {

+                // will never happen - we are writing to a string writer

+                return "";

+            }

+        }

+    }

+

+    public static Writer quote(String string, Writer w) throws IOException {

+        if (string == null || string.length() == 0) {

+            w.write("\"\"");

+            return w;

+        }

+

+        char b;

+        char c = 0;

+        String hhhh;

+        int i;

+        int len = string.length();

+

+        w.write('"');

+        for (i = 0; i < len; i += 1) {

+            b = c;

+            c = string.charAt(i);

+            switch (c) {

+            case '\\':

+            case '"':

+                w.write('\\');

+                w.write(c);

+                break;

+            case '/':

+                if (b == '<') {

+                    w.write('\\');

+                }

+                w.write(c);

+                break;

+            case '\b':

+                w.write("\\b");

+                break;

+            case '\t':

+                w.write("\\t");

+                break;

+            case '\n':

+                w.write("\\n");

+                break;

+            case '\f':

+                w.write("\\f");

+                break;

+            case '\r':

+                w.write("\\r");

+                break;

+            default:

+                if (c < ' ' || (c >= '\u0080' && c < '\u00a0')

+                        || (c >= '\u2000' && c < '\u2100')) {

+                    w.write("\\u");

+                    hhhh = Integer.toHexString(c);

+                    w.write("0000", 0, 4 - hhhh.length());

+                    w.write(hhhh);

+                } else {

+                    w.write(c);

+                }

+            }

+        }

+        w.write('"');

+        return w;

+    }
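+

+    // Illustrative sketch (editor's note, not part of the imported source): quote() wraps the text

+    // in double quotes and escapes control characters; a "</" sequence becomes "<\/" so the JSON

+    // can be embedded safely in HTML.

+    //   quote("a\tb");         // the tab is escaped as \t, result wrapped in double quotes

+    //   quote("</script>");    // yields "<\/script>" wrapped in double quotes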

+

+    /**

+     * Remove a name and its value, if present.

+     * @param key The name to be removed.

+     * @return The value that was associated with the name,

+     * or null if there was no value.

+     */

+    public Object remove(String key) {

+        return this.map.remove(key);

+    }

+

+    /**

+     * Try to convert a string into a number, boolean, or null. If the string

+     * can't be converted, return the string.

+     * @param string A String.

+     * @return A simple JSON value.

+     */

+    public static Object stringToValue(String string) {

+        Double d;

+        if (string.equals("")) {

+            return string;

+        }

+        if (string.equalsIgnoreCase("true")) {

+            return Boolean.TRUE;

+        }

+        if (string.equalsIgnoreCase("false")) {

+            return Boolean.FALSE;

+        }

+        if (string.equalsIgnoreCase("null")) {

+            return JSONObject.NULL;

+        }

+

+        /*

+         * If it might be a number, try converting it.

+         * If a number cannot be produced, then the value will just

+         * be a string. Note that the plus and implied string

+         * conventions are non-standard. A JSON parser may accept

+         * non-JSON forms as long as it accepts all correct JSON forms.

+         */

+

+        char b = string.charAt(0);

+        if ((b >= '0' && b <= '9') || b == '.' || b == '-' || b == '+') {

+            try {

+                if (string.indexOf('.') > -1 ||

+                        string.indexOf('e') > -1 || string.indexOf('E') > -1) {

+                    d = Double.valueOf(string);

+                    if (!d.isInfinite() && !d.isNaN()) {

+                        return d;

+                    }

+                } else {

+                    Long myLong = new Long(string);

+                    if (myLong.longValue() == myLong.intValue()) {

+                        return new Integer(myLong.intValue());

+                    } else {

+                        return myLong;

+                    }

+                }

+            }  catch (Exception ignore) {

+            }

+        }

+        return string;

+    }
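+

+    // Illustrative sketch (editor's note, not part of the imported source): how plain strings are

+    // coerced by stringToValue.

+    //   stringToValue("true");          // Boolean.TRUE

+    //   stringToValue("42");            // Integer 42

+    //   stringToValue("3000000000");    // Long 3000000000 (does not fit in an int)

+    //   stringToValue("2.5");           // Double 2.5

+    //   stringToValue("hello");         // the String "hello", unchanged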

+

+

+    /**

+     * Throw an exception if the object is a NaN or infinite number.

+     * @param o The object to test.

+     * @throws JSONException If o is a non-finite number.

+     */

+    public static void testValidity(Object o) throws JSONException {

+        if (o != null) {

+            if (o instanceof Double) {

+                if (((Double)o).isInfinite() || ((Double)o).isNaN()) {

+                    throw new JSONException(

+                        "JSON does not allow non-finite numbers.");

+                }

+            } else if (o instanceof Float) {

+                if (((Float)o).isInfinite() || ((Float)o).isNaN()) {

+                    throw new JSONException(

+                        "JSON does not allow non-finite numbers.");

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Produce a JSONArray containing the values of the members of this

+     * JSONObject.

+     * @param names A JSONArray containing a list of key strings. This

+     * determines the sequence of the values in the result.

+     * @return A JSONArray of values.

+     * @throws JSONException If any of the values are non-finite numbers.

+     */

+    public JSONArray toJSONArray(JSONArray names) throws JSONException {

+        if (names == null || names.length() == 0) {

+            return null;

+        }

+        JSONArray ja = new JSONArray();

+        for (int i = 0; i < names.length(); i += 1) {

+            ja.put(this.opt(names.getString(i)));

+        }

+        return ja;

+    }

+

+    /**

+     * Make a JSON text of this JSONObject. For compactness, no whitespace

+     * is added. If this would not result in a syntactically correct JSON text,

+     * then null will be returned instead.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return a printable, displayable, portable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     */

+    public String toString() {

+        try {

+            return this.toString(0);

+        } catch (Exception e) {

+            return null;

+        }

+    }

+

+

+    /**

+     * Make a prettyprinted JSON text of this JSONObject.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     * @param indentFactor The number of spaces to add to each level of

+     *  indentation.

+     * @return a printable, displayable, portable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @throws JSONException If the object contains an invalid number.

+     */

+    public String toString(int indentFactor) throws JSONException {

+        StringWriter w = new StringWriter();

+        synchronized (w.getBuffer()) {

+            return this.write(w, indentFactor, 0).toString();

+        }

+    }
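+

+    // Illustrative sketch (editor's note, not part of the imported source): compact vs. pretty output.

+    //   JSONObject jo = new JSONObject().put("a", 1).put("b", 2);

+    //   jo.toString();    // {"a":1,"b":2}  (no whitespace; key order is not guaranteed)

+    //   jo.toString(2);   // same data, one key/value pair per line, each level indented two spaces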

+

+    /**

+     * Make a JSON text of an Object value. If the object has a

+     * value.toJSONString() method, then that method will be used to produce

+     * the JSON text. The method is required to produce a strictly

+     * conforming text. If the object does not contain a toJSONString

+     * method (which is the most common case), then a text will be

+     * produced by other means. If the value is an array or Collection,

+     * then a JSONArray will be made from it and its toString method

+     * will be called. If the value is a Map, then a JSONObject will be made

+     * from it and its toString method will be called. Otherwise, the

+     * value's toString method will be called, and the result will be quoted.

+     *

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     * @param value The value to be serialized.

+     * @return a printable, displayable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @throws JSONException If the value is or contains an invalid number.

+     */

+    @SuppressWarnings("unchecked")

+    public static String valueToString(Object value) throws JSONException {

+        if (value == null || value.equals(null)) {

+            return "null";

+        }

+        if (value instanceof JSONString) {

+            Object object;

+            try {

+                object = ((JSONString)value).toJSONString();

+            } catch (Exception e) {

+                throw new JSONException(e);

+            }

+            if (object instanceof String) {

+                return (String)object;

+            }

+            throw new JSONException("Bad value from toJSONString: " + object);

+        }

+        if (value instanceof Number) {

+            return numberToString((Number) value);

+        }

+        if (value instanceof Boolean || value instanceof JSONObject ||

+                value instanceof JSONArray) {

+            return value.toString();

+        }

+        if (value instanceof Map) {

+            return new JSONObject((Map<String, Object>)value).toString();

+        }

+        if (value instanceof Collection) {

+            return new JSONArray((Collection<Object>)value).toString();

+        }

+        if (value.getClass().isArray()) {

+            return new JSONArray(value).toString();

+        }

+        return quote(value.toString());

+    }

+

+     /**

+      * Wrap an object, if necessary. If the object is null, return the NULL

+      * object. If it is an array or collection, wrap it in a JSONArray. If

+      * it is a map, wrap it in a JSONObject. If it is a standard property

+      * (Double, String, et al) then it is already wrapped. Otherwise, if it

+      * comes from one of the java packages, turn it into a string. And if

+      * it doesn't, try to wrap it in a JSONObject. If the wrapping fails,

+      * then null is returned.

+      *

+      * @param object The object to wrap

+      * @return The wrapped value

+      */

+     @SuppressWarnings("unchecked")

+     public static Object wrap(Object object) {

+         try {

+             if (object == null) {

+                 return NULL;

+             }

+             if (object instanceof JSONObject || object instanceof JSONArray  ||

+                     NULL.equals(object)      || object instanceof JSONString ||

+                     object instanceof Byte   || object instanceof Character  ||

+                     object instanceof Short  || object instanceof Integer    ||

+                     object instanceof Long   || object instanceof Boolean    ||

+                     object instanceof Float  || object instanceof Double     ||

+                     object instanceof String) {

+                 return object;

+             }

+

+             if (object instanceof Collection) {

+                 return new JSONArray((Collection<Object>)object);

+             }

+             if (object.getClass().isArray()) {

+                 return new JSONArray(object);

+             }

+             if (object instanceof Map) {

+                 return new JSONObject((Map<String, Object>)object);

+             }

+             Package objectPackage = object.getClass().getPackage();

+             String objectPackageName = objectPackage != null

+                 ? objectPackage.getName()

+                 : "";

+             if (

+                 objectPackageName.startsWith("java.") ||

+                 objectPackageName.startsWith("javax.") ||

+                 object.getClass().getClassLoader() == null

+             ) {

+                 return object.toString();

+             }

+             return new JSONObject(object);

+         } catch(Exception exception) {

+             return null;

+         }

+     }
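+

+     // Illustrative sketch (editor's note, not part of the imported source): typical wrap() results.

+     //   wrap(null);                   // JSONObject.NULL

+     //   wrap("text");                 // "text" (already a JSON-ready value)

+     //   wrap(new int[] {1, 2, 3});    // a JSONArray holding 1, 2, 3

+     //   wrap(new java.util.Date());   // a String -- java.* types fall back to toString()

+     //   wrap(someBean);               // a JSONObject built from the bean's getters (someBean is hypothetical)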

+

+

+     /**

+      * Write the contents of the JSONObject as JSON text to a writer.

+      * For compactness, no whitespace is added.

+      * <p>

+      * Warning: This method assumes that the data structure is acyclical.

+      *

+      * @return The writer.

+      * @throws JSONException

+      */

+     public Writer write(Writer writer) throws JSONException {

+        return this.write(writer, 0, 0);

+    }

+

+

+    @SuppressWarnings("unchecked")

+    static final Writer writeValue(Writer writer, Object value,

+            int indentFactor, int indent) throws JSONException, IOException {

+        if (value == null || value.equals(null)) {

+            writer.write("null");

+        } else if (value instanceof JSONObject) {

+            ((JSONObject) value).write(writer, indentFactor, indent);

+        } else if (value instanceof JSONArray) {

+            ((JSONArray) value).write(writer, indentFactor, indent);

+        } else if (value instanceof Map) {

+            new JSONObject((Map<String, Object>) value).write(writer, indentFactor, indent);

+        } else if (value instanceof Collection) {

+            new JSONArray((Collection<Object>) value).write(writer, indentFactor,

+                    indent);

+        } else if (value.getClass().isArray()) {

+            new JSONArray(value).write(writer, indentFactor, indent);

+        } else if (value instanceof Number) {

+            writer.write(numberToString((Number) value));

+        } else if (value instanceof Boolean) {

+            writer.write(value.toString());

+        } else if (value instanceof JSONString) {

+            Object o;

+            try {

+                o = ((JSONString) value).toJSONString();

+            } catch (Exception e) {

+                throw new JSONException(e);

+            }

+            writer.write(o != null ? o.toString() : quote(value.toString()));

+        } else {

+            quote(value.toString(), writer);

+        }

+        return writer;

+    }

+

+    static final void indent(Writer writer, int indent) throws IOException {

+        for (int i = 0; i < indent; i += 1) {

+            writer.write(' ');

+        }

+    }

+

+    /**

+     * Write the contents of the JSONObject as JSON text to a writer. For

+     * compactness, no whitespace is added.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return The writer.

+     * @throws JSONException

+     */

+    Writer write(Writer writer, int indentFactor, int indent)

+            throws JSONException {

+        try {

+            boolean commanate = false;

+            final int length = this.length();

+            Iterator<String> keys = this.keys();

+            writer.write('{');

+

+            if (length == 1) {

+                Object key = keys.next();

+                writer.write(quote(key.toString()));

+                writer.write(':');

+                if (indentFactor > 0) {

+                    writer.write(' ');

+                }

+                writeValue(writer, this.map.get(key), indentFactor, indent);

+            } else if (length != 0) {

+                final int newindent = indent + indentFactor;

+                while (keys.hasNext()) {

+                    Object key = keys.next();

+                    if (commanate) {

+                        writer.write(',');

+                    }

+                    if (indentFactor > 0) {

+                        writer.write('\n');

+                    }

+                    indent(writer, newindent);

+                    writer.write(quote(key.toString()));

+                    writer.write(':');

+                    if (indentFactor > 0) {

+                        writer.write(' ');

+                    }

+                    writeValue(writer, this.map.get(key), indentFactor,

+                            newindent);

+                    commanate = true;

+                }

+                if (indentFactor > 0) {

+                    writer.write('\n');

+                }

+                indent(writer, indent);

+            }

+            writer.write('}');

+            return writer;

+        } catch (IOException exception) {

+            throw new JSONException(exception);

+        }

+     }

+}

diff --git a/datarouter-prov/src/main/java/org/json/JSONString.java b/datarouter-prov/src/main/java/org/json/JSONString.java
new file mode 100644
index 0000000..d01ae33
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONString.java
@@ -0,0 +1,40 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+/**

+ * The <code>JSONString</code> interface allows a <code>toJSONString()</code>

+ * method so that a class can change the behavior of

+ * <code>JSONObject.toString()</code>, <code>JSONArray.toString()</code>,

+ * and <code>JSONWriter.value(</code>Object<code>)</code>. The

+ * <code>toJSONString</code> method will be used instead of the default behavior

+ * of using the Object's <code>toString()</code> method and quoting the result.

+ */

+public interface JSONString {

+    /**

+     * The <code>toJSONString</code> method allows a class to produce its own JSON

+     * serialization.

+     *

+     * @return A strictly syntactically correct JSON text.

+     */

+    public String toJSONString();

+}

diff --git a/datarouter-prov/src/main/java/org/json/JSONStringer.java b/datarouter-prov/src/main/java/org/json/JSONStringer.java
new file mode 100644
index 0000000..91b5877
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONStringer.java
@@ -0,0 +1,100 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2006 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.io.StringWriter;

+

+/**

+ * JSONStringer provides a quick and convenient way of producing JSON text.

+ * The texts produced strictly conform to JSON syntax rules. No whitespace is

+ * added, so the results are ready for transmission or storage. Each instance of

+ * JSONStringer can produce one JSON text.

+ * <p>

+ * A JSONStringer instance provides a <code>value</code> method for appending

+ * values to the

+ * text, and a <code>key</code>

+ * method for adding keys before values in objects. There are <code>array</code>

+ * and <code>endArray</code> methods that make and bound array values, and

+ * <code>object</code> and <code>endObject</code> methods which make and bound

+ * object values. All of these methods return the JSONWriter instance,

+ * permitting cascade style. For example, <pre>

+ * myString = new JSONStringer()

+ *     .object()

+ *         .key("JSON")

+ *         .value("Hello, World!")

+ *     .endObject()

+ *     .toString();</pre> which produces the string <pre>

+ * {"JSON":"Hello, World!"}</pre>

+ * <p>

+ * The first method called must be <code>array</code> or <code>object</code>.

+ * There are no methods for adding commas or colons. JSONStringer adds them for

+ * you. Objects and arrays can be nested up to 20 levels deep.

+ * <p>

+ * This can sometimes be easier than using a JSONObject to build a string.

+ * @author JSON.org

+ * @version 2008-09-18

+ */

+public class JSONStringer extends JSONWriter {

+    /**

+     * Make a fresh JSONStringer. It can be used to build one JSON text.

+     */

+    public JSONStringer() {

+        super(new StringWriter());

+    }

+

+    /**

+     * Return the JSON text. This method is used to obtain the product of the

+     * JSONStringer instance. It will return <code>null</code> if there was a

+     * problem in the construction of the JSON text (such as the calls to

+     * <code>array</code> were not properly balanced with calls to

+     * <code>endArray</code>).

+     * @return The JSON text.

+     */

+    public String toString() {

+        return this.mode == 'd' ? this.writer.toString() : null;

+    }

+}

diff --git a/datarouter-prov/src/main/java/org/json/JSONTokener.java b/datarouter-prov/src/main/java/org/json/JSONTokener.java
new file mode 100644
index 0000000..816f52e
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONTokener.java
@@ -0,0 +1,468 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+import java.io.BufferedReader;

+import java.io.IOException;

+import java.io.InputStream;

+import java.io.InputStreamReader;

+import java.io.Reader;

+import java.io.StringReader;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * A JSONTokener takes a source string and extracts characters and tokens from

+ * it. It is used by the JSONObject and JSONArray constructors to parse

+ * JSON source strings.

+ * @author JSON.org

+ * @version 2012-02-16

+ */

+public class JSONTokener {

+

+    private long    character;

+    private boolean eof;

+    private long    index;

+    private long    line;

+    private char    previous;

+    private Reader  reader;

+    private boolean usePrevious;

+

+

+    /**

+     * Construct a JSONTokener from a Reader.

+     *

+     * @param reader     A reader.

+     */

+    public JSONTokener(Reader reader) {

+        this.reader = reader.markSupported()

+            ? reader

+            : new BufferedReader(reader);

+        this.eof = false;

+        this.usePrevious = false;

+        this.previous = 0;

+        this.index = 0;

+        this.character = 1;

+        this.line = 1;

+    }

+

+

+    /**

+     * Construct a JSONTokener from an InputStream.

+     */

+    public JSONTokener(InputStream inputStream) throws JSONException {

+        this(new InputStreamReader(inputStream));

+    }

+

+

+    /**

+     * Construct a JSONTokener from a string.

+     *

+     * @param s     A source string.

+     */

+    public JSONTokener(String s) {

+        this(new StringReader(s));

+    }
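+

+    // Illustrative sketch (editor's note, not part of the imported source): a tokener is normally

+    // handed to a JSONObject or JSONArray constructor in this package, e.g. (assuming the

+    // JSONObject(JSONTokener) constructor):

+    //   JSONTokener tokener = new JSONTokener("{\"name\": \"dr\", \"port\": 8080}");

+    //   JSONObject jo = new JSONObject(tokener);

+    //   jo.getInt("port");   // 8080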

+

+

+    /**

+     * Back up one character. This provides a sort of lookahead capability,

+     * so that you can test for a digit or letter before attempting to parse

+     * the next number or identifier.

+     */

+    public void back() throws JSONException {

+        if (this.usePrevious || this.index <= 0) {

+            throw new JSONException("Stepping back two steps is not supported");

+        }

+        this.index -= 1;

+        this.character -= 1;

+        this.usePrevious = true;

+        this.eof = false;

+    }

+

+

+    /**

+     * Get the hex value of a character (base16).

+     * @param c A character between '0' and '9' or between 'A' and 'F' or

+     * between 'a' and 'f'.

+     * @return  An int between 0 and 15, or -1 if c was not a hex digit.

+     */

+    public static int dehexchar(char c) {

+        if (c >= '0' && c <= '9') {

+            return c - '0';

+        }

+        if (c >= 'A' && c <= 'F') {

+            return c - ('A' - 10);

+        }

+        if (c >= 'a' && c <= 'f') {

+            return c - ('a' - 10);

+        }

+        return -1;

+    }

+

+    public boolean end() {

+        return this.eof && !this.usePrevious;

+    }

+

+

+    /**

+     * Determine if the source string still contains characters that next()

+     * can consume.

+     * @return true if not yet at the end of the source.

+     */

+    public boolean more() throws JSONException {

+        this.next();

+        if (this.end()) {

+            return false;

+        }

+        this.back();

+        return true;

+    }

+

+

+    /**

+     * Get the next character in the source string.

+     *

+     * @return The next character, or 0 if past the end of the source string.

+     */

+    public char next() throws JSONException {

+        int c;

+        if (this.usePrevious) {

+            this.usePrevious = false;

+            c = this.previous;

+        } else {

+            try {

+                c = this.reader.read();

+            } catch (IOException exception) {

+                throw new JSONException(exception);

+            }

+

+            if (c <= 0) { // End of stream

+                this.eof = true;

+                c = 0;

+            }

+        }

+        this.index += 1;

+        if (this.previous == '\r') {

+            this.line += 1;

+            this.character = c == '\n' ? 0 : 1;

+        } else if (c == '\n') {

+            this.line += 1;

+            this.character = 0;

+        } else {

+            this.character += 1;

+        }

+        this.previous = (char) c;

+        return this.previous;

+    }

+

+

+    /**

+     * Consume the next character, and check that it matches a specified

+     * character.

+     * @param c The character to match.

+     * @return The character.

+     * @throws JSONException if the character does not match.

+     */

+    public char next(char c) throws JSONException {

+        char n = this.next();

+        if (n != c) {

+            throw this.syntaxError("Expected '" + c + "' and instead saw '" +

+                    n + "'");

+        }

+        return n;

+    }

+

+

+    /**

+     * Get the next n characters.

+     *

+     * @param n     The number of characters to take.

+     * @return      A string of n characters.

+     * @throws JSONException

+     *   Substring bounds error if there are not

+     *   n characters remaining in the source string.

+     */

+     public String next(int n) throws JSONException {

+         if (n == 0) {

+             return "";

+         }

+

+         char[] chars = new char[n];

+         int pos = 0;

+

+         while (pos < n) {

+             chars[pos] = this.next();

+             if (this.end()) {

+                 throw this.syntaxError("Substring bounds error");

+             }

+             pos += 1;

+         }

+         return new String(chars);

+     }

+

+

+    /**

+     * Get the next char in the string, skipping whitespace.

+     * @throws JSONException

+     * @return  A character, or 0 if there are no more characters.

+     */

+    public char nextClean() throws JSONException {

+        for (;;) {

+            char c = this.next();

+            if (c == 0 || c > ' ') {

+                return c;

+            }

+        }

+    }

+

+

+    /**

+     * Return the characters up to the next close quote character.

+     * Backslash processing is done. The formal JSON format does not

+     * allow strings in single quotes, but an implementation is allowed to

+     * accept them.

+     * @param quote The quoting character, either

+     *      <code>"</code>&nbsp;<small>(double quote)</small> or

+     *      <code>'</code>&nbsp;<small>(single quote)</small>.

+     * @return      A String.

+     * @throws JSONException Unterminated string.

+     */

+    public String nextString(char quote) throws JSONException {

+        char c;

+        StringBuffer sb = new StringBuffer();

+        for (;;) {

+            c = this.next();

+            switch (c) {

+            case 0:

+            case '\n':

+            case '\r':

+                throw this.syntaxError("Unterminated string");

+            case '\\':

+                c = this.next();

+                switch (c) {

+                case 'b':

+                    sb.append('\b');

+                    break;

+                case 't':

+                    sb.append('\t');

+                    break;

+                case 'n':

+                    sb.append('\n');

+                    break;

+                case 'f':

+                    sb.append('\f');

+                    break;

+                case 'r':

+                    sb.append('\r');

+                    break;

+                case 'u':

+                    sb.append((char)Integer.parseInt(this.next(4), 16));

+                    break;

+                case '"':

+                case '\'':

+                case '\\':

+                case '/':

+                    sb.append(c);

+                    break;

+                default:

+                    throw this.syntaxError("Illegal escape.");

+                }

+                break;

+            default:

+                if (c == quote) {

+                    return sb.toString();

+                }

+                sb.append(c);

+            }

+        }

+    }

+

+

+    /**

+     * Get the text up to but not including the specified character or the

+     * end of line, whichever comes first.

+     * @param  delimiter A delimiter character.

+     * @return   A string.

+     */

+    public String nextTo(char delimiter) throws JSONException {

+        StringBuffer sb = new StringBuffer();

+        for (;;) {

+            char c = this.next();

+            if (c == delimiter || c == 0 || c == '\n' || c == '\r') {

+                if (c != 0) {

+                    this.back();

+                }

+                return sb.toString().trim();

+            }

+            sb.append(c);

+        }

+    }

+

+

+    /**

+     * Get the text up to but not including one of the specified delimiter

+     * characters or the end of line, whichever comes first.

+     * @param delimiters A set of delimiter characters.

+     * @return A string, trimmed.

+     */

+    public String nextTo(String delimiters) throws JSONException {

+        char c;

+        StringBuffer sb = new StringBuffer();

+        for (;;) {

+            c = this.next();

+            if (delimiters.indexOf(c) >= 0 || c == 0 ||

+                    c == '\n' || c == '\r') {

+                if (c != 0) {

+                    this.back();

+                }

+                return sb.toString().trim();

+            }

+            sb.append(c);

+        }

+    }

+

+

+    /**

+     * Get the next value. The value can be a Boolean, Double, Integer,

+     * JSONArray, JSONObject, Long, or String, or the JSONObject.NULL object.

+     * @throws JSONException If syntax error.

+     *

+     * @return An object.

+     */

+    public Object nextValue() throws JSONException {

+        char c = this.nextClean();

+        String string;

+

+        switch (c) {

+            case '"':

+            case '\'':

+                return this.nextString(c);

+            case '{':

+                this.back();

+                return new JSONObject(this);

+            case '[':

+                this.back();

+                return new JSONArray(this);

+        }

+

+        /*

+         * Handle unquoted text. This could be the values true, false, or

+         * null, or it can be a number. An implementation (such as this one)

+         * is allowed to also accept non-standard forms.

+         *

+         * Accumulate characters until we reach the end of the text or a

+         * formatting character.

+         */

+

+        StringBuffer sb = new StringBuffer();

+        while (c >= ' ' && ",:]}/\\\"[{;=#".indexOf(c) < 0) {

+            sb.append(c);

+            c = this.next();

+        }

+        this.back();

+

+        string = sb.toString().trim();

+        if ("".equals(string)) {

+            throw this.syntaxError("Missing value");

+        }

+        return JSONObject.stringToValue(string);

+    }
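
+    // Illustrative sketch (editor's note, not part of the original import):

+    // the tokener can be driven directly; this assumes the JSONTokener(String)

+    // constructor declared earlier in this file.

+    //   JSONTokener tok = new JSONTokener("{\"id\": 7, \"ok\": true}");

+    //   JSONObject obj = (JSONObject) tok.nextValue();

+    //   // obj.getInt("id") == 7, obj.getBoolean("ok") == true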

+

+

+    /**

+     * Skip characters until the next character is the requested character.

+     * If the requested character is not found, no characters are skipped.

+     * @param to A character to skip to.

+     * @return The requested character, or zero if the requested character

+     * is not found.

+     */

+    public char skipTo(char to) throws JSONException {

+        char c;

+        try {

+            long startIndex = this.index;

+            long startCharacter = this.character;

+            long startLine = this.line;

+            this.reader.mark(1000000);

+            do {

+                c = this.next();

+                if (c == 0) {

+                    this.reader.reset();

+                    this.index = startIndex;

+                    this.character = startCharacter;

+                    this.line = startLine;

+                    return c;

+                }

+            } while (c != to);

+        } catch (IOException exc) {

+            throw new JSONException(exc);

+        }

+

+        this.back();

+        return c;

+    }
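
+    // Illustrative sketch (editor's note, not part of the original import):

+    //   JSONTokener tok = new JSONTokener("abc:def");

+    //   tok.skipTo(':');   // returns ':' and backs up, so...

+    //   tok.next();        // ...the next character read is ':'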

+

+

+    /**

+     * Make a JSONException to signal a syntax error.

+     *

+     * @param message The error message.

+     * @return  A JSONException object, suitable for throwing.

+     */

+    public JSONException syntaxError(String message) {

+        return new JSONException(message + this.toString());

+    }

+

+

+    /**

+     * Make a printable string of this JSONTokener.

+     *

+     * @return " at {index} [character {character} line {line}]"

+     */

+    public String toString() {

+        return " at " + this.index + " [character " + this.character + " line " +

+            this.line + "]";

+    }

+}

diff --git a/datarouter-prov/src/main/java/org/json/JSONWriter.java b/datarouter-prov/src/main/java/org/json/JSONWriter.java
new file mode 100644
index 0000000..a9b0bab
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/JSONWriter.java
@@ -0,0 +1,349 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+import java.io.IOException;

+import java.io.Writer;

+

+/*

+Copyright (c) 2006 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * JSONWriter provides a quick and convenient way of producing JSON text.

+ * The texts produced strictly conform to JSON syntax rules. No whitespace is

+ * added, so the results are ready for transmission or storage. Each instance of

+ * JSONWriter can produce one JSON text.

+ * <p>

+ * A JSONWriter instance provides a <code>value</code> method for appending

+ * values to the

+ * text, and a <code>key</code>

+ * method for adding keys before values in objects. There are <code>array</code>

+ * and <code>endArray</code> methods that make and bound array values, and

+ * <code>object</code> and <code>endObject</code> methods which make and bound

+ * object values. All of these methods return the JSONWriter instance,

+ * permitting a cascade style. For example, <pre>

+ * new JSONWriter(myWriter)

+ *     .object()

+ *         .key("JSON")

+ *         .value("Hello, World!")

+ *     .endObject();</pre> which writes <pre>

+ * {"JSON":"Hello, World!"}</pre>

+ * <p>

+ * The first method called must be <code>array</code> or <code>object</code>.

+ * There are no methods for adding commas or colons. JSONWriter adds them for

+ * you. Objects and arrays can be nested up to 200 levels deep (the limit set by <code>maxdepth</code>).

+ * <p>

+ * This can sometimes be easier than using a JSONObject to build a string.

+ * @author JSON.org

+ * @version 2011-11-24

+ */

+public class JSONWriter {

+    private static final int maxdepth = 200;

+

+    /**

+     * The comma flag determines if a comma should be output before the next

+     * value.

+     */

+    private boolean comma;

+

+    /**

+     * The current mode. Values:

+     * 'a' (array),

+     * 'd' (done),

+     * 'i' (initial),

+     * 'k' (key),

+     * 'o' (object).

+     */

+    protected char mode;

+

+    /**

+     * The object/array stack.

+     */

+    private final JSONObject stack[];

+

+    /**

+     * The stack top index. A value of 0 indicates that the stack is empty.

+     */

+    private int top;

+

+    /**

+     * The writer that will receive the output.

+     */

+    protected Writer writer;

+

+    /**

+     * Make a fresh JSONWriter. It can be used to build one JSON text.

+     */

+    public JSONWriter(Writer w) {

+        this.comma = false;

+        this.mode = 'i';

+        this.stack = new JSONObject[maxdepth];

+        this.top = 0;

+        this.writer = w;

+    }

+

+    /**

+     * Append a value.

+     * @param string A string value.

+     * @return this

+     * @throws JSONException If the value is out of sequence.

+     */

+    private JSONWriter append(String string) throws JSONException {

+        if (string == null) {

+            throw new JSONException("Null pointer");

+        }

+        if (this.mode == 'o' || this.mode == 'a') {

+            try {

+                if (this.comma && this.mode == 'a') {

+                    this.writer.write(',');

+                }

+                this.writer.write(string);

+            } catch (IOException e) {

+                throw new JSONException(e);

+            }

+            if (this.mode == 'o') {

+                this.mode = 'k';

+            }

+            this.comma = true;

+            return this;

+        }

+        throw new JSONException("Value out of sequence.");

+    }

+

+    /**

+     * Begin appending a new array. All values until the balancing

+     * <code>endArray</code> will be appended to this array. The

+     * <code>endArray</code> method must be called to mark the array's end.

+     * @return this

+     * @throws JSONException If the nesting is too deep, or if the array is

+     * started in the wrong place (for example as a key or after the end of the

+     * outermost array or object).

+     */

+    public JSONWriter array() throws JSONException {

+        if (this.mode == 'i' || this.mode == 'o' || this.mode == 'a') {

+            this.push(null);

+            this.append("[");

+            this.comma = false;

+            return this;

+        }

+        throw new JSONException("Misplaced array.");

+    }

+

+    /**

+     * End something.

+     * @param mode Mode

+     * @param c Closing character

+     * @return this

+     * @throws JSONException If unbalanced.

+     */

+    private JSONWriter end(char mode, char c) throws JSONException {

+        if (this.mode != mode) {

+            throw new JSONException(mode == 'a'

+                ? "Misplaced endArray."

+                : "Misplaced endObject.");

+        }

+        this.pop(mode);

+        try {

+            this.writer.write(c);

+        } catch (IOException e) {

+            throw new JSONException(e);

+        }

+        this.comma = true;

+        return this;

+    }

+

+    /**

+     * End an array. This method must be called to balance calls to

+     * <code>array</code>.

+     * @return this

+     * @throws JSONException If incorrectly nested.

+     */

+    public JSONWriter endArray() throws JSONException {

+        return this.end('a', ']');

+    }

+

+    /**

+     * End an object. This method must be called to balance calls to

+     * <code>object</code>.

+     * @return this

+     * @throws JSONException If incorrectly nested.

+     */

+    public JSONWriter endObject() throws JSONException {

+        return this.end('k', '}');

+    }

+

+    /**

+     * Append a key. The key will be associated with the next value. In an

+     * object, every value must be preceded by a key.

+     * @param string A key string.

+     * @return this

+     * @throws JSONException If the key is out of place (for example, keys

+     *  do not belong in arrays) or if the key is null.

+     */

+    public JSONWriter key(String string) throws JSONException {

+        if (string == null) {

+            throw new JSONException("Null key.");

+        }

+        if (this.mode == 'k') {

+            try {

+                this.stack[this.top - 1].putOnce(string, Boolean.TRUE);

+                if (this.comma) {

+                    this.writer.write(',');

+                }

+                this.writer.write(JSONObject.quote(string));

+                this.writer.write(':');

+                this.comma = false;

+                this.mode = 'o';

+                return this;

+            } catch (IOException e) {

+                throw new JSONException(e);

+            }

+        }

+        throw new JSONException("Misplaced key.");

+    }
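
+    // Illustrative sketch (editor's note, not part of the original import):

+    // a nested structure in the cascade style described in the class comment;

+    // assumes a java.io.StringWriter as the sink.

+    //   StringWriter sw = new StringWriter();

+    //   new JSONWriter(sw).object()

+    //       .key("ids").array().value(1).value(2).endArray()

+    //   .endObject();

+    //   // sw.toString() -> {"ids":[1,2]}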

+

+

+    /**

+     * Begin appending a new object. All keys and values until the balancing

+     * <code>endObject</code> will be appended to this object. The

+     * <code>endObject</code> method must be called to mark the object's end.

+     * @return this

+     * @throws JSONException If the nesting is too deep, or if the object is

+     * started in the wrong place (for example as a key or after the end of the

+     * outermost array or object).

+     */

+    public JSONWriter object() throws JSONException {

+        if (this.mode == 'i') {

+            this.mode = 'o';

+        }

+        if (this.mode == 'o' || this.mode == 'a') {

+            this.append("{");

+            this.push(new JSONObject());

+            this.comma = false;

+            return this;

+        }

+        throw new JSONException("Misplaced object.");

+

+    }

+

+

+    /**

+     * Pop an array or object scope.

+     * @param c The scope to close.

+     * @throws JSONException If nesting is wrong.

+     */

+    private void pop(char c) throws JSONException {

+        if (this.top <= 0) {

+            throw new JSONException("Nesting error.");

+        }

+        char m = this.stack[this.top - 1] == null ? 'a' : 'k';

+        if (m != c) {

+            throw new JSONException("Nesting error.");

+        }

+        this.top -= 1;

+        this.mode = this.top == 0

+            ? 'd'

+            : this.stack[this.top - 1] == null

+            ? 'a'

+            : 'k';

+    }

+

+    /**

+     * Push an array or object scope.

+     * @param jo The scope to open.

+     * @throws JSONException If nesting is too deep.

+     */

+    private void push(JSONObject jo) throws JSONException {

+        if (this.top >= maxdepth) {

+            throw new JSONException("Nesting too deep.");

+        }

+        this.stack[this.top] = jo;

+        this.mode = jo == null ? 'a' : 'k';

+        this.top += 1;

+    }

+

+

+    /**

+     * Append either the value <code>true</code> or the value

+     * <code>false</code>.

+     * @param b A boolean.

+     * @return this

+     * @throws JSONException If the value is out of sequence.

+     */

+    public JSONWriter value(boolean b) throws JSONException {

+        return this.append(b ? "true" : "false");

+    }

+

+    /**

+     * Append a double value.

+     * @param d A double.

+     * @return this

+     * @throws JSONException If the number is not finite.

+     */

+    public JSONWriter value(double d) throws JSONException {

+        return this.value(new Double(d));

+    }

+

+    /**

+     * Append a long value.

+     * @param l A long.

+     * @return this

+     * @throws JSONException If the value is out of sequence.

+     */

+    public JSONWriter value(long l) throws JSONException {

+        return this.append(Long.toString(l));

+    }

+

+

+    /**

+     * Append an object value.

+     * @param object The object to append. It can be null, or a Boolean, Number,

+     *   String, JSONObject, or JSONArray, or an object that implements JSONString.

+     * @return this

+     * @throws JSONException If the value is out of sequence.

+     */

+    public JSONWriter value(Object object) throws JSONException {

+        return this.append(JSONObject.valueToString(object));

+    }

+}

diff --git a/datarouter-prov/src/main/java/org/json/LOGJSONObject.java b/datarouter-prov/src/main/java/org/json/LOGJSONObject.java
new file mode 100644
index 0000000..2f18c54
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/LOGJSONObject.java
@@ -0,0 +1,1653 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.io.IOException;

+import java.io.StringWriter;

+import java.io.Writer;

+import java.lang.reflect.Field;

+import java.lang.reflect.Method;

+import java.lang.reflect.Modifier;

+import java.util.Collection;

+import java.util.Enumeration;

+import java.util.LinkedHashMap;

+import java.util.Iterator;

+import java.util.Locale;

+import java.util.Map;

+import java.util.ResourceBundle;

+import java.util.Set;

+

+/**

+ * A JSONObject is an unordered collection of name/value pairs. Its external

+ * form is a string wrapped in curly braces with colons between the names and

+ * values, and commas between the values and names. The internal form is an

+ * object having <code>get</code> and <code>opt</code> methods for accessing the

+ * values by name, and <code>put</code> methods for adding or replacing values

+ * by name. The values can be any of these types: <code>Boolean</code>,

+ * <code>JSONArray</code>, <code>JSONObject</code>, <code>Number</code>,

+ * <code>String</code>, or the <code>JSONObject.NULL</code> object. A JSONObject

+ * constructor can be used to convert an external form JSON text into an

+ * internal form whose values can be retrieved with the <code>get</code> and

+ * <code>opt</code> methods, or to convert values into a JSON text using the

+ * <code>put</code> and <code>toString</code> methods. A <code>get</code> method

+ * returns a value if one can be found, and throws an exception if one cannot be

+ * found. An <code>opt</code> method returns a default value instead of throwing

+ * an exception, and so is useful for obtaining optional values.

+ * <p>

+ * The generic <code>get()</code> and <code>opt()</code> methods return an

+ * object, which you can cast or query for type. There are also typed

+ * <code>get</code> and <code>opt</code> methods that do type checking and type

+ * coercion for you. The opt methods differ from the get methods in that they do

+ * not throw. Instead, they return a specified value, such as null.

+ * <p>

+ * The <code>put</code> methods add or replace values in an object. For example,

+ *

+ * <pre>

+ * myString = new JSONObject().put(&quot;JSON&quot;, &quot;Hello, World!&quot;).toString();

+ * </pre>

+ *

+ * produces the string <code>{"JSON": "Hello, World!"}</code>.

+ * <p>

+ * The texts produced by the <code>toString</code> methods strictly conform to

+ * the JSON syntax rules. The constructors are more forgiving in the texts they

+ * will accept:

+ * <ul>

+ * <li>An extra <code>,</code>&nbsp;<small>(comma)</small> may appear just

+ * before the closing brace.</li>

+ * <li>Strings may be quoted with <code>'</code>&nbsp;<small>(single

+ * quote)</small>.</li>

+ * <li>Strings do not need to be quoted at all if they do not begin with a quote

+ * or single quote, and if they do not contain leading or trailing spaces, and

+ * if they do not contain any of these characters:

+ * <code>{ } [ ] / \ : , = ; #</code> and if they do not look like numbers and

+ * if they are not the reserved words <code>true</code>, <code>false</code>, or

+ * <code>null</code>.</li>

+ * <li>Keys can be followed by <code>=</code> or <code>=></code> as well as by

+ * <code>:</code>.</li>

+ * <li>Values can be followed by <code>;</code> <small>(semicolon)</small> as

+ * well as by <code>,</code> <small>(comma)</small>.</li>

+ * </ul>

+ *

+ * @author JSON.org

+ * @version 2012-12-01

+ */

+public class LOGJSONObject {

+    /**

+     * The maximum number of keys in the key pool.

+     */

+     private static final int keyPoolSize = 100;

+

+   /**

+     * Key pooling is like string interning, but without permanently tying up

+     * memory. To help conserve memory, storage of duplicated key strings in

+     * JSONObjects will be avoided by using a key pool to manage unique key

+     * string objects. This is used by JSONObject.put(string, object).

+     */

+     private static Map<String,Object> keyPool = new LinkedHashMap<String,Object>(keyPoolSize);

+

+    /**

+     * JSONObject.NULL is equivalent to the value that JavaScript calls null,

+     * whilst Java's null is equivalent to the value that JavaScript calls

+     * undefined.

+     */

+     private static final class Null {

+

+        /**

+         * There is only intended to be a single instance of the NULL object,

+         * so the clone method returns itself.

+         * @return     NULL.

+         */

+        protected final Object clone() {

+            return this;

+        }

+

+        /**

+         * A Null object is equal to the null value and to itself.

+         * @param object    An object to test for nullness.

+         * @return true if the object parameter is the JSONObject.NULL object

+         *  or null.

+         */

+        public boolean equals(Object object) {

+            return object == null || object == this;

+        }

+

+        /**

+         * Get the "null" string value.

+         * @return The string "null".

+         */

+        public String toString() {

+            return "null";

+        }

+    }

+

+

+    /**

+     * The map where the JSONObject's properties are kept.

+     */

+    private final Map<String,Object> map;

+

+

+    /**

+     * It is sometimes more convenient and less ambiguous to have a

+     * <code>NULL</code> object than to use Java's <code>null</code> value.

+     * <code>JSONObject.NULL.equals(null)</code> returns <code>true</code>.

+     * <code>JSONObject.NULL.toString()</code> returns <code>"null"</code>.

+     */

+    public static final Object NULL = new Null();

+

+

+    /**

+     * Construct an empty JSONObject.

+     */

+    public LOGJSONObject() {

+        this.map = new LinkedHashMap<String,Object>();

+    }

+

+

+    /**

+     * Construct a JSONObject from a subset of another JSONObject.

+     * An array of strings is used to identify the keys that should be copied.

+     * Missing keys are ignored.

+     * @param jo A JSONObject.

+     * @param names An array of strings.

+     * @throws JSONException If a value is a non-finite number or if a name is duplicated.

+     */

+    public LOGJSONObject(LOGJSONObject jo, String[] names) {

+        this();

+        for (int i = 0; i < names.length; i += 1) {

+            try {

+                this.putOnce(names[i], jo.opt(names[i]));

+            } catch (Exception ignore) {

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a JSONTokener.

+     * @param x A JSONTokener object containing the source string.

+     * @throws JSONException If there is a syntax error in the source string

+     *  or a duplicated key.

+     */

+    public LOGJSONObject(JSONTokener x) throws JSONException {

+        this();

+        char c;

+        String key;

+

+        if (x.nextClean() != '{') {

+            throw x.syntaxError("A JSONObject text must begin with '{'");

+        }

+        for (;;) {

+            c = x.nextClean();

+            switch (c) {

+            case 0:

+                throw x.syntaxError("A JSONObject text must end with '}'");

+            case '}':

+                return;

+            default:

+                x.back();

+                key = x.nextValue().toString();

+            }

+

+// The key is followed by ':'. We will also tolerate '=' or '=>'.

+

+            c = x.nextClean();

+            if (c == '=') {

+                if (x.next() != '>') {

+                    x.back();

+                }

+            } else if (c != ':') {

+                throw x.syntaxError("Expected a ':' after a key");

+            }

+            this.putOnce(key, x.nextValue());

+

+// Pairs are separated by ','. We will also tolerate ';'.

+

+            switch (x.nextClean()) {

+            case ';':

+            case ',':

+                if (x.nextClean() == '}') {

+                    return;

+                }

+                x.back();

+                break;

+            case '}':

+                return;

+            default:

+                throw x.syntaxError("Expected a ',' or '}'");

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a Map.

+     *

+     * @param map A map object that can be used to initialize the contents of

+     *  the JSONObject.

+     * @throws JSONException

+     */

+    public LOGJSONObject(Map<String,Object> map) {

+        this.map = new LinkedHashMap<String,Object>();

+        if (map != null) {

+            Iterator<Map.Entry<String,Object>> i = map.entrySet().iterator();

+            while (i.hasNext()) {

+                Map.Entry<String,Object> e = i.next();

+                Object value = e.getValue();

+                if (value != null) {

+                    this.map.put(e.getKey(), wrap(value));

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from an Object using bean getters.

+     * It reflects on all of the public methods of the object.

+     * For each of the methods with no parameters and a name starting

+     * with <code>"get"</code> or <code>"is"</code> followed by an uppercase letter,

+     * the method is invoked, and a key and the value returned from the getter method

+     * are put into the new JSONObject.

+     *

+     * The key is formed by removing the <code>"get"</code> or <code>"is"</code> prefix.

+     * If the second remaining character is not upper case, then the first

+     * character is converted to lower case.

+     *

+     * For example, if an object has a method named <code>"getName"</code>, and

+     * if the result of calling <code>object.getName()</code> is <code>"Larry Fine"</code>,

+     * then the JSONObject will contain <code>"name": "Larry Fine"</code>.

+     *

+     * @param bean An object that has getter methods that should be used

+     * to make a JSONObject.

+     */

+    public LOGJSONObject(Object bean) {

+        this();

+        this.populateMap(bean);

+    }
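
+    // Illustrative sketch (editor's note, hypothetical bean, not part of the

+    // original import): given

+    //   class User { public String getName() { return "Larry Fine"; } }

+    // then new LOGJSONObject(new User()).toString() produces

+    //   {"name":"Larry Fine"}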

+

+

+    /**

+     * Construct a JSONObject from an Object, using reflection to find the

+     * public members. The resulting JSONObject's keys will be the strings

+     * from the names array, and the values will be the field values associated

+     * with those keys in the object. If a key is not found or not visible,

+     * then it will not be copied into the new JSONObject.

+     * @param object An object that has fields that should be used to make a

+     * JSONObject.

+     * @param names An array of strings, the names of the fields to be obtained

+     * from the object.

+     */

+    public LOGJSONObject(Object object, String names[]) {

+        this();

+        Class<? extends Object> c = object.getClass();

+        for (int i = 0; i < names.length; i += 1) {

+            String name = names[i];

+            try {

+                this.putOpt(name, c.getField(name).get(object));

+            } catch (Exception ignore) {

+            }

+        }

+    }

+

+

+    /**

+     * Construct a JSONObject from a source JSON text string.

+     * This is the most commonly used JSONObject constructor.

+     * @param source    A string beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @exception JSONException If there is a syntax error in the source

+     *  string or a duplicated key.

+     */

+    public LOGJSONObject(String source) throws JSONException {

+        this(new JSONTokener(source));

+    }

+

+

+    /**

+     * Construct a JSONObject from a ResourceBundle.

+     * @param baseName The ResourceBundle base name.

+     * @param locale The Locale to load the ResourceBundle for.

+     * @throws JSONException If any JSONExceptions are detected.

+     */

+    public LOGJSONObject(String baseName, Locale locale) throws JSONException {

+        this();

+        ResourceBundle bundle = ResourceBundle.getBundle(baseName, locale,

+                Thread.currentThread().getContextClassLoader());

+

+// Iterate through the keys in the bundle.

+

+        Enumeration<?> keys = bundle.getKeys();

+        while (keys.hasMoreElements()) {

+            Object key = keys.nextElement();

+            if (key instanceof String) {

+

+// Go through the path, ensuring that there is a nested JSONObject for each

+// segment except the last. Add the value using the last segment's name into

+// the deepest nested JSONObject.

+

+                String[] path = ((String)key).split("\\.");

+                int last = path.length - 1;

+                LOGJSONObject target = this;

+                for (int i = 0; i < last; i += 1) {

+                    String segment = path[i];

+                    LOGJSONObject nextTarget = target.optJSONObject(segment);

+                    if (nextTarget == null) {

+                        nextTarget = new LOGJSONObject();

+                        target.put(segment, nextTarget);

+                    }

+                    target = nextTarget;

+                }

+                target.put(path[last], bundle.getString((String)key));

+            }

+        }

+    }

+

+

+    /**

+     * Accumulate values under a key. It is similar to the put method except

+     * that if there is already an object stored under the key then a

+     * JSONArray is stored under the key to hold all of the accumulated values.

+     * If there is already a JSONArray, then the new value is appended to it.

+     * In contrast, the put method replaces the previous value.

+     *

+     * If only one value is accumulated that is not a JSONArray, then the

+     * result will be the same as using put. But if multiple values are

+     * accumulated, then the result will be like append.

+     * @param key   A key string.

+     * @param value An object to be accumulated under the key.

+     * @return this.

+     * @throws JSONException If the value is an invalid number

+     *  or if the key is null.

+     */

+    public LOGJSONObject accumulate(

+        String key,

+        Object value

+    ) throws JSONException {

+        testValidity(value);

+        Object object = this.opt(key);

+        if (object == null) {

+            this.put(key, value instanceof JSONArray

+                    ? new JSONArray().put(value)

+                    : value);

+        } else if (object instanceof JSONArray) {

+            ((JSONArray)object).put(value);

+        } else {

+            this.put(key, new JSONArray().put(object).put(value));

+        }

+        return this;

+    }
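
+    // Illustrative sketch (editor's note, not part of the original import):

+    //   LOGJSONObject o = new LOGJSONObject();

+    //   o.accumulate("n", 1);   // {"n":1}

+    //   o.accumulate("n", 2);   // {"n":[1,2]}  - further values collect in a JSONArray

+    //   o.put("n", 3);          // {"n":3}      - put() simply replaces the value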

+

+

+    /**

+     * Append values to the array under a key. If the key does not exist in the

+     * JSONObject, then the key is put in the JSONObject with its value being a

+     * JSONArray containing the value parameter. If the key was already

+     * associated with a JSONArray, then the value parameter is appended to it.

+     * @param key   A key string.

+     * @param value An object to be accumulated under the key.

+     * @return this.

+     * @throws JSONException If the key is null or if the current value

+     *  associated with the key is not a JSONArray.

+     */

+    public LOGJSONObject append(String key, Object value) throws JSONException {

+        testValidity(value);

+        Object object = this.opt(key);

+        if (object == null) {

+            this.put(key, new JSONArray().put(value));

+        } else if (object instanceof JSONArray) {

+            this.put(key, ((JSONArray)object).put(value));

+        } else {

+            throw new JSONException("JSONObject[" + key +

+                    "] is not a JSONArray.");

+        }

+        return this;

+    }

+

+

+    /**

+     * Produce a string from a double. The string "null" will be returned if

+     * the number is not finite.

+     * @param  d A double.

+     * @return A String.

+     */

+    public static String doubleToString(double d) {

+        if (Double.isInfinite(d) || Double.isNaN(d)) {

+            return "null";

+        }

+

+// Shave off trailing zeros and decimal point, if possible.

+

+        String string = Double.toString(d);

+        if (string.indexOf('.') > 0 && string.indexOf('e') < 0 &&

+                string.indexOf('E') < 0) {

+            while (string.endsWith("0")) {

+                string = string.substring(0, string.length() - 1);

+            }

+            if (string.endsWith(".")) {

+                string = string.substring(0, string.length() - 1);

+            }

+        }

+        return string;

+    }
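
+    // Illustrative sketch (editor's note, not part of the original import):

+    //   doubleToString(20.0)        -> "20"

+    //   doubleToString(3.1400)      -> "3.14"

+    //   doubleToString(Double.NaN)  -> "null"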

+

+

+    /**

+     * Get the value object associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The object associated with the key.

+     * @throws      JSONException if the key is not found.

+     */

+    public Object get(String key) throws JSONException {

+        if (key == null) {

+            throw new JSONException("Null key.");

+        }

+        Object object = this.opt(key);

+        if (object == null) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                    "] not found.");

+        }

+        return object;

+    }

+

+

+    /**

+     * Get the boolean value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The truth.

+     * @throws      JSONException

+     *  if the value is not a Boolean or the String "true" or "false".

+     */

+    public boolean getBoolean(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object.equals(Boolean.FALSE) ||

+                (object instanceof String &&

+                ((String)object).equalsIgnoreCase("false"))) {

+            return false;

+        } else if (object.equals(Boolean.TRUE) ||

+                (object instanceof String &&

+                ((String)object).equalsIgnoreCase("true"))) {

+            return true;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a Boolean.");

+    }

+

+

+    /**

+     * Get the double value associated with a key.

+     * @param key   A key string.

+     * @return      The numeric value.

+     * @throws JSONException if the key is not found or

+     *  if the value is not a Number object and cannot be converted to a number.

+     */

+    public double getDouble(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).doubleValue()

+                : Double.parseDouble((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a number.");

+        }

+    }

+

+

+    /**

+     * Get the int value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The integer value.

+     * @throws   JSONException if the key is not found or if the value cannot

+     *  be converted to an integer.

+     */

+    public int getInt(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).intValue()

+                : Integer.parseInt((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not an int.");

+        }

+    }

+

+

+    /**

+     * Get the JSONArray value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A JSONArray which is the value.

+     * @throws      JSONException if the key is not found or

+     *  if the value is not a JSONArray.

+     */

+    public JSONArray getJSONArray(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof JSONArray) {

+            return (JSONArray)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a JSONArray.");

+    }

+

+

+    /**

+     * Get the JSONObject value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A JSONObject which is the value.

+     * @throws      JSONException if the key is not found or

+     *  if the value is not a JSONObject.

+     */

+    public LOGJSONObject getJSONObject(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof LOGJSONObject) {

+            return (LOGJSONObject)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a JSONObject.");

+    }

+

+

+    /**

+     * Get the long value associated with a key.

+     *

+     * @param key   A key string.

+     * @return      The long value.

+     * @throws   JSONException if the key is not found or if the value cannot

+     *  be converted to a long.

+     */

+    public long getLong(String key) throws JSONException {

+        Object object = this.get(key);

+        try {

+            return object instanceof Number

+                ? ((Number)object).longValue()

+                : Long.parseLong((String)object);

+        } catch (Exception e) {

+            throw new JSONException("JSONObject[" + quote(key) +

+                "] is not a long.");

+        }

+    }

+

+

+    /**

+     * Get an array of field names from a JSONObject.

+     *

+     * @return An array of field names, or null if there are no names.

+     */

+    public static String[] getNames(LOGJSONObject jo) {

+        int length = jo.length();

+        if (length == 0) {

+            return null;

+        }

+        Iterator<String> iterator = jo.keys();

+        String[] names = new String[length];

+        int i = 0;

+        while (iterator.hasNext()) {

+            names[i] = iterator.next();

+            i += 1;

+        }

+        return names;

+    }

+

+

+    /**

+     * Get an array of field names from an Object.

+     *

+     * @return An array of field names, or null if there are no names.

+     */

+    public static String[] getNames(Object object) {

+        if (object == null) {

+            return null;

+        }

+        Class<? extends Object> klass = object.getClass();

+        Field[] fields = klass.getFields();

+        int length = fields.length;

+        if (length == 0) {

+            return null;

+        }

+        String[] names = new String[length];

+        for (int i = 0; i < length; i += 1) {

+            names[i] = fields[i].getName();

+        }

+        return names;

+    }

+

+

+    /**

+     * Get the string associated with a key.

+     *

+     * @param key   A key string.

+     * @return      A string which is the value.

+     * @throws   JSONException if there is no string value for the key.

+     */

+    public String getString(String key) throws JSONException {

+        Object object = this.get(key);

+        if (object instanceof String) {

+            return (String)object;

+        }

+        throw new JSONException("JSONObject[" + quote(key) +

+            "] not a string.");

+    }

+

+

+    /**

+     * Determine if the JSONObject contains a specific key.

+     * @param key   A key string.

+     * @return      true if the key exists in the JSONObject.

+     */

+    public boolean has(String key) {

+        return this.map.containsKey(key);

+    }

+

+

+    /**

+     * Increment a property of a JSONObject. If there is no such property,

+     * create one with a value of 1. If there is such a property, and if

+     * it is an Integer, Long, Double, or Float, then add one to it.

+     * @param key  A key string.

+     * @return this.

+     * @throws JSONException If there is already a property with this name

+     * that is not an Integer, Long, Double, or Float.

+     */

+    public LOGJSONObject increment(String key) throws JSONException {

+        Object value = this.opt(key);

+        if (value == null) {

+            this.put(key, 1);

+        } else if (value instanceof Integer) {

+            this.put(key, ((Integer)value).intValue() + 1);

+        } else if (value instanceof Long) {

+            this.put(key, ((Long)value).longValue() + 1);

+        } else if (value instanceof Double) {

+            this.put(key, ((Double)value).doubleValue() + 1);

+        } else if (value instanceof Float) {

+            this.put(key, ((Float)value).floatValue() + 1);

+        } else {

+            throw new JSONException("Unable to increment [" + quote(key) + "].");

+        }

+        return this;

+    }
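
+    // Illustrative sketch (editor's note, not part of the original import):

+    //   new LOGJSONObject().increment("hits").increment("hits");

+    //   // -> {"hits":2}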

+

+

+    /**

+     * Determine if the value associated with the key is null or if there is

+     *  no value.

+     * @param key   A key string.

+     * @return      true if there is no value associated with the key or if

+     *  the value is the JSONObject.NULL object.

+     */

+    public boolean isNull(String key) {

+        return LOGJSONObject.NULL.equals(this.opt(key));

+    }

+

+

+    /**

+     * Get an enumeration of the keys of the JSONObject.

+     *

+     * @return An iterator of the keys.

+     */

+    public Iterator<String> keys() {

+        return this.keySet().iterator();

+    }

+

+

+    /**

+     * Get a set of keys of the JSONObject.

+     *

+     * @return A keySet.

+     */

+    public Set<String> keySet() {

+        return this.map.keySet();

+    }

+

+

+    /**

+     * Get the number of keys stored in the JSONObject.

+     *

+     * @return The number of keys in the JSONObject.

+     */

+    public int length() {

+        return this.map.size();

+    }

+

+

+    /**

+     * Produce a JSONArray containing the names of the elements of this

+     * JSONObject.

+     * @return A JSONArray containing the key strings, or null if the JSONObject

+     * is empty.

+     */

+    public JSONArray names() {

+        JSONArray ja = new JSONArray();

+        Iterator<String> keys = this.keys();

+        while (keys.hasNext()) {

+            ja.put(keys.next());

+        }

+        return ja.length() == 0 ? null : ja;

+    }

+

+    /**

+     * Produce a string from a Number.

+     * @param  number A Number

+     * @return A String.

+     * @throws JSONException If number is a non-finite number.

+     */

+    public static String numberToString(Number number)

+            throws JSONException {

+        if (number == null) {

+            throw new JSONException("Null pointer");

+        }

+        testValidity(number);

+

+// Shave off trailing zeros and decimal point, if possible.

+

+        String string = number.toString();

+        if (string.indexOf('.') > 0 && string.indexOf('e') < 0 &&

+                string.indexOf('E') < 0) {

+            while (string.endsWith("0")) {

+                string = string.substring(0, string.length() - 1);

+            }

+            if (string.endsWith(".")) {

+                string = string.substring(0, string.length() - 1);

+            }

+        }

+        return string;

+    }

+

+

+    /**

+     * Get an optional value associated with a key.

+     * @param key   A key string.

+     * @return      An object which is the value, or null if there is no value.

+     */

+    public Object opt(String key) {

+        return key == null ? null : this.map.get(key);

+    }

+

+

+    /**

+     * Get an optional boolean associated with a key.

+     * It returns false if there is no such key, or if the value is not

+     * Boolean.TRUE or the String "true".

+     *

+     * @param key   A key string.

+     * @return      The truth.

+     */

+    public boolean optBoolean(String key) {

+        return this.optBoolean(key, false);

+    }

+

+

+    /**

+     * Get an optional boolean associated with a key.

+     * It returns the defaultValue if there is no such key, or if it is not

+     * a Boolean or the String "true" or "false" (case insensitive).

+     *

+     * @param key              A key string.

+     * @param defaultValue     The default.

+     * @return      The truth.

+     */

+    public boolean optBoolean(String key, boolean defaultValue) {

+        try {

+            return this.getBoolean(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional double associated with a key,

+     * or NaN if there is no such key or if its value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A string which is the key.

+     * @return      An object which is the value.

+     */

+    public double optDouble(String key) {

+        return this.optDouble(key, Double.NaN);

+    }

+

+

+    /**

+     * Get an optional double associated with a key, or the

+     * defaultValue if there is no such key or if its value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      An object which is the value.

+     */

+    public double optDouble(String key, double defaultValue) {

+        try {

+            return this.getDouble(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional int value associated with a key,

+     * or zero if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @return      An object which is the value.

+     */

+    public int optInt(String key) {

+        return this.optInt(key, 0);

+    }

+

+

+    /**

+     * Get an optional int value associated with a key,

+     * or the default if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      An object which is the value.

+     */

+    public int optInt(String key, int defaultValue) {

+        try {

+            return this.getInt(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional JSONArray associated with a key.

+     * It returns null if there is no such key, or if its value is not a

+     * JSONArray.

+     *

+     * @param key   A key string.

+     * @return      A JSONArray which is the value.

+     */

+    public JSONArray optJSONArray(String key) {

+        Object o = this.opt(key);

+        return o instanceof JSONArray ? (JSONArray)o : null;

+    }

+

+

+    /**

+     * Get an optional JSONObject associated with a key.

+     * It returns null if there is no such key, or if its value is not a

+     * JSONObject.

+     *

+     * @param key   A key string.

+     * @return      A JSONObject which is the value.

+     */

+    public LOGJSONObject optJSONObject(String key) {

+        Object object = this.opt(key);

+        return object instanceof LOGJSONObject ? (LOGJSONObject)object : null;

+    }

+

+

+    /**

+     * Get an optional long value associated with a key,

+     * or zero if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key   A key string.

+     * @return      An object which is the value.

+     */

+    public long optLong(String key) {

+        return this.optLong(key, 0);

+    }

+

+

+    /**

+     * Get an optional long value associated with a key,

+     * or the default if there is no such key or if the value is not a number.

+     * If the value is a string, an attempt will be made to evaluate it as

+     * a number.

+     *

+     * @param key          A key string.

+     * @param defaultValue The default.

+     * @return             An object which is the value.

+     */

+    public long optLong(String key, long defaultValue) {

+        try {

+            return this.getLong(key);

+        } catch (Exception e) {

+            return defaultValue;

+        }

+    }

+

+

+    /**

+     * Get an optional string associated with a key.

+     * It returns an empty string if there is no such key. If the value is not

+     * a string and is not null, then it is converted to a string.

+     *

+     * @param key   A key string.

+     * @return      A string which is the value.

+     */

+    public String optString(String key) {

+        return this.optString(key, "");

+    }

+

+

+    /**

+     * Get an optional string associated with a key.

+     * It returns the defaultValue if there is no such key.

+     *

+     * @param key   A key string.

+     * @param defaultValue     The default.

+     * @return      A string which is the value.

+     */

+    public String optString(String key, String defaultValue) {

+        Object object = this.opt(key);

+        return NULL.equals(object) ? defaultValue : object.toString();

+    }
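
+    // Illustrative sketch (editor's note, not part of the original import):

+    //   LOGJSONObject o = new LOGJSONObject("{\"a\": \"1\"}");

+    //   o.getInt("a");           // 1  ("1" is coerced to a number)

+    //   o.optInt("b");           // 0  (missing key falls back to the default)

+    //   o.optString("b", "x");   // "x"

+    //   o.get("b");              // throws JSONException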

+

+

+    private void populateMap(Object bean) {

+        Class<? extends Object> klass = bean.getClass();

+

+// If klass is a System class then set includeSuperClass to false.

+

+        boolean includeSuperClass = klass.getClassLoader() != null;

+

+        Method[] methods = includeSuperClass

+                ? klass.getMethods()

+                : klass.getDeclaredMethods();

+        for (int i = 0; i < methods.length; i += 1) {

+            try {

+                Method method = methods[i];

+                if (Modifier.isPublic(method.getModifiers())) {

+                    String name = method.getName();

+                    String key = "";

+                    if (name.startsWith("get")) {

+                        if ("getClass".equals(name) ||

+                                "getDeclaringClass".equals(name)) {

+                            key = "";

+                        } else {

+                            key = name.substring(3);

+                        }

+                    } else if (name.startsWith("is")) {

+                        key = name.substring(2);

+                    }

+                    if (key.length() > 0 &&

+                            Character.isUpperCase(key.charAt(0)) &&

+                            method.getParameterTypes().length == 0) {

+                        if (key.length() == 1) {

+                            key = key.toLowerCase();

+                        } else if (!Character.isUpperCase(key.charAt(1))) {

+                            key = key.substring(0, 1).toLowerCase() +

+                                key.substring(1);

+                        }

+

+                        Object result = method.invoke(bean, (Object[])null);

+                        if (result != null) {

+                            this.map.put(key, wrap(result));

+                        }

+                    }

+                }

+            } catch (Exception ignore) {

+            }

+        }

+    }

+

+

+    /**

+     * Put a key/boolean pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A boolean which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public LOGJSONObject put(String key, boolean value) throws JSONException {

+        this.put(key, value ? Boolean.TRUE : Boolean.FALSE);

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject, where the value will be a

+     * JSONArray which is produced from a Collection.

+     * @param key   A key string.

+     * @param value A Collection value.

+     * @return      this.

+     * @throws JSONException

+     */

+    public LOGJSONObject put(String key, Collection<Object> value) throws JSONException {

+        this.put(key, new JSONArray(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/double pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A double which is the value.

+     * @return this.

+     * @throws JSONException If the key is null or if the number is invalid.

+     */

+    public LOGJSONObject put(String key, double value) throws JSONException {

+        this.put(key, new Double(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/int pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value An int which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public LOGJSONObject put(String key, int value) throws JSONException {

+        this.put(key, new Integer(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/long pair in the JSONObject.

+     *

+     * @param key   A key string.

+     * @param value A long which is the value.

+     * @return this.

+     * @throws JSONException If the key is null.

+     */

+    public LOGJSONObject put(String key, long value) throws JSONException {

+        this.put(key, new Long(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject, where the value will be a

+     * JSONObject which is produced from a Map.

+     * @param key   A key string.

+     * @param value A Map value.

+     * @return      this.

+     * @throws JSONException

+     */

+    public LOGJSONObject put(String key, Map<String, Object> value) throws JSONException {

+        this.put(key, new LOGJSONObject(value));

+        return this;

+    }

+

+

+    /**

+     * Put a key/value pair in the JSONObject. If the value is null,

+     * then the key will be removed from the JSONObject if it is present.

+     * @param key   A key string.

+     * @param value An object which is the value. It should be of one of these

+     *  types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,

+     *  or the JSONObject.NULL object.

+     * @return this.

+     * @throws JSONException If the value is a non-finite number

+     *  or if the key is null.

+     */

+    public LOGJSONObject put(String key, Object value) throws JSONException {

+        String pooled;

+        if (key == null) {

+            throw new JSONException("Null key.");

+        }

+        if (value != null) {

+            testValidity(value);

+            pooled = (String)keyPool.get(key);

+            if (pooled == null) {

+                if (keyPool.size() >= keyPoolSize) {

+                    keyPool = new LinkedHashMap<String, Object>(keyPoolSize);

+                }

+                keyPool.put(key, key);

+            } else {

+                key = pooled;

+            }

+            this.map.put(key, value);

+        } else {

+            this.remove(key);

+        }

+        return this;

+    }
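
+    /*

+     * Editor's sketch (illustrative only, not part of the original import):

+     * putting a null value removes the member rather than storing a null.

+     *

+     *   LOGJSONObject rec = new LOGJSONObject();

+     *   rec.put("requestId", "12345");       // stores the pair

+     *   rec.put("requestId", (Object) null); // removes it again

+     *   rec.optString("requestId", "");      // -> ""

+     */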

+

+

+    /**

+     * Put a key/value pair in the JSONObject, but only if the key and the

+     * value are both non-null, and only if there is not already a member

+     * with that name.

+     * @param key

+     * @param value

+     * @return this.

+     * @throws JSONException if the key is a duplicate

+     */

+    public LOGJSONObject putOnce(String key, Object value) throws JSONException {

+        if (key != null && value != null) {

+            if (this.opt(key) != null) {

+                throw new JSONException("Duplicate key \"" + key + "\"");

+            }

+            this.put(key, value);

+        }

+        return this;

+    }
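
+    /*

+     * Editor's sketch (illustrative only): unlike put(), putOnce() refuses to

+     * overwrite an existing member.

+     *

+     *   LOGJSONObject rec = new LOGJSONObject();

+     *   rec.putOnce("publishId", "p-1");  // stored

+     *   rec.putOnce("publishId", "p-2");  // throws JSONException: Duplicate key "publishId"

+     */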

+

+

+    /**

+     * Put a key/value pair in the JSONObject, but only if the

+     * key and the value are both non-null.

+     * @param key   A key string.

+     * @param value An object which is the value. It should be of one of these

+     *  types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,

+     *  or the JSONObject.NULL object.

+     * @return this.

+     * @throws JSONException If the value is a non-finite number.

+     */

+    public LOGJSONObject putOpt(String key, Object value) throws JSONException {

+        if (key != null && value != null) {

+            this.put(key, value);

+        }

+        return this;

+    }

+

+

+    /**

+     * Produce a string in double quotes with backslash sequences in all the

+     * right places. A backslash will be inserted within </, producing <\/,

+     * allowing JSON text to be delivered in HTML. In JSON text, a string

+     * cannot contain a control character or an unescaped quote or backslash.

+     * @param string A String

+     * @return  A String correctly formatted for insertion in a JSON text.

+     */

+    public static String quote(String string) {

+        StringWriter sw = new StringWriter();

+        synchronized (sw.getBuffer()) {

+            try {

+                return quote(string, sw).toString();

+            } catch (IOException ignored) {

+                // will never happen - we are writing to a string writer

+                return "";

+            }

+        }

+    }
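
+    /*

+     * Editor's sketch (illustrative only): quote() wraps the input in double

+     * quotes and escapes anything JSON cannot carry literally.

+     *

+     *   String json = LOGJSONObject.quote("a\tb");

+     *   // json holds six characters: the surrounding quotes, 'a', 'b', and

+     *   // the tab written as the two-character escape \t

+     */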

+

+    public static Writer quote(String string, Writer w) throws IOException {

+        if (string == null || string.length() == 0) {

+            w.write("\"\"");

+            return w;

+        }

+

+        char b;

+        char c = 0;

+        String hhhh;

+        int i;

+        int len = string.length();

+

+        w.write('"');

+        for (i = 0; i < len; i += 1) {

+            b = c;

+            c = string.charAt(i);

+            switch (c) {

+            case '\\':

+            case '"':

+                w.write('\\');

+                w.write(c);

+                break;

+            case '/':

+                if (b == '<') {

+                    w.write('\\');

+                }

+                w.write(c);

+                break;

+            case '\b':

+                w.write("\\b");

+                break;

+            case '\t':

+                w.write("\\t");

+                break;

+            case '\n':

+                w.write("\\n");

+                break;

+            case '\f':

+                w.write("\\f");

+                break;

+            case '\r':

+                w.write("\\r");

+                break;

+            default:

+                if (c < ' ' || (c >= '\u0080' && c < '\u00a0')

+                        || (c >= '\u2000' && c < '\u2100')) {

+                    w.write("\\u");

+                    hhhh = Integer.toHexString(c);

+                    w.write("0000", 0, 4 - hhhh.length());

+                    w.write(hhhh);

+                } else {

+                    w.write(c);

+                }

+            }

+        }

+        w.write('"');

+        return w;

+    }

+

+    /**

+     * Remove a name and its value, if present.

+     * @param key The name to be removed.

+     * @return The value that was associated with the name,

+     * or null if there was no value.

+     */

+    public Object remove(String key) {

+        return this.map.remove(key);

+    }

+

+    /**

+     * Try to convert a string into a number, boolean, or null. If the string

+     * can't be converted, return the string.

+     * @param string A String.

+     * @return A simple JSON value.

+     */

+    public static Object stringToValue(String string) {

+        Double d;

+        if (string.equals("")) {

+            return string;

+        }

+        if (string.equalsIgnoreCase("true")) {

+            return Boolean.TRUE;

+        }

+        if (string.equalsIgnoreCase("false")) {

+            return Boolean.FALSE;

+        }

+        if (string.equalsIgnoreCase("null")) {

+            return LOGJSONObject.NULL;

+        }

+

+        /*

+         * If it might be a number, try converting it.

+         * If a number cannot be produced, then the value will just

+         * be a string. Note that the plus and implied string

+         * conventions are non-standard. A JSON parser may accept

+         * non-JSON forms as long as it accepts all correct JSON forms.

+         */

+

+        char b = string.charAt(0);

+        if ((b >= '0' && b <= '9') || b == '.' || b == '-' || b == '+') {

+            try {

+                if (string.indexOf('.') > -1 ||

+                        string.indexOf('e') > -1 || string.indexOf('E') > -1) {

+                    d = Double.valueOf(string);

+                    if (!d.isInfinite() && !d.isNaN()) {

+                        return d;

+                    }

+                } else {

+                    Long myLong = new Long(string);

+                    if (myLong.longValue() == myLong.intValue()) {

+                        return new Integer(myLong.intValue());

+                    } else {

+                        return myLong;

+                    }

+                }

+            }  catch (Exception ignore) {

+            }

+        }

+        return string;

+    }
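
+    /*

+     * Editor's sketch (illustrative only): how stringToValue() classifies input.

+     *

+     *   LOGJSONObject.stringToValue("true");  // -> Boolean.TRUE

+     *   LOGJSONObject.stringToValue("42");    // -> Integer 42

+     *   LOGJSONObject.stringToValue("3.14");  // -> Double 3.14

+     *   LOGJSONObject.stringToValue("null");  // -> LOGJSONObject.NULL

+     *   LOGJSONObject.stringToValue("abc");   // -> the String "abc", unchanged

+     */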

+

+

+    /**

+     * Throw an exception if the object is a NaN or infinite number.

+     * @param o The object to test.

+     * @throws JSONException If o is a non-finite number.

+     */

+    public static void testValidity(Object o) throws JSONException {

+        if (o != null) {

+            if (o instanceof Double) {

+                if (((Double)o).isInfinite() || ((Double)o).isNaN()) {

+                    throw new JSONException(

+                        "JSON does not allow non-finite numbers.");

+                }

+            } else if (o instanceof Float) {

+                if (((Float)o).isInfinite() || ((Float)o).isNaN()) {

+                    throw new JSONException(

+                        "JSON does not allow non-finite numbers.");

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Produce a JSONArray containing the values of the members of this

+     * JSONObject.

+     * @param names A JSONArray containing a list of key strings. This

+     * determines the sequence of the values in the result.

+     * @return A JSONArray of values.

+     * @throws JSONException If any of the values are non-finite numbers.

+     */

+    public JSONArray toJSONArray(JSONArray names) throws JSONException {

+        if (names == null || names.length() == 0) {

+            return null;

+        }

+        JSONArray ja = new JSONArray();

+        for (int i = 0; i < names.length(); i += 1) {

+            ja.put(this.opt(names.getString(i)));

+        }

+        return ja;

+    }

+

+    /**

+     * Make a JSON text of this JSONObject. For compactness, no whitespace

+     * is added. If this would not result in a syntactically correct JSON text,

+     * then null will be returned instead.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return a printable, displayable, portable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     */

+    public String toString() {

+        try {

+            return this.toString(0);

+        } catch (Exception e) {

+            return null;

+        }

+    }

+

+

+    /**

+     * Make a prettyprinted JSON text of this JSONObject.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     * @param indentFactor The number of spaces to add to each level of

+     *  indentation.

+     * @return a printable, displayable, portable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @throws JSONException If the object contains an invalid number.

+     */

+    public String toString(int indentFactor) throws JSONException {

+        StringWriter w = new StringWriter();

+        synchronized (w.getBuffer()) {

+            return this.write(w, indentFactor, 0).toString();

+        }

+    }
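
+    /*

+     * Editor's sketch (illustrative only): indentFactor 0 yields compact text,

+     * a positive value pretty-prints one member per line.

+     *

+     *   LOGJSONObject rec = new LOGJSONObject();

+     *   rec.put("type", "del");

+     *   rec.put("statusCode", 204);

+     *   rec.toString();   // e.g. {"type":"del","statusCode":204} (order follows the backing map)

+     *   rec.toString(2);  // same members, one per line, indented two spaces

+     */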

+

+    /**

+     * Make a JSON text of an Object value. If the object has an

+     * value.toJSONString() method, then that method will be used to produce

+     * the JSON text. The method is required to produce a strictly

+     * conforming text. If the object does not contain a toJSONString

+     * method (which is the most common case), then a text will be

+     * produced by other means. If the value is an array or Collection,

+     * then a JSONArray will be made from it and its toJSONString method

+     * will be called. If the value is a Map, then a JSONObject will be made

+     * from it and its toJSONString method will be called. Otherwise, the

+     * value's toString method will be called, and the result will be quoted.

+     *

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     * @param value The value to be serialized.

+     * @return a printable, displayable, transmittable

+     *  representation of the object, beginning

+     *  with <code>{</code>&nbsp;<small>(left brace)</small> and ending

+     *  with <code>}</code>&nbsp;<small>(right brace)</small>.

+     * @throws JSONException If the value is or contains an invalid number.

+     */

+    @SuppressWarnings("unchecked")

+    public static String valueToString(Object value) throws JSONException {

+        if (value == null || value.equals(null)) {

+            return "null";

+        }

+        if (value instanceof JSONString) {

+            Object object;

+            try {

+                object = ((JSONString)value).toJSONString();

+            } catch (Exception e) {

+                throw new JSONException(e);

+            }

+            if (object instanceof String) {

+                return (String)object;

+            }

+            throw new JSONException("Bad value from toJSONString: " + object);

+        }

+        if (value instanceof Number) {

+            return numberToString((Number) value);

+        }

+        if (value instanceof Boolean || value instanceof LOGJSONObject ||

+                value instanceof JSONArray) {

+            return value.toString();

+        }

+        if (value instanceof Map) {

+            return new LOGJSONObject((Map<String, Object>)value).toString();

+        }

+        if (value instanceof Collection) {

+            return new JSONArray((Collection<Object>)value).toString();

+        }

+        if (value.getClass().isArray()) {

+            return new JSONArray(value).toString();

+        }

+        return quote(value.toString());

+    }

+

+     /**

+      * Wrap an object, if necessary. If the object is null, return the NULL

+      * object. If it is an array or collection, wrap it in a JSONArray. If

+      * it is a map, wrap it in a JSONObject. If it is a standard property

+      * (Double, String, et al) then it is already wrapped. Otherwise, if it

+      * comes from one of the java packages, turn it into a string. And if

+      * it doesn't, try to wrap it in a JSONObject. If the wrapping fails,

+      * then null is returned.

+      *

+      * @param object The object to wrap

+      * @return The wrapped value

+      */

+     @SuppressWarnings("unchecked")

+     public static Object wrap(Object object) {

+         try {

+             if (object == null) {

+                 return NULL;

+             }

+             if (object instanceof LOGJSONObject || object instanceof JSONArray  ||

+                     NULL.equals(object)      || object instanceof JSONString ||

+                     object instanceof Byte   || object instanceof Character  ||

+                     object instanceof Short  || object instanceof Integer    ||

+                     object instanceof Long   || object instanceof Boolean    ||

+                     object instanceof Float  || object instanceof Double     ||

+                     object instanceof String) {

+                 return object;

+             }

+

+             if (object instanceof Collection) {

+                 return new JSONArray((Collection<Object>)object);

+             }

+             if (object.getClass().isArray()) {

+                 return new JSONArray(object);

+             }

+             if (object instanceof Map) {

+                 return new LOGJSONObject((Map<String, Object>)object);

+             }

+             Package objectPackage = object.getClass().getPackage();

+             String objectPackageName = objectPackage != null

+                 ? objectPackage.getName()

+                 : "";

+             if (

+                 objectPackageName.startsWith("java.") ||

+                 objectPackageName.startsWith("javax.") ||

+                 object.getClass().getClassLoader() == null

+             ) {

+                 return object.toString();

+             }

+             return new LOGJSONObject(object);

+         } catch(Exception exception) {

+             return null;

+         }

+     }
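
+     /*

+      * Editor's sketch (illustrative only): wrap() normalizes arbitrary values

+      * before they are stored.

+      *

+      *   wrap(null);                           // -> LOGJSONObject.NULL

+      *   wrap("text");                         // -> the same String

+      *   wrap(java.util.Arrays.asList(1, 2));  // -> a JSONArray holding [1,2]

+      *   wrap(new java.util.Date());           // java.* type -> its toString() value

+      */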

+

+

+     /**

+      * Write the contents of the JSONObject as JSON text to a writer.

+      * For compactness, no whitespace is added.

+      * <p>

+      * Warning: This method assumes that the data structure is acyclical.

+      *

+      * @return The writer.

+      * @throws JSONException

+      */

+     public Writer write(Writer writer) throws JSONException {

+        return this.write(writer, 0, 0);

+    }

+

+

+    @SuppressWarnings("unchecked")

+    static final Writer writeValue(Writer writer, Object value,

+            int indentFactor, int indent) throws JSONException, IOException {

+        if (value == null || value.equals(null)) {

+            writer.write("null");

+        } else if (value instanceof LOGJSONObject) {

+            ((LOGJSONObject) value).write(writer, indentFactor, indent);

+        } else if (value instanceof JSONArray) {

+            ((JSONArray) value).write(writer, indentFactor, indent);

+        } else if (value instanceof Map) {

+            new LOGJSONObject((Map<String, Object>) value).write(writer, indentFactor, indent);

+        } else if (value instanceof Collection) {

+            new JSONArray((Collection<Object>) value).write(writer, indentFactor,

+                    indent);

+        } else if (value.getClass().isArray()) {

+            new JSONArray(value).write(writer, indentFactor, indent);

+        } else if (value instanceof Number) {

+            writer.write(numberToString((Number) value));

+        } else if (value instanceof Boolean) {

+            writer.write(value.toString());

+        } else if (value instanceof JSONString) {

+            Object o;

+            try {

+                o = ((JSONString) value).toJSONString();

+            } catch (Exception e) {

+                throw new JSONException(e);

+            }

+            writer.write(o != null ? o.toString() : quote(value.toString()));

+        } else {

+            quote(value.toString(), writer);

+        }

+        return writer;

+    }

+

+    static final void indent(Writer writer, int indent) throws IOException {

+        for (int i = 0; i < indent; i += 1) {

+            writer.write(' ');

+        }

+    }

+

+    /**

+     * Write the contents of the JSONObject as JSON text to a writer. For

+     * compactness, no whitespace is added.

+     * <p>

+     * Warning: This method assumes that the data structure is acyclical.

+     *

+     * @return The writer.

+     * @throws JSONException

+     */

+    Writer write(Writer writer, int indentFactor, int indent)

+            throws JSONException {

+        try {

+            boolean commanate = false;

+            final int length = this.length();

+            Iterator<String> keys = this.keys();

+            writer.write('{');

+

+            if (length == 1) {

+                Object key = keys.next();

+                writer.write(quote(key.toString()));

+                writer.write(':');

+                if (indentFactor > 0) {

+                    writer.write(' ');

+                }

+                writeValue(writer, this.map.get(key), indentFactor, indent);

+            } else if (length != 0) {

+                final int newindent = indent + indentFactor;

+                while (keys.hasNext()) {

+                    Object key = keys.next();

+                    if (commanate) {

+                        writer.write(',');

+                    }

+                    if (indentFactor > 0) {

+                        writer.write('\n');

+                    }

+                    indent(writer, newindent);

+                    writer.write(quote(key.toString()));

+                    writer.write(':');

+                    if (indentFactor > 0) {

+                        writer.write(' ');

+                    }

+                    writeValue(writer, this.map.get(key), indentFactor,

+                            newindent);

+                    commanate = true;

+                }

+                if (indentFactor > 0) {

+                    writer.write('\n');

+                }

+                indent(writer, indent);

+            }

+            writer.write('}');

+            return writer;

+        } catch (IOException exception) {

+            throw new JSONException(exception);

+        }

+     }

+}

diff --git a/datarouter-prov/src/main/java/org/json/None.java b/datarouter-prov/src/main/java/org/json/None.java
new file mode 100644
index 0000000..5b9a47d
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/None.java
@@ -0,0 +1,31 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+public interface None {

+    /**

+     * Negative One

+     */

+    public static final int none = -1;

+

+}

diff --git a/datarouter-prov/src/main/java/org/json/XML.java b/datarouter-prov/src/main/java/org/json/XML.java
new file mode 100644
index 0000000..33f43e5
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/XML.java
@@ -0,0 +1,530 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+import java.util.Iterator;

+

+

+/**

+ * This provides static methods to convert an XML text into a JSONObject,

+ * and to convert a JSONObject into an XML text.

+ * @author JSON.org

+ * @version 2012-10-26

+ */

+public class XML {

+

+    /** The Character '&amp;'. */

+    public static final Character AMP   = new Character('&');

+

+    /** The Character '''. */

+    public static final Character APOS  = new Character('\'');

+

+    /** The Character '!'. */

+    public static final Character BANG  = new Character('!');

+

+    /** The Character '='. */

+    public static final Character EQ    = new Character('=');

+

+    /** The Character '>'. */

+    public static final Character GT    = new Character('>');

+

+    /** The Character '&lt;'. */

+    public static final Character LT    = new Character('<');

+

+    /** The Character '?'. */

+    public static final Character QUEST = new Character('?');

+

+    /** The Character '"'. */

+    public static final Character QUOT  = new Character('"');

+

+    /** The Character '/'. */

+    public static final Character SLASH = new Character('/');

+

+    /**

+     * Replace special characters with XML escapes:

+     * <pre>

+     * &amp; <small>(ampersand)</small> is replaced by &amp;amp;

+     * &lt; <small>(less than)</small> is replaced by &amp;lt;

+     * &gt; <small>(greater than)</small> is replaced by &amp;gt;

+     * &quot; <small>(double quote)</small> is replaced by &amp;quot;

+     * </pre>

+     * @param string The string to be escaped.

+     * @return The escaped string.

+     */

+    public static String escape(String string) {

+        StringBuffer sb = new StringBuffer();

+        for (int i = 0, length = string.length(); i < length; i++) {

+            char c = string.charAt(i);

+            switch (c) {

+            case '&':

+                sb.append("&amp;");

+                break;

+            case '<':

+                sb.append("&lt;");

+                break;

+            case '>':

+                sb.append("&gt;");

+                break;

+            case '"':

+                sb.append("&quot;");

+                break;

+            case '\'':

+                sb.append("&apos;");

+                break;

+            default:

+                sb.append(c);

+            }

+        }

+        return sb.toString();

+    }
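
+    /*

+     * Editor's sketch (illustrative only): only the five markup characters are

+     * rewritten; everything else passes through unchanged.

+     *

+     *   XML.escape("a < b & c");  // -> "a &lt; b &amp; c"

+     */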

+

+    /**

+     * Throw an exception if the string contains whitespace.

+     * Whitespace is not allowed in tagNames and attributes.

+     * @param string

+     * @throws JSONException

+     */

+    public static void noSpace(String string) throws JSONException {

+        int i, length = string.length();

+        if (length == 0) {

+            throw new JSONException("Empty string.");

+        }

+        for (i = 0; i < length; i += 1) {

+            if (Character.isWhitespace(string.charAt(i))) {

+                throw new JSONException("'" + string +

+                        "' contains a space character.");

+            }

+        }

+    }

+

+    /**

+     * Scan the content following the named tag, attaching it to the context.

+     * @param x       The XMLTokener containing the source string.

+     * @param context The JSONObject that will include the new material.

+     * @param name    The tag name.

+     * @return true if the close tag is processed.

+     * @throws JSONException

+     */

+    private static boolean parse(XMLTokener x, JSONObject context,

+                                 String name) throws JSONException {

+        char       c;

+        int        i;

+        JSONObject jsonobject = null;

+        String     string;

+        String     tagName;

+        Object     token;

+

+// Test for and skip past these forms:

+//      <!-- ... -->

+//      <!   ...   >

+//      <![  ... ]]>

+//      <?   ...  ?>

+// Report errors for these forms:

+//      <>

+//      <=

+//      <<

+

+        token = x.nextToken();

+

+// <!

+

+        if (token == BANG) {

+            c = x.next();

+            if (c == '-') {

+                if (x.next() == '-') {

+                    x.skipPast("-->");

+                    return false;

+                }

+                x.back();

+            } else if (c == '[') {

+                token = x.nextToken();

+                if ("CDATA".equals(token)) {

+                    if (x.next() == '[') {

+                        string = x.nextCDATA();

+                        if (string.length() > 0) {

+                            context.accumulate("content", string);

+                        }

+                        return false;

+                    }

+                }

+                throw x.syntaxError("Expected 'CDATA['");

+            }

+            i = 1;

+            do {

+                token = x.nextMeta();

+                if (token == null) {

+                    throw x.syntaxError("Missing '>' after '<!'.");

+                } else if (token == LT) {

+                    i += 1;

+                } else if (token == GT) {

+                    i -= 1;

+                }

+            } while (i > 0);

+            return false;

+        } else if (token == QUEST) {

+

+// <?

+

+            x.skipPast("?>");

+            return false;

+        } else if (token == SLASH) {

+

+// Close tag </

+

+            token = x.nextToken();

+            if (name == null) {

+                throw x.syntaxError("Mismatched close tag " + token);

+            }

+            if (!token.equals(name)) {

+                throw x.syntaxError("Mismatched " + name + " and " + token);

+            }

+            if (x.nextToken() != GT) {

+                throw x.syntaxError("Misshaped close tag");

+            }

+            return true;

+

+        } else if (token instanceof Character) {

+            throw x.syntaxError("Misshaped tag");

+

+// Open tag <

+

+        } else {

+            tagName = (String)token;

+            token = null;

+            jsonobject = new JSONObject();

+            for (;;) {

+                if (token == null) {

+                    token = x.nextToken();

+                }

+

+// attribute = value

+

+                if (token instanceof String) {

+                    string = (String)token;

+                    token = x.nextToken();

+                    if (token == EQ) {

+                        token = x.nextToken();

+                        if (!(token instanceof String)) {

+                            throw x.syntaxError("Missing value");

+                        }

+                        jsonobject.accumulate(string,

+                                XML.stringToValue((String)token));

+                        token = null;

+                    } else {

+                        jsonobject.accumulate(string, "");

+                    }

+

+// Empty tag <.../>

+

+                } else if (token == SLASH) {

+                    if (x.nextToken() != GT) {

+                        throw x.syntaxError("Misshaped tag");

+                    }

+                    if (jsonobject.length() > 0) {

+                        context.accumulate(tagName, jsonobject);

+                    } else {

+                        context.accumulate(tagName, "");

+                    }

+                    return false;

+

+// Content, between <...> and </...>

+

+                } else if (token == GT) {

+                    for (;;) {

+                        token = x.nextContent();

+                        if (token == null) {

+                            if (tagName != null) {

+                                throw x.syntaxError("Unclosed tag " + tagName);

+                            }

+                            return false;

+                        } else if (token instanceof String) {

+                            string = (String)token;

+                            if (string.length() > 0) {

+                                jsonobject.accumulate("content",

+                                        XML.stringToValue(string));

+                            }

+

+// Nested element

+

+                        } else if (token == LT) {

+                            if (parse(x, jsonobject, tagName)) {

+                                if (jsonobject.length() == 0) {

+                                    context.accumulate(tagName, "");

+                                } else if (jsonobject.length() == 1 &&

+                                       jsonobject.opt("content") != null) {

+                                    context.accumulate(tagName,

+                                            jsonobject.opt("content"));

+                                } else {

+                                    context.accumulate(tagName, jsonobject);

+                                }

+                                return false;

+                            }

+                        }

+                    }

+                } else {

+                    throw x.syntaxError("Misshaped tag");

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Try to convert a string into a number, boolean, or null. If the string

+     * can't be converted, return the string. This is much less ambitious than

+     * JSONObject.stringToValue, especially because it does not attempt to

+     * convert plus forms, octal forms, hex forms, or E forms lacking decimal

+     * points.

+     * @param string A String.

+     * @return A simple JSON value.

+     */

+    public static Object stringToValue(String string) {

+        if ("".equals(string)) {

+            return string;

+        }

+        if ("true".equalsIgnoreCase(string)) {

+            return Boolean.TRUE;

+        }

+        if ("false".equalsIgnoreCase(string)) {

+            return Boolean.FALSE;

+        }

+        if ("null".equalsIgnoreCase(string)) {

+            return JSONObject.NULL;

+        }

+        if ("0".equals(string)) {

+            return new Integer(0);

+        }

+

+// If it might be a number, try converting it. If that doesn't work,

+// return the string.

+

+        try {

+            char initial = string.charAt(0);

+            boolean negative = false;

+            if (initial == '-') {

+                initial = string.charAt(1);

+                negative = true;

+            }

+            if (initial == '0' && string.charAt(negative ? 2 : 1) == '0') {

+                return string;

+            }

+            if ((initial >= '0' && initial <= '9')) {

+                if (string.indexOf('.') >= 0) {

+                    return Double.valueOf(string);

+                } else if (string.indexOf('e') < 0 && string.indexOf('E') < 0) {

+                    Long myLong = new Long(string);

+                    if (myLong.longValue() == myLong.intValue()) {

+                        return new Integer(myLong.intValue());

+                    } else {

+                        return myLong;

+                    }

+                }

+            }

+        }  catch (Exception ignore) {

+        }

+        return string;

+    }

+

+

+    /**

+     * Convert a well-formed (but not necessarily valid) XML string into a

+     * JSONObject. Some information may be lost in this transformation

+     * because JSON is a data format and XML is a document format. XML uses

+     * elements, attributes, and content text, while JSON uses unordered

+     * collections of name/value pairs and arrays of values. JSON does not

+     * like to distinguish between elements and attributes.

+     * Sequences of similar elements are represented as JSONArrays. Content

+     * text may be placed in a "content" member. Comments, prologs, DTDs, and

+     * <code>&lt;[ [ ]]></code> are ignored.

+     * @param string The source string.

+     * @return A JSONObject containing the structured data from the XML string.

+     * @throws JSONException

+     */

+    public static JSONObject toJSONObject(String string) throws JSONException {

+        JSONObject jo = new JSONObject();

+        XMLTokener x = new XMLTokener(string);

+        while (x.more() && x.skipPast("<")) {

+            parse(x, jo, null);

+        }

+        return jo;

+    }
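
+    /*

+     * Editor's sketch (illustrative only): attributes and child elements both

+     * become members, and repeated elements collapse into a JSONArray.

+     *

+     *   XML.toJSONObject("<feed id=\"f1\"><sub>a</sub><sub>b</sub></feed>");

+     *   // -> {"feed":{"id":"f1","sub":["a","b"]}}

+     */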

+

+

+    /**

+     * Convert a JSONObject into a well-formed, element-normal XML string.

+     * @param object A JSONObject.

+     * @return  A string.

+     * @throws  JSONException

+     */

+    public static String toString(Object object) throws JSONException {

+        return toString(object, null);

+    }

+

+

+    /**

+     * Convert a JSONObject into a well-formed, element-normal XML string.

+     * @param object A JSONObject.

+     * @param tagName The optional name of the enclosing tag.

+     * @return A string.

+     * @throws JSONException

+     */

+    public static String toString(Object object, String tagName)

+            throws JSONException {

+        StringBuffer sb = new StringBuffer();

+        int          i;

+        JSONArray    ja;

+        JSONObject   jo;

+        String       key;

+        Iterator<String> keys;

+        int          length;

+        String       string;

+        Object       value;

+        if (object instanceof JSONObject) {

+

+// Emit <tagName>

+

+            if (tagName != null) {

+                sb.append('<');

+                sb.append(tagName);

+                sb.append('>');

+            }

+

+// Loop thru the keys.

+

+            jo = (JSONObject)object;

+            keys = jo.keys();

+            while (keys.hasNext()) {

+                key = keys.next().toString();

+                value = jo.opt(key);

+                if (value == null) {

+                    value = "";

+                }

+                if (value instanceof String) {

+                    string = (String)value;

+                } else {

+                    string = null;

+                }

+

+// Emit content in body

+

+                if ("content".equals(key)) {

+                    if (value instanceof JSONArray) {

+                        ja = (JSONArray)value;

+                        length = ja.length();

+                        for (i = 0; i < length; i += 1) {

+                            if (i > 0) {

+                                sb.append('\n');

+                            }

+                            sb.append(escape(ja.get(i).toString()));

+                        }

+                    } else {

+                        sb.append(escape(value.toString()));

+                    }

+

+// Emit an array of similar keys

+

+                } else if (value instanceof JSONArray) {

+                    ja = (JSONArray)value;

+                    length = ja.length();

+                    for (i = 0; i < length; i += 1) {

+                        value = ja.get(i);

+                        if (value instanceof JSONArray) {

+                            sb.append('<');

+                            sb.append(key);

+                            sb.append('>');

+                            sb.append(toString(value));

+                            sb.append("</");

+                            sb.append(key);

+                            sb.append('>');

+                        } else {

+                            sb.append(toString(value, key));

+                        }

+                    }

+                } else if ("".equals(value)) {

+                    sb.append('<');

+                    sb.append(key);

+                    sb.append("/>");

+

+// Emit a new tag <k>

+

+                } else {

+                    sb.append(toString(value, key));

+                }

+            }

+            if (tagName != null) {

+

+// Emit the </tagname> close tag

+

+                sb.append("</");

+                sb.append(tagName);

+                sb.append('>');

+            }

+            return sb.toString();

+

+// XML does not have good support for arrays. If an array appears in a place

+// where XML is lacking, synthesize an <array> element.

+

+        } else {

+            if (object.getClass().isArray()) {

+                object = new JSONArray(object);

+            }

+            if (object instanceof JSONArray) {

+                ja = (JSONArray)object;

+                length = ja.length();

+                for (i = 0; i < length; i += 1) {

+                    sb.append(toString(ja.opt(i), tagName == null ? "array" : tagName));

+                }

+                return sb.toString();

+            } else {

+                string = (object == null) ? "null" : escape(object.toString());

+                return (tagName == null) ? "\"" + string + "\"" :

+                    (string.length() == 0) ? "<" + tagName + "/>" :

+                    "<" + tagName + ">" + string + "</" + tagName + ">";

+            }

+        }

+    }
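
+    /*

+     * Editor's sketch (illustrative only): the reverse direction.

+     *

+     *   JSONObject jo = new JSONObject();

+     *   jo.put("id", "f1");

+     *   jo.put("name", "feedOne");

+     *   XML.toString(jo, "feed");

+     *   // -> <feed><id>f1</id><name>feedOne</name></feed> (member order may vary)

+     */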

+}

diff --git a/datarouter-prov/src/main/java/org/json/XMLTokener.java b/datarouter-prov/src/main/java/org/json/XMLTokener.java
new file mode 100644
index 0000000..bdb5466
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/XMLTokener.java
@@ -0,0 +1,390 @@
+/*******************************************************************************

+ * ============LICENSE_START==================================================

+ * * org.onap.dmaap

+ * * ===========================================================================

+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+ * * ===========================================================================

+ * * Licensed under the Apache License, Version 2.0 (the "License");

+ * * you may not use this file except in compliance with the License.

+ * * You may obtain a copy of the License at

+ * * 

+ *  *      http://www.apache.org/licenses/LICENSE-2.0

+ * * 

+ *  * Unless required by applicable law or agreed to in writing, software

+ * * distributed under the License is distributed on an "AS IS" BASIS,

+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * * See the License for the specific language governing permissions and

+ * * limitations under the License.

+ * * ============LICENSE_END====================================================

+ * *

+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+ * *

+ ******************************************************************************/

+package org.json;

+

+import java.util.HashMap;

+import java.util.Map;

+

+/*

+Copyright (c) 2002 JSON.org

+

+Permission is hereby granted, free of charge, to any person obtaining a copy

+of this software and associated documentation files (the "Software"), to deal

+in the Software without restriction, including without limitation the rights

+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

+copies of the Software, and to permit persons to whom the Software is

+furnished to do so, subject to the following conditions:

+

+The above copyright notice and this permission notice shall be included in all

+copies or substantial portions of the Software.

+

+The Software shall be used for Good, not Evil.

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE

+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

+SOFTWARE.

+*/

+

+/**

+ * The XMLTokener extends the JSONTokener to provide additional methods

+ * for the parsing of XML texts.

+ * @author JSON.org

+ * @version 2012-11-13

+ */

+public class XMLTokener extends JSONTokener {

+

+

+   /** The table of entity values. It initially contains Character values for

+    * amp, apos, gt, lt, quot.

+    */

+   public static final Map<String,Character> entity;

+

+   static {

+       entity = new HashMap<String,Character>(8);

+       entity.put("amp",  XML.AMP);

+       entity.put("apos", XML.APOS);

+       entity.put("gt",   XML.GT);

+       entity.put("lt",   XML.LT);

+       entity.put("quot", XML.QUOT);

+   }

+

+    /**

+     * Construct an XMLTokener from a string.

+     * @param s A source string.

+     */

+    public XMLTokener(String s) {

+        super(s);

+    }

+

+    /**

+     * Get the text in the CDATA block.

+     * @return The string up to the <code>]]&gt;</code>.

+     * @throws JSONException If the <code>]]&gt;</code> is not found.

+     */

+    public String nextCDATA() throws JSONException {

+        char         c;

+        int          i;

+        StringBuffer sb = new StringBuffer();

+        for (;;) {

+            c = next();

+            if (end()) {

+                throw syntaxError("Unclosed CDATA");

+            }

+            sb.append(c);

+            i = sb.length() - 3;

+            if (i >= 0 && sb.charAt(i) == ']' &&

+                          sb.charAt(i + 1) == ']' && sb.charAt(i + 2) == '>') {

+                sb.setLength(i);

+                return sb.toString();

+            }

+        }

+    }

+

+

+    /**

+     * Get the next XML outer token, trimming whitespace. There are two kinds

+     * of tokens: the '<' character which begins a markup tag, and the content

+     * text between markup tags.

+     *

+     * @return  A string, or a '<' Character, or null if there is no more

+     * source text.

+     * @throws JSONException

+     */

+    public Object nextContent() throws JSONException {

+        char         c;

+        StringBuffer sb;

+        do {

+            c = next();

+        } while (Character.isWhitespace(c));

+        if (c == 0) {

+            return null;

+        }

+        if (c == '<') {

+            return XML.LT;

+        }

+        sb = new StringBuffer();

+        for (;;) {

+            if (c == '<' || c == 0) {

+                back();

+                return sb.toString().trim();

+            }

+            if (c == '&') {

+                sb.append(nextEntity(c));

+            } else {

+                sb.append(c);

+            }

+            c = next();

+        }

+    }

+

+

+    /**

+     * Return the next entity. These entities are translated to Characters:

+     *     <code>&amp;  &apos;  &gt;  &lt;  &quot;</code>.

+     * @param ampersand An ampersand character.

+     * @return  A Character or an entity String if the entity is not recognized.

+     * @throws JSONException If missing ';' in XML entity.

+     */

+    public Object nextEntity(char ampersand) throws JSONException {

+        StringBuffer sb = new StringBuffer();

+        for (;;) {

+            char c = next();

+            if (Character.isLetterOrDigit(c) || c == '#') {

+                sb.append(Character.toLowerCase(c));

+            } else if (c == ';') {

+                break;

+            } else {

+                throw syntaxError("Missing ';' in XML entity: &" + sb);

+            }

+        }

+        String string = sb.toString();

+        Object object = entity.get(string);

+        return object != null ? object : ampersand + string + ";";

+    }
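
+    /*

+     * Editor's sketch (illustrative only): recognized entities come back as the

+     * matching Character, unrecognized ones as literal text.

+     *

+     *   With the remaining source "amp;x",  nextEntity('&') returns XML.AMP.

+     *   With the remaining source "copy;x", nextEntity('&') returns "&copy;".

+     */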

+

+

+    /**

+     * Returns the next XML meta token. This is used for skipping over <!...>

+     * and <?...?> structures.

+     * @return Syntax characters (<code>< > / = ! ?</code>) are returned as

+     *  Character, and strings and names are returned as Boolean. We don't care

+     *  what the values actually are.

+     * @throws JSONException If a string is not properly closed or if the XML

+     *  is badly structured.

+     */

+    public Object nextMeta() throws JSONException {

+        char c;

+        char q;

+        do {

+            c = next();

+        } while (Character.isWhitespace(c));

+        switch (c) {

+        case 0:

+            throw syntaxError("Misshaped meta tag");

+        case '<':

+            return XML.LT;

+        case '>':

+            return XML.GT;

+        case '/':

+            return XML.SLASH;

+        case '=':

+            return XML.EQ;

+        case '!':

+            return XML.BANG;

+        case '?':

+            return XML.QUEST;

+        case '"':

+        case '\'':

+            q = c;

+            for (;;) {

+                c = next();

+                if (c == 0) {

+                    throw syntaxError("Unterminated string");

+                }

+                if (c == q) {

+                    return Boolean.TRUE;

+                }

+            }

+        default:

+            for (;;) {

+                c = next();

+                if (Character.isWhitespace(c)) {

+                    return Boolean.TRUE;

+                }

+                switch (c) {

+                case 0:

+                case '<':

+                case '>':

+                case '/':

+                case '=':

+                case '!':

+                case '?':

+                case '"':

+                case '\'':

+                    back();

+                    return Boolean.TRUE;

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Get the next XML Token. These tokens are found inside of angle

+     * brackets. It may be one of these characters: <code>/ > = ! ?</code> or it

+     * may be a string wrapped in single quotes or double quotes, or it may be a

+     * name.

+     * @return a String or a Character.

+     * @throws JSONException If the XML is not well formed.

+     */

+    public Object nextToken() throws JSONException {

+        char c;

+        char q;

+        StringBuffer sb;

+        do {

+            c = next();

+        } while (Character.isWhitespace(c));

+        switch (c) {

+        case 0:

+            throw syntaxError("Misshaped element");

+        case '<':

+            throw syntaxError("Misplaced '<'");

+        case '>':

+            return XML.GT;

+        case '/':

+            return XML.SLASH;

+        case '=':

+            return XML.EQ;

+        case '!':

+            return XML.BANG;

+        case '?':

+            return XML.QUEST;

+

+// Quoted string

+

+        case '"':

+        case '\'':

+            q = c;

+            sb = new StringBuffer();

+            for (;;) {

+                c = next();

+                if (c == 0) {

+                    throw syntaxError("Unterminated string");

+                }

+                if (c == q) {

+                    return sb.toString();

+                }

+                if (c == '&') {

+                    sb.append(nextEntity(c));

+                } else {

+                    sb.append(c);

+                }

+            }

+        default:

+

+// Name

+

+            sb = new StringBuffer();

+            for (;;) {

+                sb.append(c);

+                c = next();

+                if (Character.isWhitespace(c)) {

+                    return sb.toString();

+                }

+                switch (c) {

+                case 0:

+                    return sb.toString();

+                case '>':

+                case '/':

+                case '=':

+                case '!':

+                case '?':

+                case '[':

+                case ']':

+                    back();

+                    return sb.toString();

+                case '<':

+                case '"':

+                case '\'':

+                    throw syntaxError("Bad character in a name");

+                }

+            }

+        }

+    }

+

+

+    /**

+     * Skip characters until past the requested string.

+     * If it is not found, we are left at the end of the source with a result of false.

+     * @param to A string to skip past.

+     * @throws JSONException

+     */

+    public boolean skipPast(String to) throws JSONException {

+        boolean b;

+        char c;

+        int i;

+        int j;

+        int offset = 0;

+        int length = to.length();

+        char[] circle = new char[length];

+

+        /*

+         * First fill the circle buffer with as many characters as are in the

+         * to string. If we reach an early end, bail.

+         */

+

+        for (i = 0; i < length; i += 1) {

+            c = next();

+            if (c == 0) {

+                return false;

+            }

+            circle[i] = c;

+        }

+

+        /* We will loop, possibly for all of the remaining characters. */

+

+        for (;;) {

+            j = offset;

+            b = true;

+

+            /* Compare the circle buffer with the to string. */

+

+            for (i = 0; i < length; i += 1) {

+                if (circle[j] != to.charAt(i)) {

+                    b = false;

+                    break;

+                }

+                j += 1;

+                if (j >= length) {

+                    j -= length;

+                }

+            }

+

+            /* If we exit the loop with b intact, then victory is ours. */

+

+            if (b) {

+                return true;

+            }

+

+            /* Get the next character. If there isn't one, then defeat is ours. */

+

+            c = next();

+            if (c == 0) {

+                return false;

+            }

+            /*

+             * Shove the character in the circle buffer and advance the

+             * circle offset. The offset is mod n.

+             */

+            circle[offset] = c;

+            offset += 1;

+            if (offset >= length) {

+                offset -= length;

+            }

+        }

+    }

+}

diff --git a/datarouter-prov/src/main/java/org/json/package.html b/datarouter-prov/src/main/java/org/json/package.html
new file mode 100644
index 0000000..392a0c6
--- /dev/null
+++ b/datarouter-prov/src/main/java/org/json/package.html
@@ -0,0 +1,40 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+<!-- CVS: $Id: package.html,v 1.1 2013/04/26 21:01:51 eby Exp $ -->

+<!--

+                       AT&T - PROPRIETARY

+          THIS FILE CONTAINS PROPRIETARY INFORMATION OF

+       AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN

+                   ACCORDANCE WITH APPLICABLE AGREEMENTS.

+

+           Copyright (c) 2013 AT&T Knowledge Ventures

+               Unpublished and Not for Publication

+                      All Rights Reserved

+-->

+<html>

+<body>

+<p>

+This package provides the json.org JSON library.

+</p>

+</body>

+</html>

diff --git a/datarouter-prov/src/main/resources/EelfMessages.properties b/datarouter-prov/src/main/resources/EelfMessages.properties
new file mode 100644
index 0000000..5e8b179
--- /dev/null
+++ b/datarouter-prov/src/main/resources/EelfMessages.properties
@@ -0,0 +1,58 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+########################################################################

+#Resource key=Error Code|Message text|Resolution text |Description text

+#######

+#Newlines can be used to add clarity, ensuring each continuation line

+#has at least one leading space

+#ResourceKey=\

+#             ERR0000E\

+#             Sample error msg txt\

+#             Sample resolution msg\

+#             Sample description txt

+#

+######

+#Error code classification category

+#100	Permission errors

+#200	Availability errors/Timeouts

+#300	Data errors

+#400	Schema Interface type/validation errors

+#500	Business process errors

+#900	Unknown errors

+#

+########################################################################

+

+# Messages for Data Router EELF framework

+

+#Prints FeedID in the EELF apilog

+MESSAGE_WITH__FEEDID=EELF0001I| FeedID  = {0}

+

+#Prints User in the EELF apilog

+MESSAGE_WITH_BEHALF=EELF0002I| User = {0}

+

+#Prints User and FeedID in the EELF apilog

+MESSAGE_WITH_BEHALF_AND_FEEDID=EELF0003I| User = {0} FeedID  = {1}

+

+#Prints User and SubID in the EELF apilog

+MESSAGE_WITH_BEHALF_AND_SUBID=EELF0004I| User = {0} SubscriberID  = {1}

+
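
These message keys are looked up by the EELF logging framework at runtime, with the numbered placeholders filled in by the caller. As a rough smoke test once the provisioning server is running, the codes can be grepped out of the EELF api log; this assumes the apicalls log name and the /opt/app/datartr/logs directory configured in the logback.xml later in this change:

    # Hypothetical spot-check that the EELF api messages above are being emitted
    grep -E 'EELF000[1-4]I' /opt/app/datartr/logs/apicalls.log | tail -5
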

diff --git a/datarouter-prov/src/main/resources/authz.jar b/datarouter-prov/src/main/resources/authz.jar
new file mode 100644
index 0000000..6d0dd8a
--- /dev/null
+++ b/datarouter-prov/src/main/resources/authz.jar
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/database/install_db.sql b/datarouter-prov/src/main/resources/docker-compose/database/install_db.sql
new file mode 100644
index 0000000..64a0762
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/database/install_db.sql
@@ -0,0 +1,143 @@
+CREATE DATABASE IF NOT EXISTS datarouter;
+
+CREATE USER 'datarouter'@'%' IDENTIFIED BY 'datarouter';
+
+GRANT ALL PRIVILEGES ON * . * TO 'datarouter'@'%';
+
+use datarouter;
+
+CREATE TABLE FEEDS (
+    FEEDID         INT UNSIGNED NOT NULL PRIMARY KEY,
+    NAME           VARCHAR(20) NOT NULL,
+    VERSION        VARCHAR(20) NOT NULL,
+    DESCRIPTION    VARCHAR(256),
+    AUTH_CLASS     VARCHAR(32) NOT NULL,
+    PUBLISHER      VARCHAR(8) NOT NULL,
+    SELF_LINK      VARCHAR(256),
+    PUBLISH_LINK   VARCHAR(256),
+    SUBSCRIBE_LINK VARCHAR(256),
+    LOG_LINK       VARCHAR(256),
+    DELETED        BOOLEAN DEFAULT FALSE,
+    LAST_MOD       TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE FEED_ENDPOINT_IDS (
+    FEEDID        INT UNSIGNED NOT NULL,
+    USERID        VARCHAR(20) NOT NULL,
+    PASSWORD      VARCHAR(32) NOT NULL
+);
+
+CREATE TABLE FEED_ENDPOINT_ADDRS (
+    FEEDID        INT UNSIGNED NOT NULL,
+    ADDR          VARCHAR(44) NOT NULL
+);
+
+CREATE TABLE SUBSCRIPTIONS (
+    SUBID              INT UNSIGNED NOT NULL PRIMARY KEY,
+    FEEDID             INT UNSIGNED NOT NULL,
+    DELIVERY_URL       VARCHAR(256),
+    DELIVERY_USER      VARCHAR(20),
+    DELIVERY_PASSWORD  VARCHAR(32),
+    DELIVERY_USE100    BOOLEAN DEFAULT FALSE,
+    METADATA_ONLY      BOOLEAN DEFAULT FALSE,
+    SUBSCRIBER         VARCHAR(8) NOT NULL,
+    SELF_LINK          VARCHAR(256),
+    LOG_LINK           VARCHAR(256),
+    LAST_MOD           TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE PARAMETERS (
+    KEYNAME        VARCHAR(32) NOT NULL PRIMARY KEY,
+    VALUE          VARCHAR(4096) NOT NULL
+);
+
+CREATE TABLE LOG_RECORDS (
+    TYPE	   ENUM('pub', 'del', 'exp') NOT NULL,
+    EVENT_TIME     BIGINT NOT NULL,           /* time of the publish request */
+    PUBLISH_ID     VARCHAR(64) NOT NULL,      /* unique ID assigned to this publish attempt */
+    FEEDID         INT UNSIGNED NOT NULL,     /* pointer to feed in FEEDS */
+    REQURI         VARCHAR(256) NOT NULL,     /* request URI */
+    METHOD         ENUM('DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT', 'POST', 'TRACE') NOT NULL, /* HTTP method */
+    CONTENT_TYPE   VARCHAR(256) NOT NULL,     /* content type of published file */
+    CONTENT_LENGTH BIGINT UNSIGNED NOT NULL,  /* content length of published file */
+
+    FEED_FILEID    VARCHAR(128),		/* file ID of published file */
+    REMOTE_ADDR    VARCHAR(40),			/* IP address of publishing endpoint */
+    USER           VARCHAR(20),			/* user name of publishing endpoint */
+    STATUS         SMALLINT,			/* status code returned to delivering agent */
+
+    DELIVERY_SUBID INT UNSIGNED,		/* pointer to subscription in SUBSCRIPTIONS */
+    DELIVERY_FILEID  VARCHAR(128),		/* file ID of file being delivered */
+    RESULT         SMALLINT,			/* result received from subscribing agent */
+
+    ATTEMPTS       INT,				/* deliveries attempted */
+    REASON         ENUM('notRetryable', 'retriesExhausted'),
+
+    RECORD_ID      BIGINT UNSIGNED NOT NULL PRIMARY KEY, /* unique ID for this record */
+
+    INDEX (FEEDID) USING BTREE,
+    INDEX (DELIVERY_SUBID) USING BTREE,
+    INDEX (RECORD_ID) USING BTREE
+) ENGINE = MyISAM;
+
+CREATE TABLE INGRESS_ROUTES (
+    SEQUENCE  INT UNSIGNED NOT NULL,
+    FEEDID    INT UNSIGNED NOT NULL,
+    USERID    VARCHAR(20),
+    SUBNET    VARCHAR(44),
+    NODESET   INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE EGRESS_ROUTES (
+    SUBID    INT UNSIGNED NOT NULL PRIMARY KEY,
+    NODEID   INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NETWORK_ROUTES (
+    FROMNODE INT UNSIGNED NOT NULL,
+    TONODE   INT UNSIGNED NOT NULL,
+    VIANODE  INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NODESETS (
+    SETID   INT UNSIGNED NOT NULL,
+    NODEID  INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NODES (
+    NODEID  INT UNSIGNED NOT NULL PRIMARY KEY,
+    NAME    VARCHAR(255) NOT NULL,
+    ACTIVE  BOOLEAN DEFAULT TRUE
+);
+
+CREATE TABLE GROUPS (
+    GROUPID  INT UNSIGNED NOT NULL PRIMARY KEY,
+    AUTHID    VARCHAR(100) NOT NULL,
+    NAME    VARCHAR(50) NOT NULL,
+    DESCRIPTION    VARCHAR(255),
+    CLASSIFICATION    VARCHAR(20) NOT NULL,
+    MEMBERS    TINYTEXT,
+    LAST_MOD       TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- 'PROV_AUTH_ADDRESSES' holds the address of the provisioning server (e.g. an IPv4 address such as '192.168.56.1')
+INSERT INTO PARAMETERS VALUES
+	('ACTIVE_POD',  'prov.datarouternew.com'),
+	('PROV_ACTIVE_NAME',  'prov.datarouternew.com'),
+	('STANDBY_POD', ''),
+	('PROV_NAME',   'prov.datarouternew.com'),
+	('NODES',       'node.datarouternew.com'),
+	('PROV_DOMAIN', 'datarouternew.com'),
+	('DELIVERY_INIT_RETRY_INTERVAL', '10'),
+	('DELIVERY_MAX_AGE', '86400'),
+	('DELIVERY_MAX_RETRY_INTERVAL', '3600'),
+	('DELIVERY_RETRY_RATIO', '2'),
+	('LOGROLL_INTERVAL', '300'),
+	('PROV_AUTH_ADDRESSES', 'prov.datarouternew.com'), 
+	('PROV_AUTH_SUBJECTS', ''),
+	('PROV_MAXFEED_COUNT',	'10000'),
+	('PROV_MAXSUB_COUNT',	'100000'),
+	('PROV_REQUIRE_CERT', 'false'),
+	('PROV_REQUIRE_SECURE', 'false'),
+	('_INT_VALUES', 'LOGROLL_INTERVAL|PROV_MAXFEED_COUNT|PROV_MAXSUB_COUNT|DELIVERY_INIT_RETRY_INTERVAL|DELIVERY_MAX_RETRY_INTERVAL|DELIVERY_RETRY_RATIO|DELIVERY_MAX_AGE')
+	;
\ No newline at end of file
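
In the docker-compose setup that follows, ./database is mounted into /docker-entrypoint-initdb.d, so the MySQL container applies install_db.sql automatically on first start. Should the schema ever need to be loaded or checked by hand, a minimal sketch, assuming the container name (mysql) and root password (att2017) from that compose file:

    # Re-apply the schema manually (only needed if the init volume was not used)
    docker exec -i mysql mysql -uroot -patt2017 < install_db.sql
    # Confirm that the default PARAMETERS rows were inserted
    docker exec mysql mysql -uroot -patt2017 datarouter -e 'SELECT KEYNAME, VALUE FROM PARAMETERS'
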
diff --git a/datarouter-prov/src/main/resources/docker-compose/docker-compose.yml b/datarouter-prov/src/main/resources/docker-compose/docker-compose.yml
new file mode 100644
index 0000000..4e2a81a
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/docker-compose.yml
@@ -0,0 +1,69 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+version: '2'

+services: 

+  datarouter-prov:

+    image: attos/datarouter-prov

+    container_name: datarouter-prov

+    hostname: prov.datarouternew.com

+    ports:

+     - "8443:8443"

+     - "8080:8080"  

+#    volumes:

+#     - ./prov_data/proserver.properties:/opt/app/datartr/etc/proserver.properties

+#     - ./prov_data/datarouter-prov-jar-with-dependencies.jar:/opt/app/datartr/lib/datarouter-prov-jar-with-dependencies.jar

+#      - ./prov_data/addSubscriber.txt:/opt/app/datartr/addSubscriber.txt

+#      - ./prov_data/addFeed3.txt:/opt/app/datartr/addFeed3.txt

+    entrypoint: ["bash", "-c", "sleep 10; /bin/sh -c ./startup.sh"]

+    depends_on:

+      - mysql_container

+    extra_hosts:

+      - "node.datarouternew.com:172.18.0.4"

+

+    

+  datarouter-node:

+    image: attos/datarouter-node

+    container_name: datarouter-node

+    hostname: node.datarouternew.com

+    ports:

+     - "9443:8443"

+     - "9090:8080"

+#    volumes:

+#     - ./node_data/node.properties:/opt/app/datartr/etc/node.properties

+    entrypoint: ["bash", "-c", "sleep 15; /bin/sh -c ./startup.sh"]    

+    depends_on:

+      - datarouter-prov

+    extra_hosts:

+      - "prov.datarouternew.com:172.18.0.3"

+      

+  mysql_container:

+    image: mysql/mysql-server:5.6

+    container_name: mysql

+    ports:

+     - "3306:3306"

+    environment:

+      MYSQL_ROOT_PASSWORD: att2017

+    volumes:

+    - ./database:/tmp/database

+    - ./database:/docker-entrypoint-initdb.d

+    
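
A typical bring-up of this stack is run from the directory containing this compose file; the sleep entrypoints above already stagger startup so MySQL is available before the provisioning server, and the provisioning server before the node:

    docker-compose up -d                      # start mysql, datarouter-prov and datarouter-node
    docker-compose ps                         # confirm all three containers are up
    docker-compose logs -f datarouter-prov    # follow the provisioning server as it starts
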

diff --git a/datarouter-prov/src/main/resources/docker-compose/node_data/node.properties b/datarouter-prov/src/main/resources/docker-compose/node_data/node.properties
new file mode 100644
index 0000000..f57833c
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/node_data/node.properties
@@ -0,0 +1,112 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+#

+#	Configuration parameters fixed at startup for the DataRouter node

+#

+#	URL to retrieve dynamic configuration

+#

+#ProvisioningURL:	${DRTR_PROV_INTURL:-https://feeds-drtr.web.att.com/internal/prov}

+ProvisioningURL=https://prov.datarouternew.com:8443/internal/prov

+

+#

+#	URL to upload PUB/DEL/EXP logs

+#

+#LogUploadURL:	${DRTR_LOG_URL:-https://feeds-drtr.web.att.com/internal/logs}

+LogUploadURL=https://prov.datarouternew.com:8443/internal/logs

+

+#

+#	The port number for http as seen within the server

+#

+#IntHttpPort:	${DRTR_NODE_INTHTTPPORT:-8080}

+IntHttpPort=8080

+#

+#	The port number for https as seen within the server

+#

+IntHttpsPort=8443

+#

+#	The external port number for https taking port mapping into account

+#

+ExtHttpsPort=443

+#

+#	The minimum interval between fetches of the dynamic configuration

+#	from the provisioning server

+#

+MinProvFetchInterval=10000

+#

+#	The minimum interval between saves of the redirection data file

+#

+MinRedirSaveInterval=10000

+#

+#	The path to the directory where log files are stored

+#

+LogDir=/opt/app/datartr/logs

+#

+#	The retention interval (in days) for log files

+#

+LogRetention=30

+#

+#	The path to the directories where data and meta data files are stored

+#

+SpoolDir=/opt/app/datartr/spool

+#

+#	The path to the redirection data file

+#

+#RedirectionFile:	etc/redirections.dat

+#

+#	The type of keystore for https

+#

+KeyStoreType=jks

+#

+#	The path to the keystore for https

+#

+KeyStoreFile=/opt/app/datartr/self_signed/keystore.jks

+#

+#	The password for the https keystore

+#

+KeyStorePassword=changeit

+#

+#	The password for the private key in the https keystore

+#

+KeyPassword=changeit

+#

+#	The type of truststore for https

+#

+TrustStoreType=jks

+#

+#	The path to the truststore for https

+#

+#TrustStoreFile=/usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts

+TrustStoreFile=/opt/app/datartr/self_signed/cacerts.jks

+#

+#	The password for the https truststore

+#

+TrustStorePassword=changeit

+#

+#	The path to the file used to trigger an orderly shutdown

+#

+QuiesceFile=etc/SHUTDOWN

+#

+#	The key used to generate passwords for node to node transfers

+#

+NodeAuthKey=Node123!

+
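
The keystore.jks and cacerts.jks referenced above are shipped pre-built under self_signed/ (the binary files that follow). As a hedged sketch, equivalent self-signed stores could be regenerated with the standard JDK keytool, reusing the changeit passwords from this file and the node.datarouternew.com hostname from the compose file; the mykey alias simply mirrors the bundled mykey.cer:

    # Hypothetical regeneration of the bundled self-signed key and trust stores
    keytool -genkeypair -alias mykey -keyalg RSA -keysize 2048 -validity 365 \
        -dname "CN=node.datarouternew.com" \
        -keystore keystore.jks -storepass changeit -keypass changeit
    keytool -exportcert -alias mykey -keystore keystore.jks -storepass changeit -file nodekey.cer
    keytool -importcert -alias mykey -file nodekey.cer -keystore cacerts.jks -storepass changeit -noprompt
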

diff --git a/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/cacerts.jks b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/cacerts.jks
new file mode 100644
index 0000000..dfd8143
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/cacerts.jks
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/keystore.jks b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/keystore.jks
new file mode 100644
index 0000000..e5a4e78
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/keystore.jks
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/mykey.cer b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/mykey.cer
new file mode 100644
index 0000000..2a5c9d7
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/mykey.cer
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/nodekey.cer b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/nodekey.cer
new file mode 100644
index 0000000..4cdfdfe
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/node_data/self_signed/nodekey.cer
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/addFeed3.txt b/datarouter-prov/src/main/resources/docker-compose/prov_data/addFeed3.txt
new file mode 100644
index 0000000..a21c7ae
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/addFeed3.txt
@@ -0,0 +1,44 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+{

+     "name": "Jettydemo",

+     "version": "m1.0",

+     "description": "Jettydemo",

+     "business_description": "Jettydemo",

+     "suspend": false,

+     "deleted": false,

+     "changeowner": true,

+     "authorization": {

+          "classification": "unclassified",

+          "endpoint_addrs": [

+               "172.18.0.3"

+			],

+          "endpoint_ids": [

+               {

+                    "password": "rs873m",

+                    "id": "rs873m"

+               }

+          ]

+     }

+}

+
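
This payload is meant to be POSTed to the provisioning server to create the Jettydemo feed. The exact call is not shown in this change, so the sketch below is hedged: the vnd.att-dr.feed media type and the X-ATT-DR-ON-BEHALF-OF header are assumed conventions of the Data Router provisioning API, while the host and 8443 port come from the compose file; -k skips verification of the self-signed certificate:

    curl -k -X POST https://prov.datarouternew.com:8443/ \
        -H 'Content-Type: application/vnd.att-dr.feed' \
        -H 'X-ATT-DR-ON-BEHALF-OF: rs873m' \
        --data-binary @addFeed3.txt
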

diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/addSubscriber.txt b/datarouter-prov/src/main/resources/docker-compose/prov_data/addSubscriber.txt
new file mode 100644
index 0000000..e974631
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/addSubscriber.txt
@@ -0,0 +1,36 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+{ 

+                "delivery" :	

+               				

+                { 

+                                "url" : "http://172.18.0.3:7070/", 

+                                "user" : "LOGIN", 

+                                "password" : "PASSWORD", 

+                                "use100" : true 

+                },

+                "metadataOnly" : false, 

+                "suspend" : false, 

+				"groupid" : 29,

+                "subscriber" : "sg481n"

+}
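
Similarly, this subscription body would be POSTed to the subscribe link returned when the feed above is created. A sketch under the same assumed API conventions, with <FEED_SUBSCRIBE_URL> standing in for that returned link:

    curl -k -X POST "<FEED_SUBSCRIBE_URL>" \
        -H 'Content-Type: application/vnd.att-dr.subscription' \
        -H 'X-ATT-DR-ON-BEHALF-OF: sg481n' \
        --data-binary @addSubscriber.txt
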

diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/provserver.properties b/datarouter-prov/src/main/resources/docker-compose/prov_data/provserver.properties
new file mode 100644
index 0000000..6a03cbd
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/provserver.properties
@@ -0,0 +1,59 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+#

+#                        AT&T - PROPRIETARY

+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF

+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN

+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.

+#

+#          Copyright (c) 2013 AT&T Knowledge Ventures

+#              Unpublished and Not for Publication

+#                     All Rights Reserved

+#

+# CVS: $Id: provserver.properties,v 1.7 2013/05/29 14:44:36 eby Exp $

+#

+

+#Jetty Server properties

+com.att.research.datarouter.provserver.http.port           = 8080

+com.att.research.datarouter.provserver.https.port          = 8443

+com.att.research.datarouter.provserver.https.relaxation	   = true

+com.att.research.datarouter.provserver.keymanager.password = changeit

+com.att.research.datarouter.provserver.keystore.type       = jks

+com.att.research.datarouter.provserver.keystore.path       = /opt/app/datartr/self_signed/keystore.jks

+

+com.att.research.datarouter.provserver.keystore.password   = changeit

+#com.att.research.datarouter.provserver.truststore.path     = /home/eby/dr2/misc/cacerts+1

+#com.att.research.datarouter.provserver.truststore.path     = /usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts

+com.att.research.datarouter.provserver.truststore.path     = /opt/app/datartr/self_signed/cacerts.jks

+

+com.att.research.datarouter.provserver.truststore.password = changeit

+com.att.research.datarouter.provserver.accesslog.dir       = /opt/app/datartr/logs

+com.att.research.datarouter.provserver.spooldir            = /opt/app/datartr/spool

+#com.att.research.datarouter.provserver.dbscripts          = /home/eby/dr2/cvs/datarouter/prov/misc/

+com.att.research.datarouter.provserver.logretention        = 30

+

+# Database access

+com.att.research.datarouter.db.driver   = com.mysql.jdbc.Driver

+com.att.research.datarouter.db.url      = jdbc:mysql://172.18.0.2:3306/datarouter

+com.att.research.datarouter.db.login    = datarouter

+com.att.research.datarouter.db.password = datarouter
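
A quick way to confirm the JDBC settings above is to connect with the same credentials from the docker host, assuming a local mysql client and the 3306 port mapping from the compose file:

    mysql -h 127.0.0.1 -P 3306 -udatarouter -pdatarouter datarouter -e 'SELECT COUNT(*) FROM FEEDS'
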

diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/cacerts.jks b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/cacerts.jks
new file mode 100644
index 0000000..76a480a
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/cacerts.jks
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/keystore.jks b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/keystore.jks
new file mode 100644
index 0000000..2c22b4a
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/keystore.jks
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/mykey.cer b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/mykey.cer
new file mode 100644
index 0000000..2a5c9d7
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker-compose/prov_data/self_signed/mykey.cer
Binary files differ
diff --git a/datarouter-prov/src/main/resources/docker/Dockerfile b/datarouter-prov/src/main/resources/docker/Dockerfile
new file mode 100644
index 0000000..215c433
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker/Dockerfile
@@ -0,0 +1,9 @@
+FROM java:8

+ADD opt /opt/

+ADD startup.sh /startup.sh

+RUN chmod 700 /startup.sh

+ENTRYPOINT ./startup.sh start

+EXPOSE 8443

+EXPOSE 8080

+

+
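
The compose file above pulls this image as attos/datarouter-prov, so the build context is expected to hold this Dockerfile alongside the opt/ tree and startup.sh that it ADDs. A minimal sketch of the build, assuming that layout:

    docker build -t attos/datarouter-prov .
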

diff --git a/datarouter-prov/src/main/resources/docker/startup.sh b/datarouter-prov/src/main/resources/docker/startup.sh
new file mode 100644
index 0000000..191a804
--- /dev/null
+++ b/datarouter-prov/src/main/resources/docker/startup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+LIB=/opt/app/datartr/lib
+ETC=/opt/app/datartr/etc
+echo "this is LIB" $LIB
+echo "this is ETC" $ETC
+mkdir -p /opt/app/datartr/logs
+mkdir -p /opt/app/datartr/spool
+# Build the classpath from every jar under $LIB; the glob is quoted so the shell
+# passes it to find instead of expanding it in the current directory.
+CLASSPATH=$ETC
+for FILE in `find $LIB -name '*.jar'`; do
+  CLASSPATH=$CLASSPATH:$FILE
+done
+# Start the provisioning server from the classpath; this call blocks, so the
+# jar-with-dependencies launch below is only reached if it exits.
+java -classpath $CLASSPATH  com.att.research.datarouter.provisioning.Main
+
+runner_file="$LIB/datarouter-prov-jar-with-dependencies.jar"
+echo "Starting using" $runner_file
+java -Dcom.att.eelf.logging.file==/opt/app/datartr/etc/logback.xml -Dcom.att.eelf.logging.path=/root -jar $runner_file
+
diff --git a/datarouter-prov/src/main/resources/log4j.properties b/datarouter-prov/src/main/resources/log4j.properties
new file mode 100644
index 0000000..bb4eaa0
--- /dev/null
+++ b/datarouter-prov/src/main/resources/log4j.properties
@@ -0,0 +1,68 @@
+#-------------------------------------------------------------------------------

+# ============LICENSE_START==================================================

+# * org.onap.dmaap

+# * ===========================================================================

+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+# * ===========================================================================

+# * Licensed under the Apache License, Version 2.0 (the "License");

+# * you may not use this file except in compliance with the License.

+# * You may obtain a copy of the License at

+# * 

+#  *      http://www.apache.org/licenses/LICENSE-2.0

+# * 

+#  * Unless required by applicable law or agreed to in writing, software

+# * distributed under the License is distributed on an "AS IS" BASIS,

+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+# * See the License for the specific language governing permissions and

+# * limitations under the License.

+# * ============LICENSE_END====================================================

+# *

+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+# *

+#-------------------------------------------------------------------------------

+

+

+log4j.rootLogger=info, stdout

+

+log4j.appender.stdout=org.apache.log4j.ConsoleAppender

+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

+log4j.appender.stdout.layout.ConversionPattern=%d %5p [%t] - %m%n

+

+#

+# Logger used for provisioning events

+#

+log4j.logger.com.att.research.datarouter.provisioning.events=info, eventlog

+log4j.additivity.com.att.research.datarouter.provisioning.events=false

+

+log4j.appender.eventlog=org.apache.log4j.DailyRollingFileAppender

+log4j.appender.eventlog.file=/root/dr2/logs/provevent.log

+log4j.appender.eventlog.datePattern='.'yyyyMMdd

+log4j.appender.eventlog.append=true

+log4j.appender.eventlog.layout=org.apache.log4j.PatternLayout

+log4j.appender.eventlog.layout.ConversionPattern=%d %-5p [%t] - %m%n

+

+#

+# Logger used for internal provisioning server events

+#

+log4j.logger.com.att.research.datarouter.provisioning.internal=debug, intlog

+log4j.additivity.com.att.research.datarouter.provisioning.internal=false

+

+log4j.appender.intlog=org.apache.log4j.DailyRollingFileAppender

+log4j.appender.intlog.file=/root/dr2/logs/provint.log

+log4j.appender.intlog.datePattern='.'yyyyMMdd

+log4j.appender.intlog.append=true

+log4j.appender.intlog.layout=org.apache.log4j.PatternLayout

+log4j.appender.intlog.layout.ConversionPattern=%d %-5p [%t] - %m%n

+

+#

+# Logger used for policy engine

+#

+log4j.logger.com.att.research.datarouter.authz.impl.ProvAuthorizer=debug, pelog

+log4j.additivity.com.att.research.datarouter.authz.impl.ProvAuthorizer=false

+

+log4j.appender.pelog=org.apache.log4j.DailyRollingFileAppender

+log4j.appender.pelog.file=/root/dr2/logs/policyengine.log

+log4j.appender.pelog.datePattern='.'yyyyMMdd

+log4j.appender.pelog.append=true

+log4j.appender.pelog.layout=org.apache.log4j.PatternLayout

+log4j.appender.pelog.layout.ConversionPattern=%d %-5p [%t] - %m%n

diff --git a/datarouter-prov/src/main/resources/logback.xml b/datarouter-prov/src/main/resources/logback.xml
new file mode 100644
index 0000000..7d73e0d
--- /dev/null
+++ b/datarouter-prov/src/main/resources/logback.xml
@@ -0,0 +1,405 @@
+<!--

+  ============LICENSE_START==================================================

+  * org.onap.dmaap

+  * ===========================================================================

+  * Copyright © 2017 AT&T Intellectual Property. All rights reserved.

+  * ===========================================================================

+  * Licensed under the Apache License, Version 2.0 (the "License");

+  * you may not use this file except in compliance with the License.

+  * You may obtain a copy of the License at

+  * 

+   *      http://www.apache.org/licenses/LICENSE-2.0

+  * 

+   * Unless required by applicable law or agreed to in writing, software

+  * distributed under the License is distributed on an "AS IS" BASIS,

+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+  * See the License for the specific language governing permissions and

+  * limitations under the License.

+  * ============LICENSE_END====================================================

+  *

+  * ECOMP is a trademark and service mark of AT&T Intellectual Property.

+  *

+-->

+<configuration scan="true" scanPeriod="3 seconds" debug="true">

+  <!--<jmxConfigurator /> -->

+  <!-- directory path for all other type logs -->

+  <!-- property name="logDir" value="/home/eby/dr2/logs" / -->

+  <property name="logDir" value="/opt/app/datartr/logs" />

+ 

+  <!-- directory path for debugging type logs -->

+  <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->

+  

+  <!--  specify the component name 

+    <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC"  -->

+  <!-- Setting componentName would create an extra directory (such as MSO) inside logDir, which is not needed; it only appears as the last directory of the path -->

+  <!-- property name="componentName" value="logs"></property -->

+  

+  <!--  log file names -->

+  <property name="generalLogName" value="apicalls" />

+  <!-- name="securityLogName" value="security" -->

+  <!-- name="performanceLogName" value="performance" -->

+  <!-- name="serverLogName" value="server" -->

+  <!-- name="policyLogName" value="policy"-->

+  <property name="errorLogName" value="errors" />

+  <!-- name="metricsLogName" value="metrics" -->

+  <!-- name="auditLogName" value="audit" -->

+  <!-- name="debugLogName" value="debug" -->

+  <property name="jettyLogName" value="jetty"></property> 

+  <property name="defaultPattern"    value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />

+  <property name="jettyLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />

+  

+  <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />

+     

+  <property name="logDirectory" value="${logDir}" />

+  <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->

+  

+  

+  <!-- Example evaluator filter applied against console appender -->

+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+

+  <!-- ============================================================================ -->

+  <!-- EELF Appenders -->

+  <!-- ============================================================================ -->

+

+  <!-- The EELFAppender is used to record events to the general application 

+    log -->

+    

+    

+  <appender name="EELF"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${generalLogName}.log</file>

+     <filter class="ch.qos.logback.classic.filter.LevelFilter">

+		<level>INFO</level>

+		<onMatch>ACCEPT</onMatch>

+		<onMismatch>DENY</onMismatch>

+	</filter>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELF" />

+  </appender>

+

+  <!-- EELF Security Appender. This appender is used to record security events 

+    to the security log file. Security events are separate from other loggers 

+    in EELF so that security log records can be captured and managed in a secure 

+    way separate from the other logs. This appender is set to never discard any 

+    events. -->

+  <!--appender name="EELFSecurity"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${securityLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <discardingThreshold>0</discardingThreshold>

+    <appender-ref ref="EELFSecurity" />

+  </appender-->

+

+  <!-- EELF Performance Appender. This appender is used to record performance 

+    records. -->

+  <!--appender name="EELFPerformance"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${performanceLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <outputPatternAsHeader>true</outputPatternAsHeader>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFPerformance" />

+  </appender-->

+

+  <!-- EELF Server Appender. This appender is used to record Server related 

+    logging events. The Server logger and appender are specializations of the 

+    EELF application root logger and appender. This can be used to segregate Server 

+    events from other components, or it can be eliminated to record these events 

+    as part of the application root log. -->

+  <!--appender name="EELFServer"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${serverLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+        <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFServer" />

+  </appender-->

+

+  

+  <!-- EELF Policy Appender. This appender is used to record Policy engine 

+    related logging events. The Policy logger and appender are specializations 

+    of the EELF application root logger and appender. This can be used to segregate 

+    Policy engine events from other components, or it can be eliminated to record 

+    these events as part of the application root log. -->

+  <!--appender name="EELFPolicy"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${policyLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+        <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFPolicy" >

+  </appender-->

+  

+  

+  <!-- EELF Audit Appender. This appender is used to record audit engine 

+    related logging events. The audit logger and appender are specializations 

+    of the EELF application root logger and appender. This can be used to segregate 

+    Policy engine events from other components, or it can be eliminated to record 

+    these events as part of the application root log. -->

+    

+  <!--appender name="EELFAudit"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${auditLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+         <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFAudit" />

+  </appender-->

+

+<!--appender name="EELFMetrics"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${metricsLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder-->

+      <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - 

+        %msg%n"</pattern> -->

+      <!--pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  

+  <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFMetrics"/>

+  </appender-->

+   

+  <appender name="EELFError"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${errorLogName}.log</file>

+    <filter class="ch.qos.logback.classic.filter.LevelFilter">

+		<level>ERROR</level>

+		<onMatch>ACCEPT</onMatch>

+		<onMismatch>DENY</onMismatch>

+	</filter>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${defaultPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFError"/>

+  </appender>

+  

+  <!-- ============================================================================ -->

+   <appender name="jettylog"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${logDirectory}/${jettyLogName}.log</file>

+	 <filter class="com.att.research.datarouter.provisioning.eelf.JettyFilter" />

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${logDirectory}/${jettyLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${jettyLoggerPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFjettylog" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="jettylog" />

+    <includeCallerData>true</includeCallerData>

+  </appender>

+  

+   <!-- ============================================================================ -->

+

+

+   <!--appender name="EELFDebug"

+    class="ch.qos.logback.core.rolling.RollingFileAppender">

+    <file>${debugLogDirectory}/${debugLogName}.log</file>

+    <rollingPolicy

+      class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">

+      <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip

+      </fileNamePattern>

+      <minIndex>1</minIndex>

+      <maxIndex>9</maxIndex>

+    </rollingPolicy>

+    <triggeringPolicy

+      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">

+      <maxFileSize>5MB</maxFileSize>

+    </triggeringPolicy>

+    <encoder>

+      <pattern>${debugLoggerPattern}</pattern>

+    </encoder>

+  </appender>

+  

+  <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">

+    <queueSize>256</queueSize>

+    <appender-ref ref="EELFDebug" />

+    <includeCallerData>true</includeCallerData>

+  </appender-->

+ 

+  

+  <!-- ============================================================================ -->

+  <!--  EELF loggers -->

+  <!-- ============================================================================ -->

+  <logger name="com.att.eelf" level="info" additivity="false">

+    <appender-ref ref="asyncEELF" />

+  </logger>

+  

+     <logger name="com.att.eelf.error" level="error" additivity="false">

+ 		 <appender-ref ref="asyncEELFError" />

+ 	 </logger>

+  

+     <logger name="org.eclipse.jetty" additivity="false" level="info">

+		<appender-ref ref="asyncEELFjettylog"/>

+	</logger> 

+	

+  <!-- logger name="com.att.eelf.security" level="info" additivity="false">

+    <appender-ref ref="asyncEELFSecurity" /> 

+  </logger>

+  <logger name="com.att.eelf.perf" level="info" additivity="false">

+    <appender-ref ref="asyncEELFPerformance" />

+  </logger>

+  <logger name="com.att.eelf.server" level="info" additivity="false">

+    <appender-ref ref="asyncEELFServer" />

+  </logger>

+  <logger name="com.att.eelf.policy" level="info" additivity="false">

+    <appender-ref ref="asyncEELFPolicy" />

+  </logger>

+

+  <logger name="com.att.eelf.audit" level="info" additivity="false">

+    <appender-ref ref="asyncEELFAudit" />

+  </logger>

+  

+  <logger name="com.att.eelf.metrics" level="info" additivity="false">

+        <appender-ref ref="asyncEELFMetrics" />

+  </logger>

+   

+   <logger name="com.att.eelf.debug" level="debug" additivity="false">

+        <appender-ref ref="asyncEELFDebug" />

+  </logger-->

+

+  

+

+  

+  <root level="INFO">

+    <appender-ref ref="asyncEELF" />

+    <appender-ref ref="asyncEELFError" />

+     <appender-ref ref="asyncEELFjettylog" />

+  </root>

+

+</configuration>

diff --git a/datarouter-prov/src/main/resources/misc/doaction b/datarouter-prov/src/main/resources/misc/doaction
new file mode 100644
index 0000000..4319332
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/doaction
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+cd /opt/app/datartr/etc
+for action in "$@"
+do
+case "$action" in
+'stop')
+	/opt/app/platform/init.d/drtrprov stop
+	;;
+'start')
+	/opt/app/platform/init.d/drtrprov start || exit 1
+	;;
+'backup')
+	cp log4j.properties log4j.properties.save 2>/dev/null
+	cp provserver.properties provserver.properties.save 2>/dev/null
+	cp mail.properties mail.properties.save 2>/dev/null
+	cp havecert havecert.save 2>/dev/null
+	cp mysql_init_0001 mysql_init_0001.save 2>/dev/null
+	;;
+'restore')
+	cp log4j.properties.save log4j.properties 2>/dev/null
+	cp provserver.properties.save provserver.properties 2>/dev/null
+	cp mail.properties.save mail.properties 2>/dev/null
+	cp havecert.save havecert 2>/dev/null
+	cp mysql_init_0001.save mysql_init_0001 2>/dev/null
+	;;
+'config')
+	/bin/bash log4j.properties.tmpl >log4j.properties
+	/bin/bash provserver.properties.tmpl >provserver.properties
+	/bin/bash mail.properties.tmpl >mail.properties
+	/bin/bash havecert.tmpl >havecert
+	/bin/bash mysql_init_0001.tmpl >mysql_init_0001
+	echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.prov
+	chmod +x havecert
+	rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
+	ln -s ../init.d/drtrprov /opt/app/platform/rc.d/K90zdrtrprov
+	ln -s ../init.d/drtrprov /opt/app/platform/rc.d/S99zdrtrprov
+	;;
+'clean')
+	rm -f log4j.properties log4j.properties.save
+	rm -f provserver.properties provserver.properties.save
+	rm -f mail.properties mail.properties.save
+	rm -f havecert havecert.properties.save
+	rm -f mysql_init_0001 mysql_init_0001.save
+	rm -f VERSION.prov
+	rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
+	;;
+*)
+	exit 1
+	;;
+esac
+done
+exit 0
diff --git a/datarouter-prov/src/main/resources/misc/dr-route b/datarouter-prov/src/main/resources/misc/dr-route
new file mode 100644
index 0000000..77c6c18
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/dr-route
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+#  dr-route -- A script to interact with a provisioning server to manage the DR routing tables.
+#
+#  $Id: dr-route,v 1.2 2013/11/06 16:23:54 eby Exp $
+#
+
+JAVA_HOME=/opt/java/jdk/jdk180
+JAVA_OPTS="-Xms1G -Xmx1G"
+TZ=GMT0
+PATH=$JAVA_HOME/bin:/bin:/usr/bin
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
+export CLASSPATH JAVA_HOME JAVA_OPTS TZ PATH
+
+$JAVA_HOME/bin/java \
+	-Dlog4j.configuration=file:///opt/app/datartr/etc/log4j.drroute.properties \
+	com.att.research.datarouter.provisioning.utils.DRRouteCLI $*
diff --git a/datarouter-prov/src/main/resources/misc/drtrprov b/datarouter-prov/src/main/resources/misc/drtrprov
new file mode 100644
index 0000000..c801ce0
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/drtrprov
@@ -0,0 +1,131 @@
+#!/bin/bash
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+#  This is the startup/shutdown script for the AT&T Data Router Provisioning Server.
+#
+#  $Id: drtrprov,v 1.3 2013/10/29 16:57:57 eby Exp $
+#
+
+umask 0022
+
+JAVA_HOME=/opt/java/jdk/jdk180
+JAVA_OPTS="-Xms2G -Xmx8G"
+TZ=GMT0
+PATH=$JAVA_HOME/bin:/bin:/usr/bin
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
+export CLASSPATH JAVA_HOME JAVA_OPTS TZ PATH
+
+pids() {
+	pgrep -u datartr -f provisioning.Main
+}
+
+start() {
+	ID=`id -n -u`
+	GRP=`id -n -g`
+	if [ "$ID" != "root" ]
+	then
+		echo drtrprov must be started as user root not $ID
+		exit 1
+	fi
+#  if [ "$GRP" != "datartr" ]
+# 	then
+# 		echo drtrprov must be started as group datartr not $GRP
+# 		exit 1
+# 	fi  
+# 	cd /opt/app/datartr
+# 	if etc/havecert
+# 	then
+# 		echo >/dev/null
+# 	else
+# 		echo No certificate file available.  Cannot start
+# 		exit 0
+# 	fi
+	if [ "`pgrep -u mysql mysqld`" = "" ]
+	then
+		echo MySQL is not running.  It must be started before drtrprov
+		exit 0
+	fi
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		echo drtrprov already running
+		exit 0
+	fi
+	echo '0 1 * * * /opt/app/datartr/bin/runreports' | crontab
+	nohup java $JAVA_OPTS com.att.research.datarouter.provisioning.Main </dev/null &
+	sleep 5
+	PIDS=`pids`
+	if [ "$PIDS" = "" ]
+	then
+		echo drtrprov startup failed
+	else
+		echo drtrprov started
+	fi
+}
+
+stop() {
+	ID=`id -n -u`
+	GRP=`id -n -g`
+	if [ "$ID" != "datartr" ]
+	then
+		echo drtrprov must be stopped as user datartr not $ID
+		exit 1
+	fi
+	if [ "$GRP" != "datartr" ]
+	then
+		echo drtrprov must be stopped as group datartr not $GRP
+		exit 1
+	fi
+	/usr/bin/curl http://127.0.0.1:8080/internal/halt
+	sleep 5
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		sleep 5
+		kill -9 $PIDS
+		sleep 5
+		echo drtrprov stopped
+	else
+		echo drtrprov not running
+	fi
+}
+
+status() {
+	PIDS=`pids`
+	if [ "$PIDS" != "" ]
+	then
+		echo drtrprov running
+	else
+		echo drtrprov not running
+	fi
+}
+
+case "$1" in
+'start')
+	start
+	;;
+'stop')
+	stop
+	;;
+'restart')
+	stop
+	sleep 20
+	start
+	;;
+'status')
+	status
+	;;
+*)
+	echo "Usage: $0 { start | stop | restart | status }"
+	exit 1
+	;;
+esac
+exit 0
diff --git a/datarouter-prov/src/main/resources/misc/havecert.tmpl b/datarouter-prov/src/main/resources/misc/havecert.tmpl
new file mode 100644
index 0000000..e238986
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/havecert.tmpl
@@ -0,0 +1,11 @@
+#!/bin/bash
+cat <<!EOF
+TZ=GMT0
+cd /opt/app/datartr;
+if [ -f ${DRTR_PROV_KSTOREFILE:-etc/keystore} ]
+then
+	exit 0
+fi
+echo `date '+%F %T,000'` WARN Certificate file "${DRTR_PROV_KSTOREFILE:-etc/keystore}" is missing >>${DRTR_PROV_LOGS:-logs}/provint.log
+exit 1
+!EOF
diff --git a/datarouter-prov/src/main/resources/misc/log4j.drroute.properties b/datarouter-prov/src/main/resources/misc/log4j.drroute.properties
new file mode 100644
index 0000000..4ff4278
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/log4j.drroute.properties
@@ -0,0 +1,41 @@
+#-------------------------------------------------------------------------------
+# ============LICENSE_START==================================================
+# * org.onap.dmaap
+# * ===========================================================================
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# * ===========================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# * 
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+# * 
+#  * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# * ============LICENSE_END====================================================
+# *
+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# *
+#-------------------------------------------------------------------------------
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+# CVS: $Id: log4j.drroute.properties,v 1.1 2013/11/06 16:23:54 eby Exp $
+#	This log4j properties file is used only by dr-route
+#
+
+log4j.rootLogger=INFO, stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
diff --git a/datarouter-prov/src/main/resources/misc/log4j.properties.tmpl b/datarouter-prov/src/main/resources/misc/log4j.properties.tmpl
new file mode 100644
index 0000000..ed1d7fa
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/log4j.properties.tmpl
@@ -0,0 +1,68 @@
+cat <<!EOF
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+# CVS: $Id: log4j.properties.tmpl,v 1.4 2014/01/13 19:44:57 eby Exp $
+#
+
+log4j.rootLogger=info
+
+#
+# Logger used for provisioning events
+#
+log4j.logger.com.att.research.datarouter.provisioning.events=info, eventlog
+log4j.additivity.com.att.research.datarouter.provisioning.events=false
+
+log4j.appender.eventlog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.eventlog.file=${DRTR_PROV_LOGS:-/opt/app/datartr/logs}/provevent.log
+log4j.appender.eventlog.datePattern='.'yyyyMMdd
+log4j.appender.eventlog.append=true
+log4j.appender.eventlog.layout=org.apache.log4j.PatternLayout
+log4j.appender.eventlog.layout.ConversionPattern=%d %-5p [%t] - %m%n
+
+#
+# Logger used for internal provisioning server events
+#
+log4j.logger.com.att.research.datarouter.provisioning.internal=debug, intlog
+log4j.additivity.com.att.research.datarouter.provisioning.internal=false
+
+log4j.appender.intlog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.intlog.file=${DRTR_PROV_LOGS:-/opt/app/datartr/logs}/provint.log
+log4j.appender.intlog.datePattern='.'yyyyMMdd
+log4j.appender.intlog.append=true
+log4j.appender.intlog.layout=org.apache.log4j.PatternLayout
+log4j.appender.intlog.layout.ConversionPattern=%d %-5p [%t] - %m%n
+
+#
+# Logger used for policy engine
+#
+log4j.logger.com.att.research.datarouter.authz.impl.ProvAuthorizer=debug, pelog
+log4j.additivity.com.att.research.datarouter.authz.impl.ProvAuthorizer=false
+
+log4j.appender.pelog=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.pelog.file=${DRTR_PROV_LOGS:-/opt/app/datartr/logs}/policyengine.log
+log4j.appender.pelog.datePattern='.'yyyyMMdd
+log4j.appender.pelog.append=true
+log4j.appender.pelog.layout=org.apache.log4j.PatternLayout
+log4j.appender.pelog.layout.ConversionPattern=%d %-5p [%t] - %m%n
+
+#
+# Logger used for Jetty server
+#
+log4j.logger.org.eclipse.jetty=info, jetty
+log4j.additivity.org.eclipse.jetty.server.Server=false
+
+log4j.appender.jetty=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.jetty.file=${DRTR_PROV_LOGS:-/opt/app/datartr/logs}/jetty.log
+log4j.appender.jetty.datePattern='.'yyyyMMdd
+log4j.appender.jetty.append=true
+log4j.appender.jetty.layout=org.apache.log4j.PatternLayout
+log4j.appender.jetty.layout.ConversionPattern=%d %-5p [%t] - %m%n
+!EOF
diff --git a/datarouter-prov/src/main/resources/misc/mysql_dr_schema.sql b/datarouter-prov/src/main/resources/misc/mysql_dr_schema.sql
new file mode 100644
index 0000000..837030c
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/mysql_dr_schema.sql
@@ -0,0 +1,139 @@
+create database datarouter;
+
+use datarouter;
+
+CREATE TABLE FEEDS (
+    FEEDID         INT UNSIGNED NOT NULL PRIMARY KEY,
+    NAME           VARCHAR(20) NOT NULL,
+    VERSION        VARCHAR(20) NOT NULL,
+    DESCRIPTION    VARCHAR(256),
+    AUTH_CLASS     VARCHAR(32) NOT NULL,
+    PUBLISHER      VARCHAR(8) NOT NULL,
+    SELF_LINK      VARCHAR(256),
+    PUBLISH_LINK   VARCHAR(256),
+    SUBSCRIBE_LINK VARCHAR(256),
+    LOG_LINK       VARCHAR(256),
+    DELETED        BOOLEAN DEFAULT FALSE,
+    LAST_MOD       TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE FEED_ENDPOINT_IDS (
+    FEEDID        INT UNSIGNED NOT NULL,
+    USERID        VARCHAR(20) NOT NULL,
+    PASSWORD      VARCHAR(32) NOT NULL
+);
+
+CREATE TABLE FEED_ENDPOINT_ADDRS (
+    FEEDID        INT UNSIGNED NOT NULL,
+    ADDR          VARCHAR(44) NOT NULL
+);
+
+CREATE TABLE SUBSCRIPTIONS (
+    SUBID              INT UNSIGNED NOT NULL PRIMARY KEY,
+    FEEDID             INT UNSIGNED NOT NULL,
+    DELIVERY_URL       VARCHAR(256),
+    DELIVERY_USER      VARCHAR(20),
+    DELIVERY_PASSWORD  VARCHAR(32),
+    DELIVERY_USE100    BOOLEAN DEFAULT FALSE,
+    METADATA_ONLY      BOOLEAN DEFAULT FALSE,
+    SUBSCRIBER         VARCHAR(8) NOT NULL,
+    SELF_LINK          VARCHAR(256),
+    LOG_LINK           VARCHAR(256),
+    LAST_MOD           TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE PARAMETERS (
+    KEYNAME        VARCHAR(32) NOT NULL PRIMARY KEY,
+    VALUE          VARCHAR(4096) NOT NULL
+);
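+-- PARAMETERS holds the provisioning server's runtime configuration as key/value pairs.
+-- The INSERT at the end of this script seeds the defaults; at runtime they are normally
+-- managed through the provcmd script (see misc/provcmd), e.g. "provcmd set LOGROLL_INTERVAL 300".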
+
+CREATE TABLE LOG_RECORDS (
+    TYPE	   ENUM('pub', 'del', 'exp') NOT NULL,
+    EVENT_TIME     BIGINT NOT NULL,           /* time of the publish request */
+    PUBLISH_ID     VARCHAR(64) NOT NULL,      /* unique ID assigned to this publish attempt */
+    FEEDID         INT UNSIGNED NOT NULL,     /* pointer to feed in FEEDS */
+    REQURI         VARCHAR(256) NOT NULL,     /* request URI */
+    METHOD         ENUM('DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT', 'POST', 'TRACE') NOT NULL, /* HTTP method */
+    CONTENT_TYPE   VARCHAR(256) NOT NULL,     /* content type of published file */
+    CONTENT_LENGTH BIGINT UNSIGNED NOT NULL,  /* content length of published file */
+
+    FEED_FILEID    VARCHAR(128),		/* file ID of published file */
+    REMOTE_ADDR    VARCHAR(40),			/* IP address of publishing endpoint */
+    USER           VARCHAR(20),			/* user name of publishing endpoint */
+    STATUS         SMALLINT,			/* status code returned to delivering agent */
+
+    DELIVERY_SUBID INT UNSIGNED,		/* pointer to subscription in SUBSCRIPTIONS */
+    DELIVERY_FILEID  VARCHAR(128),		/* file ID of file being delivered */
+    RESULT         SMALLINT,			/* result received from subscribing agent */
+
+    ATTEMPTS       INT,				/* deliveries attempted */
+    REASON         ENUM('notRetryable', 'retriesExhausted'),
+
+    RECORD_ID      BIGINT UNSIGNED NOT NULL PRIMARY KEY, /* unique ID for this record */
+
+    INDEX (FEEDID) USING BTREE,
+    INDEX (DELIVERY_SUBID) USING BTREE,
+    INDEX (RECORD_ID) USING BTREE
+) ENGINE = MyISAM;
+
+CREATE TABLE INGRESS_ROUTES (
+    SEQUENCE  INT UNSIGNED NOT NULL,
+    FEEDID    INT UNSIGNED NOT NULL,
+    USERID    VARCHAR(20),
+    SUBNET    VARCHAR(44),
+    NODESET   INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE EGRESS_ROUTES (
+    SUBID    INT UNSIGNED NOT NULL PRIMARY KEY,
+    NODEID   INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NETWORK_ROUTES (
+    FROMNODE INT UNSIGNED NOT NULL,
+    TONODE   INT UNSIGNED NOT NULL,
+    VIANODE  INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NODESETS (
+    SETID   INT UNSIGNED NOT NULL,
+    NODEID  INT UNSIGNED NOT NULL
+);
+
+CREATE TABLE NODES (
+    NODEID  INT UNSIGNED NOT NULL PRIMARY KEY,
+    NAME    VARCHAR(255) NOT NULL,
+    ACTIVE  BOOLEAN DEFAULT TRUE
+);
+
+CREATE TABLE GROUPS (
+    GROUPID  INT UNSIGNED NOT NULL PRIMARY KEY,
+    AUTHID    VARCHAR(100) NOT NULL,
+    NAME    VARCHAR(50) NOT NULL,
+    DESCRIPTION    VARCHAR(255),
+    CLASSIFICATION    VARCHAR(20) NOT NULL,
+    MEMBERS    TINYTEXT,
+    LAST_MOD       TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- PROV_AUTH_ADDRESSES ('192.168.56.1' below) is the IPv4 address of the provisioning server
+INSERT INTO PARAMETERS VALUES
+	('ACTIVE_POD',  '127.0.0.1'),
+	('PROV_ACTIVE_NAME',  '${PROV_ACTIVE_NAME}'),
+	('STANDBY_POD', '${DRTR_PROV_STANDBYPOD}'),
+	('PROV_NAME',   'ALCDTL47TJ6015:6080'),
+	('NODES',       '127.0.0.1:8080'),
+	('PROV_DOMAIN', '127.0.0.1'),
+	('DELIVERY_INIT_RETRY_INTERVAL', '10'),
+	('DELIVERY_MAX_AGE', '86400'),
+	('DELIVERY_MAX_RETRY_INTERVAL', '3600'),
+	('DELIVERY_RETRY_RATIO', '2'),
+	('LOGROLL_INTERVAL', '300'),
+	('PROV_AUTH_ADDRESSES', '192.168.56.1'), 
+	('PROV_AUTH_SUBJECTS', ''),
+	('PROV_MAXFEED_COUNT',	'10000'),
+	('PROV_MAXSUB_COUNT',	'100000'),
+	('PROV_REQUIRE_CERT', 'false'),
+	('PROV_REQUIRE_SECURE', 'false'),
+	('_INT_VALUES', 'LOGROLL_INTERVAL|PROV_MAXFEED_COUNT|PROV_MAXSUB_COUNT|DELIVERY_INIT_RETRY_INTERVAL|DELIVERY_MAX_RETRY_INTERVAL|DELIVERY_RETRY_RATIO|DELIVERY_MAX_AGE')
+	;
\ No newline at end of file
diff --git a/datarouter-prov/src/main/resources/misc/notes b/datarouter-prov/src/main/resources/misc/notes
new file mode 100644
index 0000000..e3f872e
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/notes
@@ -0,0 +1,78 @@
+Package notes for com.att.dmaap.datarouter:prov
+
+This component is for the Data Router Provisioning Server software.
+
+The following pre-requisite components should already be present:
+	com.att.aft.swm:swm-cli
+	com.att.aft.swm:swm-node
+	- SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
+	com.att.platform:uam-auto
+	com.att.java:jdk8lin
+	com.att.platform:initd
+	com.att.platform:port-fwd
+	- SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
+	mysql:mysql
+	mysql:mysql-config
+	- SWM Variables: MYSQL_CONFIG_SIZE=small
+		MYSQL_DB_DATABASES=datarouter
+		MYSQL_DB_datarouter_USERS=datarouter,tier2
+		MYSQL_DB_datarouter_USERS_datarouter_LEVEL=RW
+		MYSQL_DB_datarouter_USERS_datarouter_PASSWORD=datarouter
+		MYSQL_DB_datarouter_USERS_tier2_LEVEL=RO
+		MYSQL_DB_datarouter_USERS_tier2_PASSWORD=<password>
+		MYSQL_MAX_ALLOWED_PACKET=32M
+		MYSQL_MAX_CONNECTIONS=300
+		MYSQL_PASSWORD=datarouter
+		MYSQL_PORT=3306
+
+
+In a production environment, the SWM variables that MUST be overridden are:
+	DRTR_PROV_ACTIVEPOD, DRTR_PROV_STANDBYPOD, DRTR_PROV_NODES
+In addition, in a non-production environment, the DRTR_PROV_CNAME SWM variable
+must also be overridden.
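+	For example (the values below are purely illustrative):
+		DRTR_PROV_ACTIVEPOD=prov1.example.com
+		DRTR_PROV_STANDBYPOD=prov2.example.com
+		DRTR_PROV_NODES=node1.example.com|node2.example.com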
+
+The SWM variables that can be set to control the provisioning server are:
+
+DRTR_PROV_ACTIVEPOD
+	The FQDN of the active POD
+DRTR_PROV_STANDBYPOD
+	The FQDN of the standby POD
+DRTR_PROV_CNAME (default feeds-drtr.web.att.com)
+	The DNS CNAME used for the prov server in this environment.
+DRTR_PROV_NODES
+	Pipe-delimited list of DR nodes to init the DB with.
+DRTR_PROV_DOMAIN (default web.att.com)
+	Domain to use for non-FQDN node names
+
+DRTR_PROV_INTHTTPPORT (default 8080)
+	The TCP/IP port number the component should listen on for "go fetch"
+	requests from the provisioning server
+DRTR_PROV_INTHTTPSPORT (default 8443)
+	The TCP/IP port number the component should listen on for publish
+	requests from feed publishers and other nodes
+DRTR_PROV_LOGS (default /opt/app/datartr/logs)
+	The directory where log files should be kept
+DRTR_PROV_SPOOL (default /opt/app/datartr/spool)
+	The directory where logfiles from the DR nodes are spooled before being
+	imported into the DB.
+
+DRTR_PROV_KEYMGRPASS (default changeit)
+	The password for the key manager
+DRTR_PROV_KSTOREFILE (default /opt/app/datartr/etc/keystore)
+	The java keystore file containing the server certificate and private key
+	for this server
+DRTR_PROV_KSTOREPASS (default changeit)
+	The password for the keystore file
+DRTR_PROV_TSTOREFILE (by default, use the truststore from the Java JDK)
+	The java keystore file containing the trusted certificate authority
+	certificates
+DRTR_PROV_TSTOREPASS (default changeit)
+	The password for the trust store file.  Only applies if a trust store
+	file is specified.
+
+DRTR_PROV_DBLOGIN (default datarouter)
+	The login used to access MySQL
+DRTR_PROV_DBPASS (default datarouter)
+	The password used to access MySQL
+DRTR_PROV_DBSCRIPTS (default /opt/app/datartr/etc)
+	The directory containing DB initialization scripts
diff --git a/datarouter-prov/src/main/resources/misc/provcmd b/datarouter-prov/src/main/resources/misc/provcmd
new file mode 100644
index 0000000..63efa54
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/provcmd
@@ -0,0 +1,163 @@
+#!/bin/bash
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+#  provcmd -- A script to interact with a provisioning server to manage the provisioning parameters.
+#     Set $VERBOSE to a non-empty string to see the curl commands as they are executed.
+#
+#  $Id: provcmd,v 1.6 2014/03/31 13:23:33 eby Exp $
+#
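+#  Example invocations (server name and values below are hypothetical):
+#     provcmd -s prov.example.com get NODES
+#     provcmd -s prov.example.com set LOGROLL_INTERVAL 300
+#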
+
+PATH=/opt/app/datartr/bin:/bin:/usr/bin:$PATH
+PROVCMD="$0"
+export PATH PROVSRVR PROVCMD NOPROXY
+
+if [ ! -x /usr/bin/curl ]
+then
+	echo provcmd: curl is required for this tool.
+	exit 1
+fi
+optloop=
+while [ -z "$optloop" ]
+do
+	if [ "$1" == '-s' ]
+	then
+		shift
+		PROVSRVR="$1"
+		shift
+	elif [ "$1" == '-v' ]
+	then
+		shift
+		VERBOSE=x
+	elif [ "$1" == '-N' ]
+	then
+		shift
+		NOPROXY='?noproxy=1'
+	else
+		optloop=1
+	fi
+done
+if [ -z "$PROVSRVR" ]
+then
+	echo "provcmd: you need to specify the server, either via the -s option"
+	echo "         or by setting and exporting PROVSRVR"
+	exit 1
+fi
+
+CMD="$1"
+shift
+if [ "$CMD" == 'delete' ]
+then
+	if [ $# -gt 0 ]
+	then
+		for i
+		do
+			[ -n "$VERBOSE" ] && echo curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
+			curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
+		done
+		exit 0
+	fi
+elif [ "$CMD" == 'create' ]
+then
+	if [ $# -eq 2 ]
+	then
+		# create (with POST), then set the value
+		[ -n "$VERBOSE" ] && echo curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
+		curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
+		$PROVCMD set "$1" "$2"
+		exit 0
+	fi
+elif [ "$CMD" == 'get' ]
+then
+	if [ $# -eq 1 ]
+	then
+		# get
+		[ -n "$VERBOSE" ] && echo curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY"
+		curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY" 2>/dev/null | tr '|' '\012' | sort
+		exit 0
+	fi
+elif [ "$CMD" == 'set' ]
+then
+	if [ $# -ge 2 ]
+	then
+		p="$1"
+		shift
+		v=""
+		for i; do [ -n "$v" ] && v="$v|"; v="$v$i"; done
+		# set (with PUT)
+		ue=`urlencode "$v"`
+		NOPROXY=`echo $NOPROXY | tr '?' '&'`
+		[ -n "$VERBOSE" ] && echo curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
+		curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
+		exit 0
+	fi
+elif [ "$CMD" == 'append' ]
+then
+	if [ $# -ge 2 ]
+	then
+		p="$1"
+		shift
+		tmp=`curl -4 -k "https://$PROVSRVR/internal/api/$p$NOPROXY" 2>/dev/null`
+		$PROVCMD set "$p" "$tmp" "$@"
+		exit 0
+	fi
+elif [ "$CMD" == 'remove' ]
+then
+	if [ $# -eq 2 ]
+	then
+		p="$1"
+		rm="$2"
+		$PROVCMD get "$p" | grep -v "^$rm\$" > /tmp/pc$$
+		IFS=$'\r\n'
+		$PROVCMD set "$p" `cat /tmp/pc$$`
+		rm /tmp/pc$$
+		exit 0
+	fi
+fi
+
+# Some error somewhere - display usage
+cat <<'EOF'
+usage: provcmd [ -s server ] delete name1 [ name2 ... ]
+       provcmd [ -s server ] get name
+       provcmd [ -s server ] create name value
+       provcmd [ -s server ] set name value1 [ value2 ... ]
+       provcmd [ -s server ] append name value1 [ value2 ... ]
+       provcmd [ -s server ] remove name value
+
+delete - removes the parameters named name1, name2 ...
+get    - displays the parameter's value
+create - creates a new parameter
+set    - sets the value of an existing parameter
+append - appends one or more values to a list-based parameter
+remove - removes a value from a list-based parameter
+
+server - the provisioning server FQDN (feeds-drtr.web.att.com for production)
+
+Standard Parameters Names:
+------------------------------
+ACTIVE_POD
+DELIVERY_INIT_RETRY_INTERVAL
+DELIVERY_MAX_AGE
+DELIVERY_MAX_RETRY_INTERVAL
+DELIVERY_RETRY_RATIO
+LOGROLL_INTERVAL
+NODES
+PROV_ACTIVE_NAME
+PROV_AUTH_ADDRESSES
+PROV_AUTH_SUBJECTS
+PROV_DOMAIN
+PROV_MAXFEED_COUNT
+PROV_MAXSUB_COUNT
+PROV_NAME
+PROV_REQUIRE_CERT
+PROV_REQUIRE_SECURE
+STANDBY_POD
+EOF
+exit 1
diff --git a/datarouter-prov/src/main/resources/misc/runreports b/datarouter-prov/src/main/resources/misc/runreports
new file mode 100644
index 0000000..009b749
--- /dev/null
+++ b/datarouter-prov/src/main/resources/misc/runreports
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+#                        AT&T - PROPRIETARY
+#          THIS FILE CONTAINS PROPRIETARY INFORMATION OF
+#        AT&T AND IS NOT TO BE DISCLOSED OR USED EXCEPT IN
+#             ACCORDANCE WITH APPLICABLE AGREEMENTS.
+#
+#          Copyright (c) 2013 AT&T Knowledge Ventures
+#              Unpublished and Not for Publication
+#                     All Rights Reserved
+#
+#  This script runs daily to generate report files in the logs directory.
+#
+#  $Id: runreports,v 1.2 2013/11/06 16:23:54 eby Exp $
+#
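+#  Note: drtrprov installs the crontab entry '0 1 * * * /opt/app/datartr/bin/runreports',
+#  so in normal operation cron launches this script once a day.
+#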
+
+umask 0022
+
+JAVA_HOME=/opt/java/jdk/jdk180
+JAVA_OPTS="-Xms1G -Xmx4G"
+JAVA_CLASS=com.att.research.datarouter.reports.Report
+TZ=GMT0
+PATH=$JAVA_HOME/bin:/bin:/usr/bin
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
+LOGDIR=/opt/app/datartr/logs
+YESTERDAY=`/bin/date --date=yesterday '+%Y%m%d'`
+
+export CLASSPATH JAVA_HOME JAVA_OPTS TZ PATH
+
+ID=`id -n -u`
+GRP=`id -n -g`
+if [ "$ID" != "datartr" ]
+then
+	echo runreports must be started as user datartr not $ID
+	exit 1
+fi
+if [ "$GRP" != "datartr" ]
+then
+	echo runreports must be started as group datartr not $GRP
+	exit 1
+fi
+if [ "`pgrep -u mysql mysqld`" = "" ]
+then
+	echo MySQL is not running.  It must be started before runreports
+	exit 1
+fi
+
+# Volume report
+java $JAVA_OPTS $JAVA_CLASS -t volume -o $LOGDIR/volume.csv.$YESTERDAY yesterday </dev/null >/dev/null
+
+# Subscriber report
+java $JAVA_OPTS $JAVA_CLASS -t subscriber -o $LOGDIR/subscriber.csv.$YESTERDAY yesterday </dev/null >/dev/null
+
+exit 0
diff --git a/datarouter-prov/src/main/resources/provserver.properties b/datarouter-prov/src/main/resources/provserver.properties
new file mode 100644
index 0000000..af5073e
--- /dev/null
+++ b/datarouter-prov/src/main/resources/provserver.properties
@@ -0,0 +1,48 @@
+#-------------------------------------------------------------------------------
+# ============LICENSE_START==================================================
+# * org.onap.dmaap
+# * ===========================================================================
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# * ===========================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# * 
+#  *      http://www.apache.org/licenses/LICENSE-2.0
+# * 
+#  * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# * ============LICENSE_END====================================================
+# *
+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# *
+#-------------------------------------------------------------------------------
+
+
+#Jetty Server properties
+com.att.research.datarouter.provserver.http.port           = 8080
+com.att.research.datarouter.provserver.https.port          = 8443
+com.att.research.datarouter.provserver.https.relaxation    = true
+com.att.research.datarouter.provserver.keymanager.password = changeit
+com.att.research.datarouter.provserver.keystore.type       = jks
+com.att.research.datarouter.provserver.keystore.path       = /opt/app/datartr/self_signed/keystore.jks
+
+com.att.research.datarouter.provserver.keystore.password   = changeit
+#com.att.research.datarouter.provserver.truststore.path     = /home/eby/dr2/misc/cacerts+1
+#com.att.research.datarouter.provserver.truststore.path     = /usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts
+com.att.research.datarouter.provserver.truststore.path     = /opt/app/datartr/self_signed/cacerts.jks
+
+com.att.research.datarouter.provserver.truststore.password = changeit
+com.att.research.datarouter.provserver.accesslog.dir       = /opt/app/datartr/logs
+com.att.research.datarouter.provserver.spooldir            = /opt/app/datartr/spool
+#com.att.research.datarouter.provserver.dbscripts          = /home/eby/dr2/cvs/datarouter/prov/misc/
+com.att.research.datarouter.provserver.logretention        = 30
+
+# Database access
+com.att.research.datarouter.db.driver   = com.mysql.jdbc.Driver
+com.att.research.datarouter.db.url      = jdbc:mysql://172.18.0.2:3306/datarouter
+com.att.research.datarouter.db.login    = datarouter
+com.att.research.datarouter.db.password = datarouter
diff --git a/datarouter-prov/src/main/resources/startup.sh b/datarouter-prov/src/main/resources/startup.sh
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/datarouter-prov/src/main/resources/startup.sh
diff --git a/datarouter-prov/src/main/resources/subscriber.jar b/datarouter-prov/src/main/resources/subscriber.jar
new file mode 100644
index 0000000..c8e4775
--- /dev/null
+++ b/datarouter-prov/src/main/resources/subscriber.jar
Binary files differ
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/AllTests.java b/datarouter-prov/src/test/java/datarouter/provisioning/AllTests.java
new file mode 100644
index 0000000..9215955
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/AllTests.java
@@ -0,0 +1,48 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+import org.junit.runners.Suite.SuiteClasses;
+
+@RunWith(Suite.class)
+@SuiteClasses({
+	testDRFeedsPost.class,
+	testDRFeedsGet.class,
+	testDRFeedsPut.class,
+	testDRFeedsDelete.class,
+	testFeedPut.class,
+	testSubscribePost.class,
+	testInternalGet.class,
+	testInternalMisc.class,
+	testPublish.class,
+	testLogGet.class,
+	testFeedDelete.class,
+	testCleanup.class,
+	testRLEBitSet.class
+})
+public class AllTests {
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/FillDB.java b/datarouter-prov/src/test/java/datarouter/provisioning/FillDB.java
new file mode 100644
index 0000000..c41cc80
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/FillDB.java
@@ -0,0 +1,125 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package datarouter.provisioning;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.UnrecoverableKeyException;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.conn.ssl.SSLSocketFactory;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.impl.client.AbstractHttpClient;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class FillDB {
+	public static void main(String[] args)
+		throws KeyStoreException, FileNotFoundException, KeyManagementException, UnrecoverableKeyException, NoSuchAlgorithmException
+	{
+		AbstractHttpClient httpclient = new DefaultHttpClient();
+
+		String keystore = "/home/eby/dr2/misc/client.keystore";
+		String kspass   = "changeit";
+		KeyStore trustStore  = KeyStore.getInstance(KeyStore.getDefaultType());
+	    FileInputStream instream = new FileInputStream(new File(keystore));
+	    try {
+	        trustStore.load(instream, kspass.toCharArray());
+	    } catch (Exception x) {
+	    	System.err.println("READING KEYSTORE: "+x);
+	    } finally {
+	        try { instream.close(); } catch (Exception ignore) {}
+	    }
+
+	    SSLSocketFactory socketFactory = new SSLSocketFactory(trustStore, "changeit", trustStore);
+	    Scheme sch = new Scheme("https", 443, socketFactory);
+	    httpclient.getConnectionManager().getSchemeRegistry().register(sch);
+
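+	    // Build a template feed-creation request, then POST it 10,000 times (with a unique
+	    // "version" on each iteration), printing the HTTP status code returned for each attempt.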
+	    JSONObject jo = buildFeedRequest();
+		for (int i = 0; i < 10000; i++) {
+			jo.put("version", ""+System.currentTimeMillis());
+			int rv = -1;
+			String url   = "https://conwy.proto.research.att.com:6443/";
+			HttpPost httpPost = new HttpPost(url);
+			try {
+				httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+				String t = jo.toString();
+				HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create(FeedServlet.FEED_CONTENT_TYPE));
+				httpPost.setEntity(body);
+
+				HttpResponse response = httpclient.execute(httpPost);
+				rv = response.getStatusLine().getStatusCode();
+				HttpEntity entity = response.getEntity();
+				EntityUtils.consume(entity);
+			} catch (IOException e) {
+				System.err.println(e);
+			} finally {
+				httpPost.releaseConnection();
+			}
+			System.out.println(i + " " + rv);
+		}
+	}
+	private static JSONObject buildFeedRequest() {
+		JSONObject jo = new JSONObject();
+		jo.put("name", "feed");
+		jo.put("version", ""+System.currentTimeMillis());
+		jo.put("description", "Sample feed used by JUnit to test");
+
+			JSONObject jo2 = new JSONObject();
+			jo2.put("classification", "unrestricted");
+
+			JSONArray ja = new JSONArray();
+				JSONObject jo3 = new JSONObject();
+				jo3.put("id", "id001");
+				jo3.put("password", "re1kwelj");
+				JSONObject jo4 = new JSONObject();
+				jo4.put("id", "id002");
+				jo4.put("password", "o9eqlmbd");
+				ja.put(jo3);
+				ja.put(jo4);
+			jo2.put("endpoint_ids", ja);
+
+			ja = new JSONArray();
+				ja.put("10.0.0.1");
+				ja.put("192.168.0.1");
+				ja.put("135.207.136.128/25");
+			jo2.put("endpoint_addrs", ja);
+
+		jo.put("authorization", jo2);
+		return jo;
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/package.html b/datarouter-prov/src/test/java/datarouter/provisioning/package.html
new file mode 100644
index 0000000..d0383b8
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/package.html
@@ -0,0 +1,29 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+<html>
+<body>
+<p>
+This package provides JUnit tests for the provisioning server.
+</p>
+</body>
+</html>
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testBase.java b/datarouter-prov/src/test/java/datarouter/provisioning/testBase.java
new file mode 100644
index 0000000..9a96933
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testBase.java
@@ -0,0 +1,158 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.KeyStore;
+import java.util.Properties;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.conn.ssl.SSLSocketFactory;
+import org.apache.http.impl.client.AbstractHttpClient;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.After;
+import org.junit.Before;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testBase {
+	/** The properties file to read the DB properties from */
+	public static final String CONFIG_FILE = "tests.properties";
+
+	public Properties props;
+	protected AbstractHttpClient httpclient;
+	protected String s_33;
+	protected String s_257;
+	protected static JSONObject db_state;
+
+	@Before
+	public void setUp() throws Exception {
+		if (props == null) {
+			props = new Properties();
+			InputStream inStream = getClass().getClassLoader().getResourceAsStream(CONFIG_FILE);
+			try {
+				props.load(inStream);
+			} catch (Exception e) {
+				e.printStackTrace();
+			} finally {
+				inStream.close();
+			}
+		}
+
+		httpclient = new DefaultHttpClient();
+		String s = "0123456789ABCDEF";
+		s_33 = s + s + "!";
+		s = s + s + s + s;
+		s_257 = s + s + s + s + "!";
+
+		// keystore
+		String store = props.getProperty("test.keystore");
+		String pass  = props.getProperty("test.kspassword");
+		KeyStore keyStore  = KeyStore.getInstance(KeyStore.getDefaultType());
+	    FileInputStream instream = new FileInputStream(new File(store));
+	    try {
+	    	keyStore.load(instream, pass.toCharArray());
+	    } catch (Exception x) {
+	    	System.err.println("READING KEYSTORE: "+x);
+	    } finally {
+	        try { instream.close(); } catch (Exception ignore) {}
+	    }
+
+		store = props.getProperty("test.truststore");
+		pass  = props.getProperty("test.tspassword");
+		KeyStore trustStore  = KeyStore.getInstance(KeyStore.getDefaultType());
+	    instream = new FileInputStream(new File(store));
+	    try {
+	        trustStore.load(instream, pass.toCharArray());
+	    } catch (Exception x) {
+	    	System.err.println("READING TRUSTSTORE: "+x);
+	    } finally {
+	        try { instream.close(); } catch (Exception ignore) {}
+	    }
+
+	    SSLSocketFactory socketFactory = new SSLSocketFactory(keyStore, "changeit", trustStore);
+	    Scheme sch = new Scheme("https", 443, socketFactory);
+	    httpclient.getConnectionManager().getSchemeRegistry().register(sch);
+	}
+
+	public JSONObject getDBstate() {
+		// set db_state!
+		if (db_state == null) {
+			String url   = props.getProperty("test.host") + "/internal/prov";
+			HttpGet httpGet = new HttpGet(url);
+			try {
+				httpGet.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+				HttpResponse response = httpclient.execute(httpGet);
+				HttpEntity entity = response.getEntity();
+				String ctype = entity.getContentType().getValue().trim();
+				// save the response body as db_state
+				boolean ok  = ctype.equals(FeedServlet.PROVFULL_CONTENT_TYPE1);
+				        ok |= ctype.equals(FeedServlet.PROVFULL_CONTENT_TYPE2);
+				if (ok) {
+					db_state = null;
+					try {
+						db_state = new JSONObject(new JSONTokener(entity.getContent()));
+					} catch (Exception e) {
+						fail("Bad JSON: "+e.getMessage());
+					}
+				} else {
+					EntityUtils.consume(entity);
+				}
+			} catch (IOException e) {
+				fail(e.getMessage());
+			} finally {
+				httpGet.releaseConnection();
+			}
+		}
+		return db_state;
+	}
+
+	@After
+	public void tearDown() throws Exception {
+		// When HttpClient instance is no longer needed,
+		// shut down the connection manager to ensure
+		// immediate deallocation of all system resources
+		httpclient.getConnectionManager().shutdown();
+	}
+
+	protected void ckResponse(HttpResponse response, int expect) {
+		System.out.println(response.getStatusLine());
+		StatusLine sl = response.getStatusLine();
+		int code = sl.getStatusCode();
+		if (code != expect)
+			fail("Unexpected response, expect "+expect+" got "+code+" "+sl.getReasonPhrase());
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testCleanup.java b/datarouter-prov/src/test/java/datarouter/provisioning/testCleanup.java
new file mode 100644
index 0000000..fa1c5f4
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testCleanup.java
@@ -0,0 +1,85 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testCleanup extends testBase {
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+	}
+
+	@Test
+	public void testNormal() {
+		// Delete all feeds w/JUnit as publisher
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject feed = ja.getJSONObject(i);
+			if (feed != null && !feed.getBoolean("deleted")) {
+				if (feed.getString("publisher").equals("JUnit")) {
+					int feedid = feed.getInt("feedid");
+					delete("/feed/"+feedid);
+				}
+			}
+		}
+		// Delete all subscriptions w/JUnit as subscriber
+		ja = db_state.getJSONArray("subscriptions");
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject sub = ja.getJSONObject(i);
+			if (sub != null && sub.getString("subscriber").equals("JUnit")) {
+				int subid = sub.getInt("subid");
+				delete("/subs/"+subid);
+			}
+		}
+	}
+	private void delete(String uri) {
+		String url = props.getProperty("test.host") + uri;
+		HttpDelete del = new HttpDelete(url);
+		try {
+			del.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+			HttpResponse response = httpclient.execute(del);
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			del.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsDelete.java b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsDelete.java
new file mode 100644
index 0000000..fb4a554
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsDelete.java
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.util.EntityUtils;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testDRFeedsDelete extends testBase {
+	@Test
+	public void testNotAllowed() {
+		String url = props.getProperty("test.host") + "/";
+		HttpDelete del = new HttpDelete(url);
+		try {
+			del.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(del);
+		    ckResponse(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			del.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsGet.java b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsGet.java
new file mode 100644
index 0000000..33303fa
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsGet.java
@@ -0,0 +1,188 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testDRFeedsGet extends testBase {
+	private JSONArray returnedlist;
+
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+	}
+
+	@Test
+	public void testNormal() {
+		testCommon(HttpServletResponse.SC_OK);
+		int expect = 0;
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject jo = ja.getJSONObject(i);
+			if (!jo.getBoolean("deleted"))
+				expect++;
+		}
+		if (returnedlist.length() != expect)
+			fail("bad length, got "+ returnedlist.length() + " expect " + expect);
+	}
+	@Test
+	public void testNormalGoodName() {
+		JSONArray ja = db_state.getJSONArray("feeds");
+		JSONObject feed0 = ja.getJSONObject(0);
+		String name = feed0.getString("name");
+		String query = "?name=" + name;
+		int expect = 0;
+		for (int n = 0; n < ja.length(); n++) {
+			JSONObject jo = ja.getJSONObject(n);
+			if (!jo.getBoolean("deleted") && jo.getString("name").equals(name))
+				expect++;
+		}
+		testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		if (returnedlist.length() != expect)
+			fail("bad length, got "+ returnedlist.length() + " expect "+expect);
+	}
+	@Test
+	public void testNormalBadName() {
+		String query = "?name=ZZTOP123456";
+		testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		if (returnedlist.length() != 0)
+			fail("bad length, got "+ returnedlist.length() + " expect 0");
+	}
+	@Test
+	public void testNormalBadPath() {
+		String query = "flarg/?publisher=JUnit";
+		testCommon(HttpServletResponse.SC_NOT_FOUND, query, "text/html;charset=ISO-8859-1", "JUnit");
+	}
+	@Test
+	public void testNormalGoodPublisher() {
+		JSONArray ja = db_state.getJSONArray("feeds");
+		JSONObject feed0 = ja.getJSONObject(0);
+		String query = "?publisher=" + feed0.getString("publisher");
+		testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		int expect = 0;
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject jo = ja.getJSONObject(i);
+			if (jo.getString("publisher").equals(feed0.getString("publisher")) && !jo.getBoolean("deleted"))
+				expect++;
+		}
+		if (returnedlist.length() != expect)
+			fail("bad length, got "+returnedlist.length()+" expected "+expect);
+	}
+	@Test
+	public void testNormalBadPublisher() {
+		String query = "?publisher=ZZTOP123456";
+		testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		if (returnedlist.length() != 0)
+			fail("bad length");
+	}
+	@Test
+	public void testNormalGoodSubscriber() {
+		JSONArray ja = db_state.getJSONArray("subscriptions");
+		if (ja.length() > 0) {
+			JSONObject sub0 = ja.getJSONObject(0);
+			String query = "?subscriber=" + sub0.getString("subscriber");
+			testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+// aarg! - this is complicated!
+//		int expect = 0;
+//		for (int i = 0; i < ja.length(); i++) {
+//			JSONObject jo = ja.getJSONObject(i);
+//			if (jo.getString("subscriber").equals(sub0.getString("subscriber")))
+//				expect++;
+//		}
+//		if (returnedlist.length() != 1)
+//			fail("bad length "+returnedlist.toString());
+		} else {
+			// There are no subscriptions yet, so use a made up name
+			testCommon(HttpServletResponse.SC_OK, "?subscriber=foo", FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		}
+	}
+	@Test
+	public void testNormalBadSubscriber() {
+		String query = "?subscriber=ZZTOP123456";
+		testCommon(HttpServletResponse.SC_OK, query, FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+		if (returnedlist.length() != 0)
+			fail("bad length");
+	}
+	private void testCommon(int expect) {
+		testCommon(expect, "", FeedServlet.FEEDLIST_CONTENT_TYPE, "JUnit");
+	}
+	private void testCommon(int expect, String query, String ectype, String bhdr) {
+		String url = props.getProperty("test.host") + "/" + query;
+		HttpGet httpGet = new HttpGet(url);
+		try {
+			if (bhdr != null)
+				httpGet.addHeader(FeedServlet.BEHALF_HEADER, bhdr);
+
+			HttpResponse response = httpclient.execute(httpGet);
+		    ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue().trim();
+			if (!ctype.equals(ectype))
+				fail("Got wrong content type: "+ctype);
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ctype.equals(FeedServlet.FEEDLIST_CONTENT_TYPE)) {
+				try {
+					returnedlist = new JSONArray(new JSONTokener(entity.getContent()));
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			httpGet.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPost.java b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPost.java
new file mode 100644
index 0000000..65d041d
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPost.java
@@ -0,0 +1,282 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testDRFeedsPost extends testBase {
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Test
+	public void testNormal() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_CREATED);
+	}
+	@Test
+	public void testNormalNoCTVersion() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_CREATED, "application/vnd.att-dr.feed", "JUnit");
+	}
+	@Test
+	public void testBadContentType() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, "bad/bad", "Junit");
+	}
+	@Test
+	public void testNoBehalfHeader() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST, FeedServlet.FEED_CONTENT_TYPE, null);
+	}
+	@Test
+	public void testMissingName() {
+		JSONObject jo = buildFeedRequest();
+		jo.remove("name");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testTooLongName() {
+		JSONObject jo = buildFeedRequest();
+		jo.put("name", "123456789012345678901234567890");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testMissingVersion() {
+		JSONObject jo = buildFeedRequest();
+		jo.remove("version");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testTooLongVersion() {
+		JSONObject jo = buildFeedRequest();
+		jo.put("version", "123456789012345678901234567890");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testTooLongDescription() {
+		// normal request
+		JSONObject jo = buildFeedRequest();
+		jo.put("description", s_257);
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testMissingAuthorization() {
+		JSONObject jo = buildFeedRequest();
+		jo.remove("authorization");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testMissingClassification() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		j2.remove("classification");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testTooLongClassification() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		j2.put("classification", s_33);
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testNoEndpointIds() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		j2.put("endpoint_ids", new JSONArray());
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testBadIPAddress1() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("ZZZ^&#$%@#&^%$@#&^");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testBadIPAddress2() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("135.207.136.678");	// bad IPv4 addr
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testBadIPAddress3() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("2001:1890:1110:d000:1a29::17567"); // bad IPv6 addr
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testBadNetMask() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("10.10.10.10/64");
+		testCommon(jo, 400);
+	}
+	@Test
+	public void testGoodIPAddress1() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("135.207.136.175"); // good IPv4 addr
+		testCommon(jo, 201);
+	}
+	@Test
+	public void testGoodIPAddress2() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("2001:1890:1110:d000:1a29::175"); // good IPv6 addr
+		testCommon(jo, 201);
+	}
+	@Test
+	public void testGoodNetMask() {
+		JSONObject jo = buildFeedRequest();
+		JSONObject j2 = jo.getJSONObject("authorization");
+		JSONArray ja = j2.getJSONArray("endpoint_addrs");
+		ja.put("2001:1890:1110:d000:1a29::175/120");
+		testCommon(jo, 201);
+	}
+	private void testCommon(JSONObject jo, int expect) {
+		testCommon(jo, expect, FeedServlet.FEED_CONTENT_TYPE, "JUnit");
+	}
+	private void testCommon(JSONObject jo, int expect, String ctype, String bhdr) {
+		String url   = props.getProperty("test.host") + "/";
+		HttpPost httpPost = new HttpPost(url);
+		try {
+			if (bhdr != null)
+				httpPost.addHeader(FeedServlet.BEHALF_HEADER, bhdr);
+			String t = jo.toString();
+			HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create(ctype));
+			httpPost.setEntity(body);
+
+			HttpResponse response = httpclient.execute(httpPost);
+			ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			ctype = entity.getContentType().getValue().trim();
+			int code = response.getStatusLine().getStatusCode();
+			if (code == HttpServletResponse.SC_CREATED && !ctype.equals(FeedServlet.FEEDFULL_CONTENT_TYPE))
+				fail("Got wrong content type: "+ctype);
+
+			if (code == HttpServletResponse.SC_CREATED) {
+				Header[] loc = response.getHeaders("Location");
+				if (loc == null || loc.length == 0)
+					fail("Missing Location header.");
+			}
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ctype.equals(FeedServlet.FEEDFULL_CONTENT_TYPE)) {
+				// ck Location header!
+				JSONObject jo2 = null;
+				try {
+					jo2 = new JSONObject(new JSONTokener(entity.getContent()));
+					System.err.println(jo2.toString());
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+				try {
+					jo2.getString("publisher");
+					JSONObject jo3 = jo2.getJSONObject("links");
+					jo3.getString("self");
+					jo3.getString("publish");
+					jo3.getString("subscribe");
+					jo3.getString("log");
+				} catch (JSONException e) {
+					fail("required field missing from result: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+	private JSONObject buildFeedRequest() {
+		JSONObject jo = new JSONObject();
+		jo.put("name", "JunitFeed");
+		jo.put("version", ""+System.currentTimeMillis());	// make version unique
+		jo.put("description", "Sample feed used by JUnit to test");
+
+			JSONObject jo2 = new JSONObject();
+			jo2.put("classification", "unrestricted");
+
+			JSONArray ja = new JSONArray();
+				JSONObject jo3 = new JSONObject();
+				jo3.put("id", "id001");
+				jo3.put("password", "re1kwelj");
+				JSONObject jo4 = new JSONObject();
+				jo4.put("id", "id002");
+				jo4.put("password", "o9eqlmbd");
+				ja.put(jo3);
+				ja.put(jo4);
+			jo2.put("endpoint_ids", ja);
+
+			ja = new JSONArray();
+				ja.put("10.0.0.1");
+				ja.put("192.168.0.1");
+				ja.put("135.207.136.128/25");
+			jo2.put("endpoint_addrs", ja);
+
+		jo.put("authorization", jo2);
+		return jo;
+	}
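+	/*
+	 * For reference, the request body built above has roughly this shape
+	 * (values taken from the puts above; the version field varies per run):
+	 *
+	 * {
+	 *   "name": "JunitFeed",
+	 *   "version": "<current time in ms>",
+	 *   "description": "Sample feed used by JUnit to test",
+	 *   "authorization": {
+	 *     "classification": "unrestricted",
+	 *     "endpoint_ids": [
+	 *       { "id": "id001", "password": "re1kwelj" },
+	 *       { "id": "id002", "password": "o9eqlmbd" }
+	 *     ],
+	 *     "endpoint_addrs": [ "10.0.0.1", "192.168.0.1", "135.207.136.128/25" ]
+	 *   }
+	 * }
+	 */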
+}
+/*
+curl -v -X POST -H 'X-ATT-DR-ON-BEHALF-OF: tester' -H 'Content-type: application/vnd.att-dr.feed' --user publisher:tomcat \
+	--data "$data" http://127.0.0.1:8080/prov/feed/
+*/
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPut.java b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPut.java
new file mode 100644
index 0000000..aee58b2
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testDRFeedsPut.java
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.util.EntityUtils;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testDRFeedsPut extends testBase {
+	@Test
+	public void testNotAllowed() {
+		String url = props.getProperty("test.host") + "/";
+		HttpPut put = new HttpPut(url);
+		try {
+			put.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(put);
+			ckResponse(response, HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			put.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testFeedDelete.java b/datarouter-prov/src/test/java/datarouter/provisioning/testFeedDelete.java
new file mode 100644
index 0000000..6e2b718
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testFeedDelete.java
@@ -0,0 +1,98 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testFeedDelete extends testBase {
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+	}
+
+	@Test
+	public void testDeleteNormal() {
+		// Delete the last non-deleted feed listed in the DB state (the loop runs from the end)
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = ja.length()-1; i >= 0; i--) {
+			JSONObject feed = ja.getJSONObject(i);
+			if (!feed.getBoolean("deleted")) {
+				int feedid = feed.getInt("feedid");
+				testCommon(HttpServletResponse.SC_NO_CONTENT, "/feed/"+feedid);
+				return;
+			}
+		}
+	}
+	@Test
+	public void testDeleteNoFeedID() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "/feed/");
+	}
+	@Test
+	public void testDeleteNoFeed() {
+		testCommon(HttpServletResponse.SC_NOT_FOUND, "/feed/999999");
+	}
+	private void testCommon(int expect, String uri) {
+		String url = props.getProperty("test.host") + uri;
+		HttpDelete del = new HttpDelete(url);
+		try {
+			del.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(del);
+			ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			del.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testFeedPut.java b/datarouter-prov/src/test/java/datarouter/provisioning/testFeedPut.java
new file mode 100644
index 0000000..8393a98
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testFeedPut.java
@@ -0,0 +1,202 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testFeedPut extends testBase {
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+	}
+
+	@Test
+	public void testPutNoFeedID() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST, "/feed/");
+	}
+	@Test
+	public void testPutNoFeed() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_NOT_FOUND, "/feed/999999");
+	}
+	@Test
+	public void testBadContentType() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, "bad/bad", "JUnit");
+	}
+	@Test
+	public void testChangeName() {
+		JSONObject jo = buildFeedRequest();
+		jo.put("name", "badname");
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST, FeedServlet.FEED_CONTENT_TYPE, "JUnit");
+	}
+	@Test
+	public void testChangeVersion() {
+		JSONObject jo = buildFeedRequest();
+		jo.put("version", "badvers");
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST, FeedServlet.FEED_CONTENT_TYPE, "JUnit");
+	}
+	@Test
+	public void testBadPublisher() {
+		JSONObject jo = buildFeedRequest();
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST, FeedServlet.FEED_CONTENT_TYPE, "BadBadBad");
+	}
+	@Test
+	public void testChangeDescription() {
+		JSONObject jo = buildFeedRequest();
+		// change descr
+		jo.put("description", "This description HAS BEEN CHANGED!!!");
+		testCommon(jo, HttpServletResponse.SC_OK, FeedServlet.FEED_CONTENT_TYPE, "JUnit");
+	}
+
+	private void testCommon(JSONObject jo, int expect, String uri) {
+		testCommon(jo, expect, FeedServlet.FEED_CONTENT_TYPE, "JUnit", uri);
+	}
+	private void testCommon(JSONObject jo, int expect, String ctype, String bhdr) {
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject feed0 = ja.getJSONObject(i);
+			if (!feed0.getBoolean("deleted") && feed0.getString("publisher").equals(bhdr)) {
+				int feedid = feed0.getInt("feedid");
+				testCommon(jo, expect, ctype, bhdr, "/feed/"+feedid);
+				return;
+			}
+		}
+	}
+	private void testCommon(JSONObject jo, int expect, String ctype, String bhdr, String uri) {
+		String url   = props.getProperty("test.host") + uri;
+		HttpPut put = new HttpPut(url);
+		try {
+			if (bhdr != null)
+				put.addHeader(FeedServlet.BEHALF_HEADER, bhdr);
+			String t = jo.toString();
+			HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create(ctype));
+			put.setEntity(body);
+
+			HttpResponse response = httpclient.execute(put);
+			ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			ctype = entity.getContentType().getValue().trim();
+			int code = response.getStatusLine().getStatusCode();
+			if (code == HttpServletResponse.SC_CREATED && !ctype.equals(FeedServlet.FEEDFULL_CONTENT_TYPE))
+				fail("Got wrong content type: "+ctype);
+
+			if (code == HttpServletResponse.SC_CREATED) {
+				Header[] loc = response.getHeaders("Location");
+				if (loc == null || loc.length == 0)
+					fail("Missing Location header.");
+			}
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ctype.equals(FeedServlet.FEEDFULL_CONTENT_TYPE)) {
+				// ck Location header!
+				JSONObject jo2 = null;
+				try {
+					jo2 = new JSONObject(new JSONTokener(entity.getContent()));
+					System.err.println(jo2.toString());
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+				try {
+					jo2.getString("publisher");
+					JSONObject jo3 = jo2.getJSONObject("links");
+					jo3.getString("self");
+					jo3.getString("publish");
+					jo3.getString("subscribe");
+					jo3.getString("log");
+				} catch (JSONException e) {
+					fail("required field missing from result: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			put.releaseConnection();
+		}
+	}
+	private JSONObject buildFeedRequest() {
+		JSONObject jo = new JSONObject();
+		jo.put("name", "feed");
+		jo.put("version", "1.0.0");
+		jo.put("description", "Sample feed used by JUnit to test");
+
+			JSONObject jo2 = new JSONObject();
+			jo2.put("classification", "unrestricted");
+
+			JSONArray ja = new JSONArray();
+				JSONObject jo3 = new JSONObject();
+				jo3.put("id", "id001");
+				jo3.put("password", "re1kwelj");
+				JSONObject jo4 = new JSONObject();
+				jo4.put("id", "id002");
+				jo4.put("password", "o9eqlmbd");
+				ja.put(jo3);
+				ja.put(jo4);
+			jo2.put("endpoint_ids", ja);
+
+			ja = new JSONArray();
+				ja.put("20.0.0.1");
+				ja.put("195.68.12.15");
+				ja.put("135.207.136.128/25");
+			jo2.put("endpoint_addrs", ja);
+
+		jo.put("authorization", jo2);
+		return jo;
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testInternalGet.java b/datarouter-prov/src/test/java/datarouter/provisioning/testInternalGet.java
new file mode 100644
index 0000000..877975a
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testInternalGet.java
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+import com.att.research.datarouter.provisioning.beans.Parameters;
+
+public class testInternalGet extends testBase {
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
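+	/*
+	 * GET /internal/prov appears to return the full provisioning state as a
+	 * single JSON document; the test below only checks that the top-level
+	 * "feeds" and "subscriptions" arrays and the "parameters" object (with
+	 * the keys named by the Parameters constants) are present.
+	 */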
+	@Test
+	public void testNormal() {
+		String url   = props.getProperty("test.host") + "/internal/prov";
+		HttpGet httpPost = new HttpGet(url);
+		try {
+			httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(httpPost);
+			int code = response.getStatusLine().getStatusCode();
+			if (code != 200)
+				fail("Unexpected response, expect "+200+" got "+code);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue().trim();
+			boolean ok  = ctype.equals(FeedServlet.PROVFULL_CONTENT_TYPE1);
+			ok |= ctype.equals(FeedServlet.PROVFULL_CONTENT_TYPE2);
+			if (!ok)
+				fail("Got wrong content type: "+ctype);
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ok) {
+				JSONObject jo = null;
+				try {
+					jo = new JSONObject(new JSONTokener(entity.getContent()));
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+				try {
+					jo.getJSONArray("feeds");
+					jo.getJSONArray("subscriptions");
+					JSONObject jo2 = jo.getJSONObject("parameters");
+					jo2.getJSONArray(Parameters.NODES);
+					jo2.getString(Parameters.ACTIVE_POD);
+					jo2.getString(Parameters.STANDBY_POD);
+					jo2.getInt(Parameters.LOGROLL_INTERVAL);
+					jo2.getInt(Parameters.DELIVERY_INIT_RETRY_INTERVAL);
+					jo2.getInt(Parameters.DELIVERY_MAX_RETRY_INTERVAL);
+					jo2.getInt(Parameters.DELIVERY_RETRY_RATIO);
+					jo2.getInt(Parameters.DELIVERY_MAX_AGE);
+				} catch (JSONException e) {
+					fail("required field missing from result: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testInternalMisc.java b/datarouter-prov/src/test/java/datarouter/provisioning/testInternalMisc.java
new file mode 100644
index 0000000..f8a1da1
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testInternalMisc.java
@@ -0,0 +1,151 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONTokener;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testInternalMisc extends testBase {
+	@Test
+	public void testInternalDrlogs() {
+		String url   = props.getProperty("test.host") + "/internal/drlogs";
+		HttpGet httpPost = new HttpGet(url);
+		try {
+			httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(httpPost);
+			int code = response.getStatusLine().getStatusCode();
+			if (code != 200)
+				fail("Unexpected response, expect "+200+" got "+code);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue().trim();
+			boolean ok  = ctype.equals("text/plain");
+			if (!ok)
+				fail("Got wrong content type: "+ctype);
+
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+
+	@Test
+	public void testInternalHalt() {
+		String url   = props.getProperty("test.host") + "/internal/halt";
+		HttpGet httpPost = new HttpGet(url);
+		try {
+			httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(httpPost);
+			int code = response.getStatusLine().getStatusCode();
+			if (code != HttpServletResponse.SC_NOT_FOUND)
+				fail("Unexpected response, expect "+HttpServletResponse.SC_NOT_FOUND+" got "+code);
+
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+
+	@SuppressWarnings("unused")
+	@Test
+	public void testInternalLogs() {
+		String url   = props.getProperty("test.host") + "/internal/logs";
+		HttpGet httpPost = new HttpGet(url);
+		try {
+			httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(httpPost);
+			int code = response.getStatusLine().getStatusCode();
+			if (code != 200)
+				fail("Unexpected response, expect "+200+" got "+code);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue().trim();
+			boolean ok  = ctype.equals("application/json");
+			if (!ok)
+				fail("Got wrong content type: "+ctype);
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ok) {
+				try {
+					new JSONArray(new JSONTokener(entity.getContent()));
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+
+	@Test
+	public void testInternalBadURL() {
+		String url   = props.getProperty("test.host") + "/internal/badurl";
+		HttpGet httpPost = new HttpGet(url);
+		try {
+			httpPost.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
+
+			HttpResponse response = httpclient.execute(httpPost);
+			int code = response.getStatusLine().getStatusCode();
+			if (code != HttpServletResponse.SC_NOT_FOUND)
+				fail("Unexpected response, expect "+HttpServletResponse.SC_NOT_FOUND+" got "+code);
+
+			HttpEntity entity = response.getEntity();
+			EntityUtils.consume(entity);
+		} catch (IOException e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testLogGet.java b/datarouter-prov/src/test/java/datarouter/provisioning/testLogGet.java
new file mode 100644
index 0000000..510150a
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testLogGet.java
@@ -0,0 +1,181 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testLogGet extends testBase {
+	private JSONArray returnedlist;
+	private int feedid = 4;
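+	// Format used to build the ?start= query parameter in testNormalStart;
+	// that test expects the server to accept this ISO-8601-style timestamp.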
+	private SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+		// need to seed the DB here
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+		// need to "unseed" the DB
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+//		JSONArray ja = db_state.getJSONArray("feeds");
+//		for (int i = 0; i < ja.length(); i++) {
+//			JSONObject jo = ja.getJSONObject(i);
+//			if (!jo.getBoolean("deleted"))
+//				feedid = jo.getInt("feedid");
+//		}
+	}
+
+	@Test
+	public void testNormal() {
+		testCommon(HttpServletResponse.SC_OK);
+	}
+	@Test
+	public void testNormalPubOnly() {
+		testCommon(HttpServletResponse.SC_OK, "?type=pub");
+	}
+	@Test
+	public void testNormalDelOnly() {
+		testCommon(HttpServletResponse.SC_OK, "?type=del");
+	}
+	@Test
+	public void testNormalExpOnly() {
+		testCommon(HttpServletResponse.SC_OK, "?type=exp");
+	}
+	@Test
+	public void testNormalXXXOnly() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "?type=xxx");
+	}
+	@Test
+	public void testNormalStatusSuccess() {
+		testCommon(HttpServletResponse.SC_OK, "?statusCode=success");
+	}
+	@Test
+	public void testNormalStatusRedirect() {
+		testCommon(HttpServletResponse.SC_OK, "?statusCode=redirect");
+	}
+	@Test
+	public void testNormalStatusFailure() {
+		testCommon(HttpServletResponse.SC_OK, "?statusCode=failure");
+	}
+	@Test
+	public void testNormalStatus200() {
+		testCommon(HttpServletResponse.SC_OK, "?statusCode=200");
+	}
+	@Test
+	public void testNormalStatusXXX() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "?statusCode=xxx");
+	}
+	@Test
+	public void testNormalExpiryNotRetryable() {
+		testCommon(HttpServletResponse.SC_OK, "?expiryReason=notRetryable");
+	}
+	@Test
+	public void testNormalExpiryRetriesExhausted() {
+		testCommon(HttpServletResponse.SC_OK, "?expiryReason=retriesExhausted");
+	}
+	@Test
+	public void testNormalExpiryXXX() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "?expiryReason=xxx");
+	}
+	@Test
+	public void testNormalPublishId() {
+		testCommon(HttpServletResponse.SC_OK, "?publishId=1366985877801.mtdvnj00-drtr.proto.research.att.com");
+	}
+	@Test
+	public void testNormalStart() {
+		long n = System.currentTimeMillis() - (5 * 24 * 60 * 60 * 1000L);	// 5 days
+		testCommon(HttpServletResponse.SC_OK, String.format("?start=%s", sdf.format(n)));
+	}
+	@Test
+	public void testBadStart() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "?start=xxx");
+	}
+	@Test
+	public void testLongEnd() {
+		testCommon(HttpServletResponse.SC_OK, "?end=1364837896220");
+	}
+	@Test
+	public void testBadEnd() {
+		testCommon(HttpServletResponse.SC_BAD_REQUEST, "?end=2013-04-25T11:01:25Q");
+	}
+	private void testCommon(int expect) {
+		testCommon(expect, "");
+	}
+	private void testCommon(int expect, String query) {
+		String url = props.getProperty("test.host") + "/feedlog/" + feedid + query;
+		HttpGet httpGet = new HttpGet(url);
+		try {
+			HttpResponse response = httpclient.execute(httpGet);
+			ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue().trim();
+			if (expect == HttpServletResponse.SC_OK) {
+				if (!ctype.equals(FeedServlet.LOGLIST_CONTENT_TYPE))
+					fail("Got wrong content type: "+ctype);
+			}
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ctype.equals(FeedServlet.LOGLIST_CONTENT_TYPE)) {
+				try {
+					returnedlist = new JSONArray(new JSONTokener(entity.getContent()));
+					int n = returnedlist.length();
+					if (n != 0)
+						System.err.println(n + " items");
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			httpGet.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testPublish.java b/datarouter-prov/src/test/java/datarouter/provisioning/testPublish.java
new file mode 100644
index 0000000..15d0565
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testPublish.java
@@ -0,0 +1,119 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.RedirectStrategy;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.impl.client.DefaultRedirectStrategy;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+
+public class testPublish extends testBase {
+	private String publish_url;
+
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+		// Get the publish URL from the last non-deleted feed in the DB state
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = ja.length()-1; i >= 0; i--) {
+			JSONObject feed = ja.getJSONObject(i);
+			if (!feed.getBoolean("deleted")) {
+				publish_url = feed.getJSONObject("links").getString("publish");
+				publish_url += "/" + System.currentTimeMillis();
+				return;
+			}
+		}
+	}
+
+	@Test
+	public void testDelete() {
+		HttpDelete x = new HttpDelete(publish_url);
+		testCommon(x);
+	}
+	@Test
+	public void testGet() {
+		HttpGet x = new HttpGet(publish_url);
+		testCommon(x);
+	}
+	@Test
+	public void testPut() {
+		HttpPut x = new HttpPut(publish_url);
+		testCommon(x);
+	}
+	@Test
+	public void testPost() {
+		HttpPost x = new HttpPost(publish_url);
+		testCommon(x);
+	}
+	private void testCommon(HttpRequestBase rb) {
+		try {
+			rb.addHeader(FeedServlet.BEHALF_HEADER, "JUnit");
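+			// Disable automatic redirect handling so that the 301 response and
+			// its Location header can be asserted directly below.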
+			RedirectStrategy strategy = new DefaultRedirectStrategy() {
+				protected boolean isRedirectable(String method) {
+					return false;
+				}
+			};
+			httpclient.setRedirectStrategy(strategy);
+			HttpResponse response = httpclient.execute(rb);
+			ckResponse(response, HttpServletResponse.SC_MOVED_PERMANENTLY);
+
+			// Make sure there is a Location hdr
+			Header[] loc = response.getHeaders("Location");
+			if (loc == null || loc.length == 0)
+				fail("No location header");
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			rb.releaseConnection();
+		}
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testRLEBitSet.java b/datarouter-prov/src/test/java/datarouter/provisioning/testRLEBitSet.java
new file mode 100644
index 0000000..40970ed
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testRLEBitSet.java
@@ -0,0 +1,231 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.util.Iterator;
+
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.utils.RLEBitSet;
+
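+/*
+ * Tests for RLEBitSet, a run-length-encoded bit set. As exercised below, its
+ * string form is a comma-separated list of single bits and inclusive ranges,
+ * for example "1-4,6-10" or "442-444,555".
+ */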
+public class testRLEBitSet {
+	@Test
+	public void testBasicConstructor() {
+		RLEBitSet bs = new RLEBitSet();
+		if (!bs.isEmpty())
+			fail("bit set not empty");
+	}
+	@Test
+	public void testStringConstructor() {
+		RLEBitSet bs = new RLEBitSet("1-10");
+		if (bs.isEmpty())
+			fail("bit set is empty");
+		if (!bs.toString().equals("1-10"))
+			fail("bad value");
+		bs = new RLEBitSet("69,70,71");
+		if (bs.isEmpty())
+			fail("bit set is empty");
+		if (!bs.toString().equals("69-71"))
+			fail("bad value");
+		bs = new RLEBitSet("555 444    443  442");
+		if (!bs.toString().equals("442-444,555"))
+			fail("bad value");
+	}
+	@Test
+	public void testLength() {
+		RLEBitSet bs = new RLEBitSet();
+		if (bs.length() != 0)
+			fail("testLength fail "+bs + " " + bs.length());
+		bs = new RLEBitSet("1-10");
+		if (bs.length() != 11)
+			fail("testLength fail "+bs + " " + bs.length());
+		bs = new RLEBitSet("1-20,100000000-100000005");
+		if (bs.length() != 100000006)
+			fail("testLength fail "+bs + " " + bs.length());
+	}
+	@Test
+	public void testGet() {
+		RLEBitSet bs = new RLEBitSet("1-10");
+		if (!bs.get(5))
+			fail("get");
+		if (bs.get(69))
+			fail("get");
+	}
+	@Test
+	public void testSetOneBit() {
+		RLEBitSet bs = new RLEBitSet();
+		for (int i = 12; i < 200; i++)
+			bs.set(i);
+		bs.set(690);
+		for (int i = 305; i < 309; i++)
+			bs.set(i);
+		bs.set(304);
+		if (!bs.toString().equals("12-199,304-308,690"))
+			fail("testSetOneBit fail "+bs);
+	}
+	@Test
+	public void testSetString() {
+		RLEBitSet bs = new RLEBitSet();
+		bs.set("1-100");
+		if (!bs.toString().equals("1-100"))
+			fail("testSetString fail "+bs);
+	}
+	@Test
+	public void testSetRange() {
+		RLEBitSet bs = new RLEBitSet();
+		bs.set(50,60);
+		if (!bs.toString().equals("50-59"))
+			fail("testSetRange fail "+bs);
+	}
+	@Test
+	public void testClearOneBit() {
+		RLEBitSet bs = new RLEBitSet("1-10");
+		bs.clear(5);
+		if (!bs.toString().equals("1-4,6-10"))
+			fail("testClearOneBit fail");
+		bs = new RLEBitSet("1-10");
+		bs.clear(11);
+		if (!bs.toString().equals("1-10"))
+			fail("testClearOneBit fail "+bs);
+	}
+	@Test
+	public void testClearRangeLeft() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(40,50);
+		if (!bs.toString().equals("100-200"))
+			fail("testClearRangeLeft fail "+bs);
+	}
+	@Test
+	public void testClearRangeRight() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(400,500);
+		if (!bs.toString().equals("100-200"))
+			fail("testClearRangeRight fail "+bs);
+	}
+	@Test
+	public void testClearRangeMiddle() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(120,130);
+		if (!bs.toString().equals("100-119,130-200"))
+			fail("testClearRangeRight fail "+bs);
+	}
+	@Test
+	public void testClearRangeIntersect() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(100,200);
+		if (!bs.toString().equals("200"))
+			fail("testClearRangeIntersect fail "+bs);
+	}
+	@Test
+	public void testClearOverlapLeft() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(50,150);
+		if (!bs.toString().equals("150-200"))
+			fail("testClearOverlapLeft fail "+bs);
+	}
+	@Test
+	public void testClearOverlapRight() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(150,250);
+		if (!bs.toString().equals("100-149"))
+			fail("testClearOverlapRight fail "+bs);
+	}
+	@Test
+	public void testClearOverlapAll() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		bs.clear(50,250);
+		if (!bs.toString().equals(""))
+			fail("testClearOverlapAll fail "+bs);
+	}
+	@Test
+	public void testAnd() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		RLEBitSet b2 = new RLEBitSet("150-400");
+		bs.and(b2);
+		if (!bs.toString().equals("150-200"))
+			fail("testAnd fail "+bs);
+		bs = new RLEBitSet("100-200");
+		b2 = new RLEBitSet("1500-4000");
+		bs.and(b2);
+		if (!bs.isEmpty())
+			fail("testAnd fail "+bs);
+	}
+	@Test
+	public void testAndNot() {
+		RLEBitSet bs = new RLEBitSet("100-200");
+		RLEBitSet b2 = new RLEBitSet("150-159");
+		bs.andNot(b2);
+		if (!bs.toString().equals("100-149,160-200"))
+			fail("testAndNot fail "+bs);
+	}
+	@Test
+	public void testIsEmpty() {
+		RLEBitSet bs = new RLEBitSet("");
+		if (!bs.isEmpty())
+			fail("testIsEmpty fail "+bs);
+		bs.set(1);
+		if (bs.isEmpty())
+			fail("testIsEmpty fail "+bs);
+	}
+	@Test
+	public void testCardinality() {
+		RLEBitSet bs = new RLEBitSet("1-120,10000000-10000005");
+		if (bs.cardinality() != 126)
+			fail("testCardinality fail 1");
+	}
+	@Test
+	public void testIterator() {
+		RLEBitSet bs = new RLEBitSet("1,5,10-12");
+		Iterator<Long[]> i = bs.getRangeIterator();
+		if (!i.hasNext())
+			fail("iterator fail 1");
+		Long[] ll = i.next();
+		if (ll == null || ll[0] != 1 || ll[1] != 1)
+			fail("iterator fail 2");
+
+		if (!i.hasNext())
+			fail("iterator fail 3");
+		ll = i.next();
+		if (ll == null || ll[0] != 5 || ll[1] != 5)
+			fail("iterator fail 4");
+
+		if (!i.hasNext())
+			fail("iterator fail 5");
+		ll = i.next();
+		if (ll == null || ll[0] != 10 || ll[1] != 12)
+			fail("iterator fail 6");
+
+		if (i.hasNext())
+			fail("iterator fail 7");
+	}
+	@Test
+	public void testClone() {
+		RLEBitSet bs1 = new RLEBitSet("1,5,10-12");
+		RLEBitSet bs2 = (RLEBitSet) bs1.clone();
+		if (!bs1.toString().equals(bs2.toString()))
+			fail("clone");
+	}
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testRouteAPI.java b/datarouter-prov/src/test/java/datarouter/provisioning/testRouteAPI.java
new file mode 100644
index 0000000..7295d00
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testRouteAPI.java
@@ -0,0 +1,30 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+public class testRouteAPI extends testBase {
+
+}
diff --git a/datarouter-prov/src/test/java/datarouter/provisioning/testSubscribePost.java b/datarouter-prov/src/test/java/datarouter/provisioning/testSubscribePost.java
new file mode 100644
index 0000000..0f624ec
--- /dev/null
+++ b/datarouter-prov/src/test/java/datarouter/provisioning/testSubscribePost.java
@@ -0,0 +1,180 @@
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * * 
+ *  *      http://www.apache.org/licenses/LICENSE-2.0
+ * * 
+ *  * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package datarouter.provisioning;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.util.EntityUtils;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.att.research.datarouter.provisioning.FeedServlet;
+import com.att.research.datarouter.provisioning.SubscribeServlet;
+
+public class testSubscribePost extends testBase {
+	private int feednum = 0;
+
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+	}
+
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+	}
+
+	@Before
+	public void setUp() throws Exception {
+		super.setUp();
+		getDBstate();
+		// use the first feed to subscribe to
+		JSONArray ja = db_state.getJSONArray("feeds");
+		for (int i = 0; i < ja.length(); i++) {
+			JSONObject feed0 = ja.getJSONObject(i);
+			if (feed0 != null && !feed0.getBoolean("deleted")) {
+				feednum = feed0.getInt("feedid");
+				return;
+			}
+		}
+	}
+
+	@Test
+	public void testNormal() {
+		JSONObject jo = buildSubRequest();
+		testCommon(jo, HttpServletResponse.SC_CREATED);
+	}
+	@Test
+	public void testMissingUrl() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").remove("url");
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testTooLongUrl() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").put("url", "https://"+s_257);
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testMissingUser() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").remove("user");
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testTooLongUser() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").put("user", s_33);
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testMissingPassword() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").remove("password");
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testTooLongPassword() {
+		JSONObject jo = buildSubRequest();
+		jo.getJSONObject("delivery").put("password", s_33);
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	@Test
+	public void testNonBooleanMetadata() {
+		JSONObject jo = buildSubRequest();
+		jo.put("metadataOnly", s_33);
+		testCommon(jo, HttpServletResponse.SC_BAD_REQUEST);
+	}
+	private void testCommon(JSONObject jo, int expect) {
+		String url   = props.getProperty("test.host") + "/subscribe/" + feednum;
+		HttpPost httpPost = new HttpPost(url);
+		try {
+			httpPost.addHeader(SubscribeServlet.BEHALF_HEADER, "JUnit");
+			String t = jo.toString();
+			HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create(SubscribeServlet.SUB_CONTENT_TYPE));
+			httpPost.setEntity(body);
+
+			HttpResponse response = httpclient.execute(httpPost);
+			ckResponse(response, expect);
+
+			HttpEntity entity = response.getEntity();
+			String ctype = entity.getContentType().getValue();
+			int code = response.getStatusLine().getStatusCode();
+			if (code == HttpServletResponse.SC_CREATED && !ctype.equals(SubscribeServlet.SUBFULL_CONTENT_TYPE))
+				fail("Got wrong content type: "+ctype);
+
+			// do something useful with the response body and ensure it is fully consumed
+			if (ctype.equals(SubscribeServlet.SUBFULL_CONTENT_TYPE)) {
+				JSONObject jo2 = null;
+				try {
+					jo2 = new JSONObject(new JSONTokener(entity.getContent()));
+				} catch (Exception e) {
+					fail("Bad JSON: "+e.getMessage());
+				}
+				try {
+					jo2.getString("subscriber");
+					JSONObject jo3 = jo2.getJSONObject("links");
+					jo3.getString("self");
+					jo3.getString("feed");
+					jo3.getString("log");
+				} catch (JSONException e) {
+					fail("required field missing from result: "+e.getMessage());
+				}
+			} else {
+				EntityUtils.consume(entity);
+			}
+		} catch (IOException e) {
+			fail(e.getMessage());
+		} finally {
+			httpPost.releaseConnection();
+		}
+	}
+	private JSONObject buildSubRequest() {
+		JSONObject jo = new JSONObject();
+
+			JSONObject jo2 = new JSONObject();
+			jo2.put("url", "https://www.att.com/");
+			jo2.put("user", "dmr");
+			jo2.put("password", "passw0rd");
+			jo2.put("use100", true);
+
+		jo.put("delivery", jo2);
+		jo.put("metadataOnly", Boolean.FALSE);
+		return jo;
+	}
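+	/*
+	 * For reference, the subscription request built above has roughly this shape:
+	 *
+	 * {
+	 *   "delivery": {
+	 *     "url": "https://www.att.com/",
+	 *     "user": "dmr",
+	 *     "password": "passw0rd",
+	 *     "use100": true
+	 *   },
+	 *   "metadataOnly": false
+	 * }
+	 */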
+}