Initial OpenECOMP SDC commit
Change-Id: I0924d5a6ae9cdc161ae17c68d3689a30d10f407b
Signed-off-by: Michael Lando <ml636r@att.com>
diff --git a/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ArtifactDaoTest.java b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ArtifactDaoTest.java
new file mode 100644
index 0000000..aa9d30c
--- /dev/null
+++ b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ArtifactDaoTest.java
@@ -0,0 +1,577 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * SDC
+ * ================================================================================
+ * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.sdc.be.resources;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import javax.annotation.Resource;
+
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.openecomp.sdc.be.config.ConfigurationManager;
+import org.openecomp.sdc.be.dao.api.IGenericSearchDAO;
+import org.openecomp.sdc.be.dao.api.ResourceUploadStatus;
+import org.openecomp.sdc.be.dao.es.ElasticSearchClient;
+import org.openecomp.sdc.be.resources.api.IResourceUploader;
+import org.openecomp.sdc.be.resources.data.ESArtifactData;
+import org.openecomp.sdc.common.api.ConfigurationSource;
+import org.openecomp.sdc.common.impl.ExternalConfiguration;
+import org.openecomp.sdc.common.impl.FSConfigurationSource;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.TestExecutionListeners;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import org.springframework.test.context.support.DependencyInjectionTestExecutionListener;
+import org.springframework.test.context.support.DirtiesContextTestExecutionListener;
+import org.springframework.test.context.transaction.TransactionalTestExecutionListener;
+
+import fj.data.Either;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration("classpath:application-context-test.xml")
+@TestExecutionListeners(listeners = { DependencyInjectionTestExecutionListener.class,
+ DirtiesContextTestExecutionListener.class, TransactionalTestExecutionListener.class }) // ,
+ // CassandraUnitTestExecutionListener.class})
+// @EmbeddedCassandra(host ="localhost", port=9042)
+public class ArtifactDaoTest {
+ private static final String TEST_IMAGES_DIRECTORY = "src/test/resources/images";
+
+ @Resource
+ ElasticSearchClient esclient;
+
+ /*
+ * @Resource(name = "artifact-dao") private IArtifactDAO artifactDAO;
+ */
+
+ @Resource(name = "resource-upload")
+ private IResourceUploader daoUploader;
+ ESArtifactData arData;
+
+ @Resource(name = "resource-dao")
+ private IGenericSearchDAO resourceDAO;
+
+ private String nodeType = "NodeType1";
+ private String nodeTypeVersion = "1.0.0";
+
+ private String nodeType2 = "NodeType2";
+ private String nodeTypeVersion2 = "1.0.1";
+
+ private String nodeType3 = "NodeType3";
+ private String nodeTypeVersion3 = "1.1.1";
+
+ private String topologyId = "topology";
+ private String topologyTemplateName = "topologyTemplate";
+ private String topologyTemplateVersion = "1.1.1";
+
+ private String nodeTypeTemplate1 = "NodeTypeTemplate1";
+ private String nodeTypeTemplate2 = "NodeTypeTemplate2";
+ private String nodeTypeTemplate3 = "NodeTypeTemplate3";
+
+ private static ConfigurationManager configurationManager;
+
+ @Before
+ public void before() {
+ // try {
+ // clearIndex(ICatalogDAO.RESOURCES_INDEX, ArtifactData.class);
+ // clearIndex(ICatalogDAO.RESOURCES_INDEX, ServiceArtifactData.class);
+ // } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ // e.printStackTrace();
+ // }
+
+ }
+
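+ // Loads the catalog-dao test configuration from src/test/resources/config/catalog-dao once before the tests run.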
+ @BeforeClass
+ public static void setupBeforeClass() {
+ ExternalConfiguration.setAppName("catalog-dao");
+ String appConfigDir = "src/test/resources/config/catalog-dao";
+ ConfigurationSource configurationSource = new FSConfigurationSource(ExternalConfiguration.getChangeListener(),
+ appConfigDir);
+ configurationManager = new ConfigurationManager(configurationSource);
+ }
+
+ // @Before
+ // public void createSchema(){
+ // SdcSchemaBuilder.createSchema();
+ // }
+ //
+
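+ // Saves a new artifact, expects an OK upload status, and deletes it again.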
+ @Test
+ public void testSaveNewArtifact() {
+ // daoUploader = new ArtifactUploader(artifactDAO);
+ assertNotNull("daoUploader was not injected", daoUploader);
+ String strData = "qweqwqweqw34e4wrwer";
+
+ String myNodeType = "MyNewNodeType";
+
+ ESArtifactData arData = new ESArtifactData("artifactNewMarina11", strData.getBytes());
+
+ ResourceUploadStatus status = daoUploader.saveArtifact(arData, true);
+
+ assertEquals(ResourceUploadStatus.OK, status);
+
+ daoUploader.deleteArtifact(arData.getId());
+
+ }
+
+ /*
+ * @Test public void testSaveNewImage(){
+ *
+ * Path iconPath = Paths.get(TEST_IMAGES_DIRECTORY, "apache.png");
+ *
+ * ImageData imageData = new ImageData(); try {
+ * imageData.setData(Files.readAllBytes(iconPath));
+ * imageData.setComponentName("ComponentMarina");
+ * imageData.setComponentVersion("v.1.0");
+ * imageData.setArtifactName("apache.png");
+ * imageData.setResourceCreator("Marina");
+ * imageData.setResourceLastUpdater("Marina"); ResourceUploadStatus status =
+ * daoUploader.saveImage(imageData, true); assertEquals(status,
+ * ResourceUploadStatus.OK); } catch (IOException e) { // TODO
+ * Auto-generated catch block e.printStackTrace(); }
+ *
+ *
+ * }
+ */
+
+ // @Test
+ // public void testGetArtifactsList() {
+ // //daoUploader = new ArtifactUploader(artifactDAO);
+ // if(daoUploader==null){
+ // assertTrue(false);
+ // }
+ // String myNodeType = "MyListNodeType";
+ //
+ //
+ //
+ // //resourceDAO.save(indexedNodeType);
+ //
+ // String strData = "qweqwqweqw34e4wrwer";
+ // ESArtifactData arData1 = new ESArtifactData("artifactNewMarina_1",
+ // strData.getBytes());
+ //
+ //
+ // ResourceUploadStatus status = daoUploader.saveArtifact(arData1, true);
+ // assertEquals(status, ResourceUploadStatus.OK);
+ //
+ // ESArtifactData arData2 = new ESArtifactData("artifactNewMarina_2",
+ // strData.getBytes());
+ //
+ //
+ // status = daoUploader.saveArtifact(arData2, true);
+ // assertEquals(status, ResourceUploadStatus.OK);
+ //
+ // ESArtifactData arData3 = new ESArtifactData("artifactNewMarina_3",
+ // strData.getBytes());
+ //
+ //
+ // status = daoUploader.saveArtifact(arData3, true);
+ // assertEquals(status, ResourceUploadStatus.OK);
+ //
+ //
+ //
+ // Either<List<ESArtifactData>, ResourceUploadStatus> arrArray =
+ // daoUploader.getArtifacts(myNodeType, nodeTypeVersion);
+ // assertTrue(arrArray.isLeft());
+ //
+ // assertEquals(3, arrArray.left().value().size());
+ //
+ // daoUploader.deleteArtifact(arData1.getId());
+ // daoUploader.deleteArtifact(arData2.getId());
+ // daoUploader.deleteArtifact(arData3.getId());
+ //
+ // //resourceDAO.delete(IndexedNodeType.class, indexedNodeType.getId());
+ //
+ // }
+ //
+
+ /*
+ * @Test public void testGetServiceArtifactsList() {
+ *
+ * if(daoUploader==null){ assertTrue(false); } String strData =
+ * "qweqwqweqw34e4wrwer";
+ *
+ * ServiceArtifactData serviceArData = new
+ * ServiceArtifactData("serviceArData", topologyTemplateName,
+ * topologyTemplateVersion, nodeTypeTemplate1, nodeType, nodeTypeVersion,
+ * "YANG", strData.getBytes(), strData.getBytes(), "Marina", null);
+ * //serviceArData.setRefArtifactId(arData.getId()); ResourceUploadStatus
+ * status = daoUploader.saveServiceArtifact(serviceArData, true);
+ *
+ * ServiceArtifactData serviceArData1 = new
+ * ServiceArtifactData("serviceArData1", topologyTemplateName,
+ * topologyTemplateVersion, nodeTypeTemplate2, nodeType2, nodeTypeVersion2,
+ * "YANG", strData.getBytes(), strData.getBytes(), "Marina", null);
+ * //serviceArData1.setRefArtifactId(arData4.getId()); status =
+ * daoUploader.saveServiceArtifact(serviceArData1, true);
+ * ServiceArtifactData getServiceData =
+ * daoUploader.getServiceArtifact(serviceArData.getId()).left().value();
+ *
+ * List<ServiceArtifactData> arrArray =
+ * daoUploader.getServiceArtifacts(topologyTemplateName,
+ * topologyTemplateVersion).left().value();
+ *
+ * assertEquals(2, arrArray.size());
+ *
+ * daoUploader.deleteArtifact(serviceArData.getId());
+ * daoUploader.deleteArtifact(serviceArData1.getId());
+ *
+ *
+ * }
+ */
+
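+ // Looks up an artifact by id and, if it is missing, saves it first and verifies it can then be retrieved.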
+ @Test
+ public void testGetArtifact() {
+
+ String myNodeType = "MyNodeType";
+
+ // resourceDAO.save(indexedNodeType);
+ ESArtifactData arData = getArtifactData(myNodeType, nodeTypeVersion);
+
+ ESArtifactData getData = null;
+ Either<ESArtifactData, ResourceUploadStatus> getArtifactStatus = daoUploader
+ .getArtifact(myNodeType + "- dassasd" + ":" + nodeTypeVersion + ":updatedArtifact");
+ if (getArtifactStatus.isRight()) {
+ daoUploader.saveArtifact(arData, true);
+ getArtifactStatus = daoUploader.getArtifact(arData.getId());
+ }
+ assertNotNull(getArtifactStatus.left().value());
+
+ }
+
+ /*
+ * @Test public void testGetServiceArtifact() {
+ *
+ * ServiceArtifactData servArData = getServiceArtifactData();
+ *
+ * Either<ServiceArtifactData, ResourceUploadStatus>
+ * getServiceArtifactStatus =
+ * daoUploader.getServiceArtifact("MyService:v.1.1:updatedServiceArtifact");
+ * if (!getServiceArtifactStatus.isLeft()){
+ * daoUploader.saveServiceArtifact(servArData, true);
+ * getServiceArtifactStatus =
+ * daoUploader.getServiceArtifact(servArData.getId()); }
+ *
+ * assertNotNull(getServiceArtifactStatus.left().value());
+ *
+ * daoUploader.deleteArtifact(getServiceArtifactStatus.left().value().getId(
+ * ));
+ *
+ *
+ * }
+ */
+
+ /*
+ * @Test public void testGetServiceArtifactsCollection() {
+ *
+ * prepareTopologyService(); prepareTestTopologyService();
+ * Either<ServiceArtifactsDataCollection, ResourceUploadStatus>
+ * getServiceArtifactsCollectionStatus =
+ * daoUploader.getServiceArtifactsCollection(topologyTemplateName,
+ * topologyTemplateVersion); ServiceArtifactsDataCollection serviceArtifacts
+ * = getServiceArtifactsCollectionStatus.left().value();
+ *
+ * Map<String, List<ArtifactData>> map =
+ * serviceArtifacts.getServiceArtifactDataMap();
+ *
+ * List<ArtifactData> list = map.get(nodeType); assertNotNull(list);
+ * assertEquals(2, list.size());
+ *
+ *
+ * list = map.get(nodeTypeTemplate1 ); assertNotNull(list); assertEquals(1,
+ * list.size());
+ *
+ * list = map.get(nodeTypeTemplate2 ); assertNotNull(list); assertEquals(1,
+ * list.size());
+ *
+ *
+ * }
+ */
+
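+ // Saves the artifact if it does not exist yet, then updates its payload and expects an OK status.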
+ @Test
+ public void testUpdateArtifact() {
+ // daoUploader = new ArtifactUploader(artifactDAO);
+ assertNotNull("daoUploader was not injected", daoUploader);
+ ResourceUploadStatus status = ResourceUploadStatus.OK;
+
+ String myNodeType = "MyUpdatedNodeType";
+
+ // resourceDAO.save(indexedNodeType);
+
+ ESArtifactData arData = getArtifactData(myNodeType, nodeTypeVersion);
+ Either<ESArtifactData, ResourceUploadStatus> getArtifactStatus = daoUploader.getArtifact(arData.getId());
+
+ if (!getArtifactStatus.isLeft())
+ status = daoUploader.saveArtifact(arData, false);
+
+ String payload1 = "new payloadjfdsgh";
+ arData.setDataAsArray(payload1.getBytes());
+
+ status = daoUploader.updateArtifact(arData);
+
+ assertEquals(ResourceUploadStatus.OK, status);
+ // resourceDAO.delete(IndexedNodeType.class, indexedNodeType.getId());
+
+ }
+
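+ // Builds a minimal artifact with a fixed payload; the componentName and componentVersion parameters are not used yet.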
+ private ESArtifactData getArtifactData(String componentName, String componentVersion) {
+ String strData = "qweqwqweqw34e4wrwer";
+ ESArtifactData arData = new ESArtifactData("updatedArtifact", strData.getBytes());
+
+ return arData;
+ }
+
+ /*
+ * private ServiceArtifactData getServiceArtifactData(){ String strData =
+ * "qweqwqweqw34e4wrwer"; ServiceArtifactData arData = new
+ * ServiceArtifactData("updatedServiceArtifact", "MyService", "v.1.1",
+ * "MyComponentTemplate", "MyComponent", "v.1.1", "YANG",
+ * strData.getBytes(), strData.getBytes(), "Marina", null);
+ *
+ * return arData; }
+ */
+
+ /*
+ * private void prepareTopologyService(){
+ *
+ * List<String> listCap = new ArrayList<String>(); listCap.add("very_evil");
+ * List<String> listCap1 = new ArrayList<String>(); listCap.add("evil");
+ * try{ // Initialize test data IndexedNodeType indexedNodeType = new
+ * IndexedNodeType(); CSARDependency dep = new CSARDependency();
+ * dep.setName(nodeType); dep.setVersion(nodeTypeVersion);
+ * indexedNodeType.setElementId(nodeType);
+ * indexedNodeType.setArchiveName(nodeType);
+ * indexedNodeType.setArchiveVersion(nodeTypeVersion);
+ * indexedNodeType.setCreationDate(new Date());
+ * indexedNodeType.setLastUpdateDate(new Date());
+ * indexedNodeType.setDefaultCapabilities(listCap);
+ * resourceDAO.save(indexedNodeType);
+ *
+ *
+ * IndexedNodeType indexedNodeType1 = new IndexedNodeType();
+ * indexedNodeType1.setElementId(nodeType2);
+ * indexedNodeType1.setArchiveName(nodeType2);
+ * indexedNodeType1.setArchiveVersion(nodeTypeVersion2); CSARDependency dep1
+ * = new CSARDependency(); dep1.setName(nodeType2);
+ * dep1.setVersion(nodeTypeVersion2); indexedNodeType1.setCreationDate(new
+ * Date()); indexedNodeType1.setLastUpdateDate(new Date());
+ * indexedNodeType1.setDefaultCapabilities(listCap1);
+ * resourceDAO.save(indexedNodeType1);
+ *
+ *
+ * indexedNodeType.setElementId(nodeType3);
+ * indexedNodeType.setArchiveName(nodeType3);
+ * indexedNodeType.setArchiveVersion(nodeTypeVersion3); CSARDependency dep2
+ * = new CSARDependency(); dep2.setName(nodeType3);
+ * dep2.setVersion(nodeTypeVersion3); indexedNodeType.setCreationDate(new
+ * Date()); indexedNodeType.setLastUpdateDate(new Date());
+ * indexedNodeType.setDefaultCapabilities(null);
+ * resourceDAO.save(indexedNodeType); String osgiliath100Id =
+ * indexedNodeType.getId();
+ *
+ * Topology topology = new Topology(); topology.setId(topologyId);
+ * Set<CSARDependency> dependencies = new HashSet<CSARDependency>();
+ * dependencies.add(dep); dependencies.add(dep2); dependencies.add(dep1);
+ * topology.setDependencies(dependencies); Map<String, NodeTemplate>
+ * nodeTemplates = new HashMap <String, NodeTemplate>();
+ *
+ * NodeTemplate template1 = new NodeTemplate(nodeType, null, null, null,
+ * null, null, null); template1.setName(nodeTypeTemplate1);
+ * nodeTemplates.put(nodeTypeTemplate1, template1 );
+ *
+ * NodeTemplate template2 = new NodeTemplate(nodeType2, null, null, null,
+ * null, null, null); template2.setName(nodeTypeTemplate2 );
+ * nodeTemplates.put(nodeTypeTemplate2, template2 );
+ *
+ * NodeTemplate template3 = new NodeTemplate(nodeType, null, null, null,
+ * null, null, null); template3.setName(nodeTypeTemplate3 );
+ * nodeTemplates.put(nodeTypeTemplate3, template3);
+ *
+ * topology.setNodeTemplates(nodeTemplates); resourceDAO.save(topology);
+ *
+ * TopologyTemplate topologyTemplate = new TopologyTemplate();
+ * topologyTemplate.setId(topologyTemplateName);
+ * topologyTemplate.setName(topologyTemplateName);
+ * topologyTemplate.setTopologyId(topology.getId());
+ * topologyTemplate.setDescription("my topology template");
+ * resourceDAO.save(topologyTemplate);
+ *
+ * String strData = "qweqwqweqw34e4wrwer"; ArtifactData arData = new
+ * ArtifactData("artifact1", nodeType, nodeTypeVersion, "YANG",
+ * strData.getBytes(), strData.getBytes(), "Marina"); ArtifactData arData1 =
+ * new ArtifactData("artifact2", nodeType, nodeTypeVersion, "YANG",
+ * strData.getBytes(), strData.getBytes(), "Marina"); ResourceUploadStatus
+ * status = daoUploader.saveArtifact(arData, true); status =
+ * daoUploader.saveArtifact(arData1, true);
+ *
+ * ArtifactData arData3 = new ArtifactData("artifact1", nodeType2,
+ * nodeTypeVersion2, "YANG", strData.getBytes(), strData.getBytes(),
+ * "Marina"); status = daoUploader.saveArtifact(arData3, true);
+ *
+ * ArtifactData arData4 = new ArtifactData("artifact2", nodeType2,
+ * nodeTypeVersion2, "YANG", strData.getBytes(), strData.getBytes(),
+ * "Marina"); status = daoUploader.saveArtifact(arData4, true);
+ *
+ * ServiceArtifactData serviceArData = new
+ * ServiceArtifactData("serviceArData", topologyTemplateName,
+ * topologyTemplateVersion, nodeTypeTemplate1, nodeType, nodeTypeVersion,
+ * "YANG", strData.getBytes(), strData.getBytes(), "Marina",
+ * arData.getId());
+ *
+ * status = daoUploader.saveServiceArtifact(serviceArData, true);
+ *
+ * ServiceArtifactData serviceArData1 = new
+ * ServiceArtifactData("serviceArData1", topologyTemplateName,
+ * topologyTemplateVersion, nodeTypeTemplate2, nodeType2, nodeTypeVersion2,
+ * "YANG", strData.getBytes(), strData.getBytes(), "Marina",
+ * arData4.getId());
+ *
+ * status = daoUploader.saveServiceArtifact(serviceArData1, true);
+ *
+ *
+ * } catch (Exception e) { // TODO Auto-generated catch block
+ * e.printStackTrace(); }
+ *
+ * }
+ *
+ * private void prepareTestTopologyService(){
+ *
+ * List<String> listCap = new ArrayList<String>();
+ * listCap.add("very_evil test"); List<String> listCap1 = new
+ * ArrayList<String>(); listCap.add("evil test"); try{ // Initialize test
+ * data IndexedNodeType indexedNodeType = new IndexedNodeType();
+ * CSARDependency dep = new CSARDependency(); dep.setName(nodeType +
+ * " test"); dep.setVersion(nodeTypeVersion);
+ * indexedNodeType.setElementId(nodeType + " test");
+ * indexedNodeType.setArchiveName(nodeType + " test");
+ * indexedNodeType.setArchiveVersion(nodeTypeVersion);
+ * indexedNodeType.setCreationDate(new Date());
+ * indexedNodeType.setLastUpdateDate(new Date());
+ * indexedNodeType.setDefaultCapabilities(listCap);
+ * resourceDAO.save(indexedNodeType);
+ *
+ *
+ * IndexedNodeType indexedNodeType1 = new IndexedNodeType();
+ * indexedNodeType1.setElementId(nodeType2 + " test");
+ * indexedNodeType1.setArchiveName(nodeType2 + " test");
+ * indexedNodeType1.setArchiveVersion(nodeTypeVersion2); CSARDependency dep1
+ * = new CSARDependency(); dep1.setName(nodeType2 + " test");
+ * dep1.setVersion(nodeTypeVersion2); indexedNodeType1.setCreationDate(new
+ * Date()); indexedNodeType1.setLastUpdateDate(new Date());
+ * indexedNodeType1.setDefaultCapabilities(listCap1);
+ * resourceDAO.save(indexedNodeType1);
+ *
+ *
+ * indexedNodeType.setElementId(nodeType3 + " test");
+ * indexedNodeType.setArchiveName(nodeType3 + " test");
+ * indexedNodeType.setArchiveVersion(nodeTypeVersion3); CSARDependency dep2
+ * = new CSARDependency(); dep2.setName(nodeType3 + " test");
+ * dep2.setVersion(nodeTypeVersion3); indexedNodeType.setCreationDate(new
+ * Date()); indexedNodeType.setLastUpdateDate(new Date());
+ * indexedNodeType.setDefaultCapabilities(null);
+ * resourceDAO.save(indexedNodeType); String osgiliath100Id =
+ * indexedNodeType.getId();
+ *
+ * Topology topology = new Topology(); topology.setId(topologyId + " test");
+ * Set<CSARDependency> dependencies = new HashSet<CSARDependency>();
+ * dependencies.add(dep); dependencies.add(dep2); dependencies.add(dep1);
+ * topology.setDependencies(dependencies); Map<String, NodeTemplate>
+ * nodeTemplates = new HashMap <String, NodeTemplate>();
+ *
+ * NodeTemplate template1 = new NodeTemplate(nodeType + " test", null, null,
+ * null, null, null, null); template1.setName(nodeTypeTemplate1 + " test");
+ * nodeTemplates.put(nodeTypeTemplate1 + " test", template1 );
+ *
+ * NodeTemplate template2 = new NodeTemplate(nodeType2 + " test", null,
+ * null, null, null, null, null); template2.setName(nodeTypeTemplate2 +
+ * " test" ); nodeTemplates.put(nodeTypeTemplate2 + " test", template2 );
+ *
+ * NodeTemplate template3 = new NodeTemplate(nodeType, null, null, null,
+ * null, null, null); template3.setName(nodeTypeTemplate3 + " test" );
+ * nodeTemplates.put(nodeTypeTemplate3 + " test", template3);
+ *
+ * topology.setNodeTemplates(nodeTemplates); resourceDAO.save(topology);
+ *
+ * TopologyTemplate topologyTemplate = new TopologyTemplate();
+ * topologyTemplate.setId(topologyTemplateName + " test");
+ * topologyTemplate.setName(topologyTemplateName + " test");
+ * topologyTemplate.setTopologyId(topology.getId());
+ * topologyTemplate.setDescription("my topology template");
+ * resourceDAO.save(topologyTemplate);
+ *
+ * String strData = "qweqwqweqw34e4wrwer"; ArtifactData arData = new
+ * ArtifactData("artifact1 test", nodeType + " test", nodeTypeVersion,
+ * "YANG", strData.getBytes(), strData.getBytes(), "Marina"); ArtifactData
+ * arData1 = new ArtifactData("artifact2 test", nodeType + " test",
+ * nodeTypeVersion, "YANG", strData.getBytes(), strData.getBytes(),
+ * "Marina"); ResourceUploadStatus status = daoUploader.saveArtifact(arData,
+ * true); status = daoUploader.saveArtifact(arData1, true);
+ *
+ * ArtifactData arData3 = new ArtifactData("artifact1 test", nodeType2 +
+ * " test", nodeTypeVersion2, "YANG", strData.getBytes(),
+ * strData.getBytes(), "Marina"); status = daoUploader.saveArtifact(arData3,
+ * true);
+ *
+ * ArtifactData arData4 = new ArtifactData("artifact2 test", nodeType2 +
+ * " test", nodeTypeVersion2, "YANG", strData.getBytes(),
+ * strData.getBytes(), "Marina"); status = daoUploader.saveArtifact(arData4,
+ * true);
+ *
+ * ServiceArtifactData serviceArData = new
+ * ServiceArtifactData("serviceArData test" , topologyTemplateName +
+ * " test", topologyTemplateVersion, nodeTypeTemplate1 + " test", nodeType +
+ * " test", nodeTypeVersion, "YANG", strData.getBytes(), strData.getBytes(),
+ * "Marina", arData.getId());
+ *
+ * status = daoUploader.saveServiceArtifact(serviceArData, true);
+ *
+ * ServiceArtifactData serviceArData1 = new
+ * ServiceArtifactData("serviceArData1 test", topologyTemplateName +
+ * " test", topologyTemplateVersion, nodeTypeTemplate2 + " test", nodeType2
+ * + " test", nodeTypeVersion2, "YANG", strData.getBytes(),
+ * strData.getBytes(), "Marina", arData4.getId());
+ *
+ * status = daoUploader.saveServiceArtifact(serviceArData1, true);
+ *
+ *
+ * } catch (Exception e) { // TODO Auto-generated catch block
+ * e.printStackTrace(); }
+ *
+ * }
+ */
+
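+ // Helper for wiping an index between tests; currently only referenced from the commented-out setup above.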
+ private void clearIndex(String indexName, Class<?> clazz) throws InterruptedException {
+
+ DeleteIndexResponse actionGet = esclient.getClient().admin().indices().delete(new DeleteIndexRequest(indexName))
+ .actionGet();
+ assertTrue(actionGet.isAcknowledged());
+ }
+
+}
diff --git a/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/AuditingDaoTest.java b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/AuditingDaoTest.java
new file mode 100644
index 0000000..06d26f7
--- /dev/null
+++ b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/AuditingDaoTest.java
@@ -0,0 +1,463 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * SDC
+ * ================================================================================
+ * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.sdc.be.resources;
+
+import fj.data.Either;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.openecomp.sdc.be.config.Configuration;
+import org.openecomp.sdc.be.config.ConfigurationManager;
+import org.openecomp.sdc.be.config.Configuration.ElasticSearchConfig.IndicesTimeFrequencyEntry;
+import org.openecomp.sdc.be.dao.api.ActionStatus;
+import org.openecomp.sdc.be.dao.es.ElasticSearchClient;
+import org.openecomp.sdc.be.dao.impl.AuditingDao;
+import org.openecomp.sdc.be.resources.data.auditing.*;
+import org.openecomp.sdc.common.api.ConfigurationSource;
+import org.openecomp.sdc.common.api.Constants;
+import org.openecomp.sdc.common.datastructure.AuditingFieldsKeysEnum;
+import org.openecomp.sdc.common.datastructure.ESTimeBasedEvent;
+import org.openecomp.sdc.common.impl.ExternalConfiguration;
+import org.openecomp.sdc.common.impl.FSConfigurationSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.TestExecutionListeners;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import org.springframework.test.context.support.DependencyInjectionTestExecutionListener;
+import org.springframework.test.context.support.DirtiesContextTestExecutionListener;
+import org.springframework.test.context.transaction.TransactionalTestExecutionListener;
+
+import javax.annotation.Resource;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.junit.Assert.*;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration("classpath:application-context-test.xml")
+@TestExecutionListeners(listeners = { DependencyInjectionTestExecutionListener.class,
+ DirtiesContextTestExecutionListener.class, TransactionalTestExecutionListener.class })
+public class AuditingDaoTest {
+ private static Logger log = LoggerFactory.getLogger(AuditingDaoTest.class.getName());
+ @Resource(name = "elasticsearch-client")
+ private ElasticSearchClient esclient;
+
+ @Resource(name = "auditingDao")
+ private AuditingDao auditingDao;
+
+ private static ConfigurationManager configurationManager;
+ // private static Map<AuditingFieldsKeysEnum, String> auditField2esField;
+
+ @BeforeClass
+ public static void setupBeforeClass() {
+
+ ExternalConfiguration.setAppName("catalog-dao");
+ String appConfigDir = "src/test/resources/config/catalog-dao";
+ ConfigurationSource configurationSource = new FSConfigurationSource(ExternalConfiguration.getChangeListener(),
+ appConfigDir);
+ configurationManager = new ConfigurationManager(configurationSource);
+ // initAudit2EsMap();
+ }
+
+ @After
+ public void tearDown() {
+ deleteOldIndexes();
+ }
+
+ @Before
+ public void setup() {
+ auditingDao.setConfigurationManager(configurationManager);
+ deleteOldIndexes();
+ }
+
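+ // Drops all auditing indexes left over from earlier runs so the index-existence assertions start from a clean state.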
+ private void deleteOldIndexes() {
+ DeleteIndexResponse deleteResponse = esclient.getClient().admin().indices()
+ .prepareDelete(auditingDao.getIndexPrefix() + "*").execute().actionGet();
+ if (!deleteResponse.isAcknowledged()) {
+ log.debug("Couldn't delete old auditing indexes!");
+ fail("Couldn't delete old auditing indexes!");
+ }
+ }
+
+ // @Test
+ public void testAddUpdateAdminEventMinute() {
+
+ String timestamp = "2015-06-23 13:34:53.123";
+
+ String creationPeriod = Constants.MINUTE;
+ String expectedIndexName = auditingDao.getIndexPrefix() + "-2015-06-23-13-34";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params = getUserAdminEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAdminEvent.class);
+ params = getUserAccessEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAccessEvent.class);
+ params = getResourceAdminEventParams(timestamp, "addResource");
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, ResourceAdminEvent.class);
+ }
+
+ // @Test
+ public void testAddUpdateAdminEventYearly() {
+
+ String timestamp = "2016-06-23 13:34:53.123";
+ String creationPeriod = Constants.YEAR;
+ String expectedIndexName = auditingDao.getIndexPrefix() + "-2016";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params = getUserAdminEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAdminEvent.class);
+ params = getUserAccessEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAccessEvent.class);
+ params = getResourceAdminEventParams(timestamp, "addResource");
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, ResourceAdminEvent.class);
+ }
+
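+ // Stores distribution status events in two different monthly indexes and expects both to be returned for the same distribution id.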
+ @Test
+ public void testGetDistributionStatusEvent() {
+
+ String timestamp1 = "2016-06-23 13:34:53.123";
+ String creationPeriod = Constants.MONTH;
+ String expectedIndexName1 = auditingDao.getIndexPrefix() + "-2016-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName1).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params = getDistributionStatusEventParams(timestamp1);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName1, DistributionStatusEvent.class);
+ String timestamp2 = "2015-06-23 13:34:53.123";
+
+ String expectedIndexName2 = auditingDao.getIndexPrefix() + "-2015-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName2).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params2 = getDistributionStatusEventParams(timestamp2);
+ testCreationPeriodScenario(params2, creationPeriod, expectedIndexName2, DistributionStatusEvent.class);
+ Either<List<ESTimeBasedEvent>, ActionStatus> status = auditingDao.getListOfDistributionStatuses("123-456");
+ assertEquals(2, status.left().value().size());
+ }
+
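+ // Stores user-admin events in two different monthly indexes and expects count() to see both.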
+ @Test
+ public void testGetCountAdminEventMonthly() {
+
+ String timestamp1 = "2016-06-23 13:34:53.123";
+ String timestamp2 = "2015-06-23 13:34:53.123";
+ String creationPeriod = Constants.MONTH;
+ String expectedIndexName1 = auditingDao.getIndexPrefix() + "-2016-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName1).execute().actionGet()
+ .isExists());
+ String expectedIndexName2 = auditingDao.getIndexPrefix() + "-2015-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName2).execute().actionGet()
+ .isExists());
+
+ Map<AuditingFieldsKeysEnum, Object> params1 = getUserAdminEventParams(timestamp1);
+ testCreationPeriodScenario(params1, creationPeriod, expectedIndexName1, UserAdminEvent.class);
+ Map<AuditingFieldsKeysEnum, Object> params2 = getUserAdminEventParams(timestamp2);
+ testCreationPeriodScenario(params2, creationPeriod, expectedIndexName2, UserAdminEvent.class);
+
+ long count = auditingDao.count(UserAdminEvent.class, new MatchAllQueryBuilder());
+ log.debug("Testing auditing count {}", count);
+ assertEquals(2, count);
+ }
+
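+ // Stores several event types for one service and queries its distribution status list; the result is only logged, not asserted.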
+ @Test
+ public void testServiceDistributionStatuses() {
+
+ String timestamp = "2016-06-23 13:34:53.123";
+ String creationPeriod = Constants.MONTH;
+ String expectedIndexName = auditingDao.getIndexPrefix() + "-2016-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params = getUserAdminEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAdminEvent.class);
+ params = getUserAccessEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAccessEvent.class);
+ params = getResourceAdminEventParams(timestamp, "DRequest");
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, ResourceAdminEvent.class);
+ params = getDistributionNotificationEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, DistributionNotificationEvent.class);
+ Either<List<ESTimeBasedEvent>, ActionStatus> status = auditingDao
+ .getServiceDistributionStatusesList("SeviceId");
+ log.debug("Testing auditing count {}", status);
+ }
+
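+ // Same flow as the disabled minute/yearly variants above, using monthly index creation.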
+ @Test
+ public void testAddUpdateAdminEventMonthly() {
+
+ String timestamp = "2016-06-23 13:34:53.123";
+ String creationPeriod = Constants.MONTH;
+ String expectedIndexName = auditingDao.getIndexPrefix() + "-2016-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+ Map<AuditingFieldsKeysEnum, Object> params = getUserAdminEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAdminEvent.class);
+ params = getUserAccessEventParams(timestamp);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, UserAccessEvent.class);
+ params = getResourceAdminEventParams(timestamp, "addResource");
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, ResourceAdminEvent.class);
+ }
+
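+ // Adds a single audit record under the given creation period, asserts that the expected time-based index was created, and verifies every field was persisted as sent.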
+ private SearchResponse testCreationPeriodScenario(Map<AuditingFieldsKeysEnum, Object> params, String creationPeriod,
+ String expectedIndexName, Class<? extends AuditingGenericEvent> clazz) {
+
+ String typeName = clazz.getSimpleName().toLowerCase();
+ log.debug("Testing auditing type {}", typeName);
+ setCreationPeriod(creationPeriod);
+ ActionStatus saveUserAdminEvent = auditingDao.addRecord(params, typeName);
+ assertEquals(ActionStatus.OK, saveUserAdminEvent);
+ assertTrue(esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+ MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder();
+
+ SearchResponse searchResponse = esclient.getClient().prepareSearch(expectedIndexName).setTypes(typeName)
+ .setQuery(matchAllQueryBuilder).execute().actionGet();
+
+ SearchHits hits = searchResponse.getHits();
+ assertEquals(1, hits.getTotalHits());
+ log.debug("Checking that all expected fields are properly persisted");
+ validateHitValues(params, hits.getAt(0));
+ log.debug("testCreationPeriodScenario successful");
+ return searchResponse;
+ }
+
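+ // Checks each auditing field from the request parameters against the document actually stored in Elasticsearch.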
+ private void validateHitValues(Map<AuditingFieldsKeysEnum, Object> params, SearchHit searchHit) {
+ Map<String, Object> source = searchHit.getSource();
+ log.debug("Hit source is {}", searchHit.sourceAsString());
+ for (Entry<AuditingFieldsKeysEnum, Object> paramsEntry : params.entrySet()) {
+ AuditingFieldsKeysEnum key = paramsEntry.getKey();
+ log.debug("Testing auditing field {}", key.name());
+ Object value = paramsEntry.getValue();
+ // assertEquals(value, source.get(auditField2esField.get(key)));
+ assertEquals(value, source.get(key.getDisplayName()));
+ }
+ }
+
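+ // Points the "auditingevents" index prefix at the given creation period (minute, month or year).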
+ private void setCreationPeriod(String creationPeriod) {
+ Configuration configuration = configurationManager.getConfiguration();
+ List<IndicesTimeFrequencyEntry> indicesTimeFrequencyEntries = new ArrayList<>();
+ IndicesTimeFrequencyEntry indicesTimeFrequencyEntry = new IndicesTimeFrequencyEntry();
+ indicesTimeFrequencyEntry.setIndexPrefix("auditingevents");
+ indicesTimeFrequencyEntry.setCreationPeriod(creationPeriod);
+ // add the entry to the list so the configured period actually takes effect
+ indicesTimeFrequencyEntries.add(indicesTimeFrequencyEntry);
+ configuration.getElasticSearch().setIndicesTimeFrequency(indicesTimeFrequencyEntries);
+ }
+
+ private Map<AuditingFieldsKeysEnum, Object> getUserAdminEventParams(String timestamp) {
+
+ Map<AuditingFieldsKeysEnum, Object> params = new HashMap<AuditingFieldsKeysEnum, Object>();
+ String action = "updateUser";
+ String modifierName = "moshe moshe";
+ String modifierUid = "mosheUid";
+ String userUid = "mosheUid";
+ String userBeforeName = "moshe moshe";
+ String userBeforeEmail = "moshe@moshe1.com";
+ String userBeforeRole = "TESTER";
+ String userAfterName = "moshe moshe";
+ String userAfterEmail = "moshe@moshe2.com";
+ String userAfterRole = "TESTER";
+ String userStatus = "200";
+ String userDesc = "OK";
+
+ params.put(AuditingFieldsKeysEnum.AUDIT_ACTION, action);
+ params.put(AuditingFieldsKeysEnum.AUDIT_MODIFIER_UID, modifierName + '(' + modifierUid + ')');
+ params.put(AuditingFieldsKeysEnum.AUDIT_USER_UID, userUid);
+ params.put(AuditingFieldsKeysEnum.AUDIT_USER_BEFORE,
+ userUid + ", " + userBeforeName + ", " + userBeforeEmail + ", " + userBeforeRole);
+ params.put(AuditingFieldsKeysEnum.AUDIT_USER_AFTER,
+ userUid + ", " + userAfterName + ", " + userAfterEmail + ", " + userAfterRole);
+ params.put(AuditingFieldsKeysEnum.AUDIT_STATUS, userStatus);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DESC, userDesc);
+ params.put(AuditingFieldsKeysEnum.AUDIT_TIMESTAMP, timestamp);
+
+ return params;
+ }
+
+ private Map<AuditingFieldsKeysEnum, Object> getUserAccessEventParams(String timestamp) {
+
+ Map<AuditingFieldsKeysEnum, Object> params = new HashMap<AuditingFieldsKeysEnum, Object>();
+ String action = "userAccess";
+ String userUid = "mosheUid";
+ String userName = "moshe moshe";
+ String userStatus = "200";
+ String userDesc = "OK";
+
+ params.put(AuditingFieldsKeysEnum.AUDIT_ACTION, action);
+ params.put(AuditingFieldsKeysEnum.AUDIT_USER_UID, userName + '(' + userUid + ')');
+ params.put(AuditingFieldsKeysEnum.AUDIT_STATUS, userStatus);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DESC, userDesc);
+ params.put(AuditingFieldsKeysEnum.AUDIT_TIMESTAMP, timestamp);
+
+ return params;
+ }
+
+ private Map<AuditingFieldsKeysEnum, Object> getResourceAdminEventParams(String timestamp, String action) {
+
+ Map<AuditingFieldsKeysEnum, Object> params = new HashMap<AuditingFieldsKeysEnum, Object>();
+
+ String modifierName = "moshe moshe";
+ String modifierUid = "mosheUid";
+ String resourceName = "Centos";
+ String resourceType = "Resource";
+ String currState = "READY_FOR_CERTIFICATION";
+ String prevState = "CHECKED_OUT";
+ String currVersion = "1.1.4";
+ String prevVersion = "1.1.3";
+ String status = "200";
+ String desc = "OK";
+ String distributionId = "123-456";
+ String serviceId = "SeviceId";
+
+ params.put(AuditingFieldsKeysEnum.AUDIT_ACTION, action);
+ params.put(AuditingFieldsKeysEnum.AUDIT_MODIFIER_NAME, modifierName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_MODIFIER_UID, modifierUid);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_NAME, resourceName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_TYPE, resourceType);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_CURR_STATE, currState);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_PREV_STATE, prevState);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_CURR_VERSION, currVersion);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_PREV_VERSION, prevVersion);
+ params.put(AuditingFieldsKeysEnum.AUDIT_STATUS, status);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DESC, desc);
+ params.put(AuditingFieldsKeysEnum.AUDIT_TIMESTAMP, timestamp);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_ID, distributionId);
+ params.put(AuditingFieldsKeysEnum.AUDIT_SERVICE_INSTANCE_ID, serviceId);
+
+ return params;
+ }
+
+ private Map<AuditingFieldsKeysEnum, Object> getDistributionStatusEventParams(String timestamp) {
+
+ Map<AuditingFieldsKeysEnum, Object> params = new HashMap<AuditingFieldsKeysEnum, Object>();
+ String action = "DStatus";
+ String modifierName = "moshe moshe";
+ String modifierUid = "mosheUid";
+ String topicName = "Centos";
+ String serviceId = "SeviceId";
+ String resourceUrl = "resourceUrl";
+ String distributionId = "123-456";
+
+ String status = "200";
+ String desc = "OK";
+
+ params.put(AuditingFieldsKeysEnum.AUDIT_DESC, desc);
+ params.put(AuditingFieldsKeysEnum.AUDIT_TIMESTAMP, timestamp);
+ params.put(AuditingFieldsKeysEnum.AUDIT_STATUS, status);
+ params.put(AuditingFieldsKeysEnum.AUDIT_ACTION, action);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_ID, distributionId);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_CONSUMER_ID, modifierUid);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_TOPIC_NAME, topicName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_RESOURCE_URL, resourceUrl);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_STATUS_TIME, timestamp);
+ params.put(AuditingFieldsKeysEnum.AUDIT_SERVICE_INSTANCE_ID, serviceId);
+
+ return params;
+ }
+
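+ // Disabled: stores request and notification events for one distribution and verifies getListOfDistributionByAction finds both.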
+ // @Test
+ public void getListOfDistributionByActionTest() {
+
+ String timestamp = "2016-06-23 13:34:53.123";
+ String distributionId = "123-456";
+
+ String creationPeriod = Constants.MONTH;
+ String expectedIndexName = auditingDao.getIndexPrefix() + "-2016-06";
+ assertTrue(!esclient.getClient().admin().indices().prepareExists(expectedIndexName).execute().actionGet()
+ .isExists());
+
+ // Client client = esclient.getClient();
+ // final CreateIndexRequestBuilder createIndexRequestBuilder =
+ // client.admin().indices().prepareCreate(expectedIndexName);
+ // final XContentBuilder mappingBuilder =
+ // jsonBuilder().startObject().startObject("resourceadminevent")
+ // .startObject("_ttl").field("enabled", "true").field("default",
+ // "1s").endObject().endObject()
+ // .endObject();
+ // System.out.println(mappingBuilder.string());
+ // createIndexRequestBuilder.addMapping(documentType, mappingBuilder);
+ //
+ // // MAPPING DONE
+ // createIndexRequestBuilder.execute().actionGet();
+ //
+ //
+
+ Map<AuditingFieldsKeysEnum, Object> params = getResourceAdminEventParams(timestamp, "DRequest");
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_ID, distributionId);
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, ResourceAdminEvent.class);
+ params = getDistributionNotificationEventParams(timestamp);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_ID, distributionId);
+
+ testCreationPeriodScenario(params, creationPeriod, expectedIndexName, DistributionNotificationEvent.class);
+
+ Either<List<ESTimeBasedEvent>, ActionStatus> distributionByAction = auditingDao
+ .getListOfDistributionByAction(distributionId, "DRequest", "200", ResourceAdminEvent.class);
+ assertTrue(distributionByAction.isLeft());
+ assertFalse(distributionByAction.left().value().isEmpty());
+
+ distributionByAction = auditingDao.getListOfDistributionByAction(distributionId, "DNotify", "200",
+ DistributionNotificationEvent.class);
+ assertTrue(distributionByAction.isLeft());
+ assertFalse(distributionByAction.left().value().isEmpty());
+
+ }
+
+ private Map<AuditingFieldsKeysEnum, Object> getDistributionNotificationEventParams(String timestamp) {
+
+ Map<AuditingFieldsKeysEnum, Object> params = new HashMap<AuditingFieldsKeysEnum, Object>();
+
+ String action = "DNotify";
+ String modifierName = "moshe moshe";
+ String modifierUid = "mosheUid";
+ String resourceName = "Centos";
+ String resourceType = "Resource";
+
+ String currVersion = "1.1.4";
+ String currState = "READY_FOR_CERTIFICATION";
+ String status = "200";
+ String desc = "OK";
+ String did = "1027";
+ String topicName = "Centos";
+ String serviceId = "SeviceId";
+ String requestId = "12364";
+
+ params.put(AuditingFieldsKeysEnum.AUDIT_ACTION, action);
+ params.put(AuditingFieldsKeysEnum.AUDIT_MODIFIER_UID, modifierUid);
+ params.put(AuditingFieldsKeysEnum.AUDIT_MODIFIER_NAME, modifierName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_NAME, resourceName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_TYPE, resourceType);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_CURR_STATE, currState);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_TOPIC_NAME, topicName);
+ params.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_CURR_VERSION, currVersion);
+ params.put(AuditingFieldsKeysEnum.AUDIT_STATUS, status);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DESC, desc);
+ params.put(AuditingFieldsKeysEnum.AUDIT_TIMESTAMP, timestamp);
+ params.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_ID, did);
+ params.put(AuditingFieldsKeysEnum.AUDIT_SERVICE_INSTANCE_ID, serviceId);
+ return params;
+ }
+
+}
diff --git a/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/CassandraTest.java b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/CassandraTest.java
new file mode 100644
index 0000000..caaf070
--- /dev/null
+++ b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/CassandraTest.java
@@ -0,0 +1,74 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * SDC
+ * ================================================================================
+ * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.sdc.be.resources;
+
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.mapping.Mapper;
+import com.datastax.driver.mapping.MappingManager;
+
+import org.openecomp.sdc.be.dao.Account;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CassandraTest {
+ private static Logger log = LoggerFactory.getLogger(CassandraTest.class.getName());
+ private Cluster cluster;
+
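+ // Disabled manual check: creates a keyspace and table on a live Cassandra node and exercises object-mapper save/get/delete.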
+ // @Test
+ public void testCrud() {
+ String node = "mtanjv9sdcg44";
+
+ cluster = Cluster.builder().addContactPoint(node).build();
+
+ // Query
+ String query = "CREATE KEYSPACE IF NOT EXISTS dstest WITH replication "
+ + "= {'class':'SimpleStrategy', 'replication_factor':1};";
+
+ String queryTable = "CREATE TABLE IF NOT EXISTS accounts(email varchar PRIMARY KEY, name varchar);";
+
+ Session session = cluster.connect();
+ // Executing the query
+ session.execute(query);
+ // //using the KeySpace
+ session.execute("USE dstest");
+ session.execute(queryTable);
+
+ Mapper<Account> mapper = new MappingManager(session).mapper(Account.class);
+ Account account = new Account("John Doe", "jd@example.com");
+ // Class<? extends Account> class1 = account.getClass();
+ // Class class2 = Account.class;
+ mapper.save(account);
+
+ Account whose = mapper.get("jd@example.com");
+ log.debug("Account name: {}", whose.getName());
+
+ account.setName("Samanta Smit");
+ mapper.save(account);
+ whose = mapper.get("jd@example.com");
+ log.debug("Account name: {}", whose.getName());
+
+ mapper.delete(account);
+ whose = mapper.get("jd@example.com");
+
+ cluster.close();
+ }
+}
diff --git a/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ESUsersDAOTest.java b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ESUsersDAOTest.java
new file mode 100644
index 0000000..a99acbe
--- /dev/null
+++ b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/ESUsersDAOTest.java
@@ -0,0 +1,64 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * SDC
+ * ================================================================================
+ * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.sdc.be.resources;
+
+//@RunWith(SpringJUnit4ClassRunner.class)
+//@ContextConfiguration("classpath:application-context-test.xml")
+//@TestExecutionListeners(listeners = {DependencyInjectionTestExecutionListener.class, DirtiesContextTestExecutionListener.class, TransactionalTestExecutionListener.class})
+public class ESUsersDAOTest {
+
+ // @Resource
+ // ElasticSearchClient esclient;
+ //
+ //// @Resource(name = "users-dao")
+ // private IUsersDAO usersDao;
+
+ // @Test
+ public void testNewUserStub() {
+
+ }
+
+ // @Test
+ public void testNewUser() {
+ // if( usersDao == null ){
+ // assertTrue(false);
+ // }
+ //
+ // String id = "yhufksd57834601";
+ // UserData userData = new UserData("Myname", "Mylastname", id, "email",
+ // "Tester");
+ //
+ // ActionStatus saveUserData = usersDao.saveUserData(userData);
+ // assertEquals(saveUserData, ActionStatus.OK);
+ //
+ // Either<UserData, ActionStatus> statusFromEs =
+ // usersDao.getUserData(id);
+ // assertTrue(statusFromEs.isLeft() );
+ // UserData fromEs = statusFromEs.left().value();
+ // assertNotNull(fromEs);
+ // assertEquals(userData, fromEs);
+ //
+ //
+ // usersDao.deleteUserData(id);
+
+ }
+
+}
diff --git a/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/TitanGenericDaoTest.java b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/TitanGenericDaoTest.java
new file mode 100644
index 0000000..a4110f5
--- /dev/null
+++ b/catalog-dao/src/test/java/org/openecomp/sdc/be/resources/TitanGenericDaoTest.java
@@ -0,0 +1,721 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * SDC
+ * ================================================================================
+ * Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.sdc.be.resources;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import javax.annotation.Resource;
+
+import org.apache.commons.configuration.BaseConfiguration;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.tinkerpop.gremlin.structure.Direction;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.openecomp.sdc.be.config.ConfigurationManager;
+import org.openecomp.sdc.be.dao.neo4j.GraphEdgeLabels;
+import org.openecomp.sdc.be.dao.neo4j.GraphPropertiesDictionary;
+import org.openecomp.sdc.be.dao.titan.TitanGenericDao;
+import org.openecomp.sdc.be.dao.titan.TitanOperationStatus;
+import org.openecomp.sdc.be.dao.utils.UserStatusEnum;
+import org.openecomp.sdc.be.datatypes.components.ResourceMetadataDataDefinition;
+import org.openecomp.sdc.be.datatypes.enums.NodeTypeEnum;
+import org.openecomp.sdc.be.resources.data.AdditionalInfoParameterData;
+import org.openecomp.sdc.be.resources.data.ArtifactData;
+import org.openecomp.sdc.be.resources.data.ComponentInstanceData;
+import org.openecomp.sdc.be.resources.data.GraphNodeLock;
+import org.openecomp.sdc.be.resources.data.ResourceMetadataData;
+import org.openecomp.sdc.be.resources.data.UserData;
+import org.openecomp.sdc.common.api.ConfigurationSource;
+import org.openecomp.sdc.common.api.UserRoleEnum;
+import org.openecomp.sdc.common.impl.ExternalConfiguration;
+import org.openecomp.sdc.common.impl.FSConfigurationSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.TestExecutionListeners;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import org.springframework.test.context.support.DependencyInjectionTestExecutionListener;
+import org.springframework.test.context.support.DirtiesContextTestExecutionListener;
+import org.springframework.test.context.transaction.TransactionalTestExecutionListener;
+
+import com.google.gson.Gson;
+import com.thinkaurelius.titan.core.PropertyKey;
+import com.thinkaurelius.titan.core.TitanEdge;
+import com.thinkaurelius.titan.core.TitanFactory;
+import com.thinkaurelius.titan.core.TitanGraph;
+import com.thinkaurelius.titan.core.TitanVertex;
+import com.thinkaurelius.titan.core.attribute.Text;
+import com.thinkaurelius.titan.core.schema.TitanManagement;
+
+import fj.data.Either;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration("classpath:application-context-test.xml")
+@TestExecutionListeners(listeners = { DependencyInjectionTestExecutionListener.class,
+ DirtiesContextTestExecutionListener.class, TransactionalTestExecutionListener.class })
+public class TitanGenericDaoTest {
+ private static Logger log = LoggerFactory.getLogger(TitanGenericDaoTest.class.getName());
+ private static ConfigurationManager configurationManager;
+
+ @Resource(name = "titan-generic-dao")
+ private TitanGenericDao titanDao;
+
+ @BeforeClass
+ public static void setupBeforeClass() {
+ ExternalConfiguration.setAppName("catalog-dao");
+ String appConfigDir = "src/test/resources/config/catalog-dao";
+ ConfigurationSource configurationSource = new FSConfigurationSource(ExternalConfiguration.getChangeListener(),
+ appConfigDir);
+ configurationManager = new ConfigurationManager(configurationSource);
+ configurationManager.getConfiguration()
+ .setTitanCfgFile("../catalog-be/src/main/resources/config/titan.properties");
+ configurationManager.getConfiguration().setTitanInMemoryGraph(true);
+ }
+
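+ // Disabled: writes an edge property through the raw Titan API and reads it back via titanDao.getProperty().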
+ // @Test
+ public void testcheckEdgeProps() {
+ TitanGraph graph = titanDao.getGraph().left().value();
+ TitanVertex v1 = graph.addVertex();
+ v1.property("prop1", 123);
+ TitanVertex v2 = graph.addVertex();
+ v2.property("prop1", 456);
+ TitanEdge addEdge = v1.addEdge("label11", v2);
+ addEdge.property("edgeProp", "my prop edge");
+ graph.tx().commit();
+
+ Either<TitanVertex, TitanOperationStatus> v11 = titanDao.getVertexByProperty("prop1", 123);
+ Iterator<Edge> edges = v11.left().value().edges(Direction.OUT, "label11");
+ Edge edge = edges.next();
+ // String value = (String)edge.value("edgeProp");
+ String value = (String) titanDao.getProperty(edge, "edgeProp");
+ log.debug(value);
+
+ }
+
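+ // Full create / read / update / delete cycle for a single UserData node.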
+ @Test
+ public void testCrudNode() {
+
+ String id = "user12345abc";
+ UserData userData = new UserData("Myname123", "Mylastname", id, "email123", "Tester",
+ UserStatusEnum.ACTIVE.name(), null);
+
+ Either<UserData, TitanOperationStatus> newNode = titanDao.createNode(userData, UserData.class);
+
+ assertTrue(newNode.isLeft());
+
+ log.debug("{}", newNode.left().value());
+
+ titanDao.commit();
+
+ ImmutablePair<String, Object> keyValueId = userData.getKeyValueId();
+ Either<UserData, TitanOperationStatus> node = titanDao.getNode(keyValueId.getKey(), keyValueId.getValue(),
+ UserData.class);
+ titanDao.commit();
+ assertTrue(node.isLeft());
+ log.debug("{}", node.left().value());
+
+ userData.setRole("Designer");
+ node = titanDao.updateNode(userData, UserData.class);
+ assertTrue(node.isLeft());
+ log.debug("{}", node.left().value());
+		assertEquals("Designer", node.left().value().getRole());
+ titanDao.commit();
+
+ node = titanDao.deleteNode(userData, UserData.class);
+ assertTrue(node.isLeft());
+ log.debug("{}", node.left().value());
+ titanDao.commit();
+
+ node = titanDao.getNode(keyValueId.getKey(), keyValueId.getValue(), UserData.class);
+ assertTrue(node.isRight());
+ log.debug("{}", node.right().value());
+
+ }
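+
+	// Usage sketch (illustrative only, not executed): every DAO call above returns fj.data.Either,
+	// with the data object on the left on success and a TitanOperationStatus on the right on
+	// failure, so callers branch before dereferencing either side:
+	//
+	//   Either<UserData, TitanOperationStatus> result = titanDao.createNode(userData, UserData.class);
+	//   if (result.isLeft()) {
+	//       UserData created = result.left().value();             // success path
+	//   } else {
+	//       TitanOperationStatus status = result.right().value(); // failure path
+	//   }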
+
+ @Test
+ public void testGetByCategoryAndAll() {
+
+ // create 2 nodes
+ String id = "user12345abc";
+ UserData userData1 = new UserData("Myname123", "Mylastname", id, "email123", "Tester",
+ UserStatusEnum.ACTIVE.name(), null);
+
+ Either<UserData, TitanOperationStatus> node1 = titanDao.createNode(userData1, UserData.class);
+ assertTrue(node1.isLeft());
+ log.debug("{}", node1.left().value());
+
+ id = "userdfkoer45abc";
+ UserData userData2 = new UserData("Mynadyhme123", "Mylasghtname", id, "emaighdl123", "Designer",
+ UserStatusEnum.ACTIVE.name(), null);
+ Either<UserData, TitanOperationStatus> node2 = titanDao.createNode(userData2, UserData.class);
+ assertTrue(node2.isLeft());
+ log.debug("{}", node2.left().value());
+
+ titanDao.commit();
+
+ ImmutablePair<String, Object> keyValueId1 = userData1.getKeyValueId();
+ // get first node
+ Either<UserData, TitanOperationStatus> node = titanDao.getNode(keyValueId1.getKey(), keyValueId1.getValue(),
+ UserData.class);
+ assertTrue(node.isLeft());
+ log.debug("{}", node.left().value());
+ titanDao.commit();
+
+		// get all - at least the 2 users created above must be returned
+		Either<List<UserData>, TitanOperationStatus> all = titanDao.getAll(NodeTypeEnum.User, UserData.class);
+		assertTrue(all.isLeft());
+		assertTrue(all.left().value().size() >= 2);
+
+ log.debug("{}", all.left().value());
+
+		Map<String, Object> props = new HashMap<>();
+
+ props.put(keyValueId1.getKey(), keyValueId1.getValue());
+
+ // get by criteria. must be 1
+ Either<List<UserData>, TitanOperationStatus> byCriteria = titanDao.getByCriteria(NodeTypeEnum.User, props,
+ UserData.class);
+ assertTrue(byCriteria.isLeft());
+ assertEquals(1, byCriteria.left().value().size());
+
+ log.debug("{}", byCriteria.left().value());
+
+ // delete all nodes
+ node = titanDao.deleteNode(userData1, UserData.class);
+ assertTrue(node.isLeft());
+ node = titanDao.deleteNode(userData2, UserData.class);
+ assertTrue(node.isLeft());
+ }
+
+ @Test
+ public void testGetEdgesForNode() {
+ String id = "user12345abc";
+ UserData userData = new UserData("Myname123", "Mylastname", id, "email123", UserRoleEnum.ADMIN.name(),
+ UserStatusEnum.ACTIVE.name(), null);
+ titanDao.createNode(userData, UserData.class);
+ ResourceMetadataData resourceData = new ResourceMetadataData();
+ resourceData.getMetadataDataDefinition().setName("resourceForLock");
+ resourceData.getMetadataDataDefinition().setVersion("0.1");
+ resourceData.getMetadataDataDefinition().setState("newState");
+ resourceData.getMetadataDataDefinition().setUniqueId(resourceData.getMetadataDataDefinition().getName() + "."
+ + resourceData.getMetadataDataDefinition().getVersion());
+
+ titanDao.createNode(resourceData, ResourceMetadataData.class);
+ titanDao.createRelation(userData, resourceData, GraphEdgeLabels.LAST_MODIFIER, null);
+ titanDao.commit();
+
+		Either<List<Edge>, TitanOperationStatus> eitherEdges = titanDao.getEdgesForNode(userData, Direction.OUT);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(1, eitherEdges.left().value().size());
+
+		eitherEdges = titanDao.getEdgesForNode(userData, Direction.IN);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(0, eitherEdges.left().value().size());
+
+		eitherEdges = titanDao.getEdgesForNode(resourceData, Direction.OUT);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(0, eitherEdges.left().value().size());
+
+		eitherEdges = titanDao.getEdgesForNode(resourceData, Direction.IN);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(1, eitherEdges.left().value().size());
+
+		eitherEdges = titanDao.getEdgesForNode(resourceData, Direction.BOTH);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(1, eitherEdges.left().value().size());
+
+		eitherEdges = titanDao.getEdgesForNode(userData, Direction.BOTH);
+		assertTrue(eitherEdges.isLeft());
+		assertEquals(1, eitherEdges.left().value().size());
+
+ titanDao.deleteNode(userData, UserData.class);
+ titanDao.deleteNode(resourceData, ResourceMetadataData.class);
+ titanDao.commit();
+ }
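+
+	// As the assertions above show, createRelation(userData, resourceData, LAST_MODIFIER, null)
+	// produces a single edge that is outgoing (Direction.OUT) from the user vertex and incoming
+	// (Direction.IN) at the resource vertex, so Direction.BOTH also yields exactly one edge on
+	// each side.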
+
+ @Test
+ public void testLockElement() {
+
+ ResourceMetadataData resourceData = new ResourceMetadataData();
+
+ resourceData.getMetadataDataDefinition().setName("resourceForLock");
+ resourceData.getMetadataDataDefinition().setVersion("0.1");
+ resourceData.getMetadataDataDefinition().setState("newState");
+ resourceData.getMetadataDataDefinition().setUniqueId(resourceData.getMetadataDataDefinition().getName() + "."
+ + resourceData.getMetadataDataDefinition().getVersion());
+
+ Either<ResourceMetadataData, TitanOperationStatus> resource1 = titanDao.createNode(resourceData,
+ ResourceMetadataData.class);
+ assertTrue(resource1.isLeft());
+ titanDao.commit();
+ String lockId = "lock_" + resourceData.getLabel() + "_" + resource1.left().value().getUniqueId();
+
+ Either<GraphNodeLock, TitanOperationStatus> nodeLock = titanDao
+ .getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isRight());
+ assertEquals(TitanOperationStatus.NOT_FOUND, nodeLock.right().value());
+
+ TitanOperationStatus status = titanDao.lockElement(resourceData);
+ assertEquals(TitanOperationStatus.OK, status);
+
+ nodeLock = titanDao.getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isLeft());
+ assertEquals(lockId, nodeLock.left().value().getUniqueId());
+
+ titanDao.commit();
+
+ status = titanDao.lockElement(resourceData);
+ assertEquals(TitanOperationStatus.ALREADY_LOCKED, status);
+
+ status = titanDao.releaseElement(resourceData);
+ assertEquals(TitanOperationStatus.OK, status);
+
+ nodeLock = titanDao.getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isRight());
+ assertEquals(TitanOperationStatus.NOT_FOUND, nodeLock.right().value());
+ titanDao.deleteNode(resourceData, ResourceMetadataData.class);
+ titanDao.commit();
+
+ }
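+
+	// Locking convention exercised above: lockElement(node) creates a companion GraphNodeLock
+	// vertex whose unique id follows the pattern "lock_" + <node label> + "_" + <node uniqueId>,
+	// a second lockElement on the same node returns ALREADY_LOCKED, and releaseElement removes
+	// the lock vertex again (the follow-up lookup returns NOT_FOUND).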
+
+ @Test
+ public void testReLockElement() throws InterruptedException {
+
+ ResourceMetadataData resourceData = new ResourceMetadataData();
+
+ resourceData.getMetadataDataDefinition().setName("resourceForReLock");
+ resourceData.getMetadataDataDefinition().setVersion("0.1");
+ resourceData.getMetadataDataDefinition().setState("newState");
+ resourceData.getMetadataDataDefinition().setUniqueId(resourceData.getMetadataDataDefinition().getName() + "."
+ + resourceData.getMetadataDataDefinition().getVersion());
+
+ Either<ResourceMetadataData, TitanOperationStatus> resource1 = titanDao.createNode(resourceData,
+ ResourceMetadataData.class);
+ assertTrue(resource1.isLeft());
+ titanDao.commit();
+ String lockId = "lock_" + resourceData.getLabel() + "_" + resource1.left().value().getUniqueId();
+
+ Either<GraphNodeLock, TitanOperationStatus> nodeLock = titanDao
+ .getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isRight());
+ assertEquals(TitanOperationStatus.NOT_FOUND, nodeLock.right().value());
+
+ // lock
+ TitanOperationStatus status = titanDao.lockElement(resourceData);
+ assertEquals(TitanOperationStatus.OK, status);
+
+ nodeLock = titanDao.getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isLeft());
+ assertEquals(lockId, nodeLock.left().value().getUniqueId());
+ long time1 = nodeLock.left().value().getTime();
+
+ titanDao.commit();
+
+ // timeout
+ configurationManager.getConfiguration().setTitanLockTimeout(2L);
+ Thread.sleep(5001);
+
+ // relock
+ status = titanDao.lockElement(resourceData);
+ assertEquals(TitanOperationStatus.OK, status);
+
+ nodeLock = titanDao.getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isLeft());
+ assertEquals(lockId, nodeLock.left().value().getUniqueId());
+
+ long time2 = nodeLock.left().value().getTime();
+
+ assertTrue(time2 > time1);
+
+ status = titanDao.releaseElement(resourceData);
+ assertEquals(TitanOperationStatus.OK, status);
+
+ nodeLock = titanDao.getNode(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), lockId, GraphNodeLock.class);
+ assertTrue(nodeLock.isRight());
+ assertEquals(TitanOperationStatus.NOT_FOUND, nodeLock.right().value());
+
+ titanDao.deleteNode(resourceData, ResourceMetadataData.class);
+ titanDao.commit();
+
+ }
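+
+	// Re-lock semantics exercised above: once the configured titanLockTimeout has elapsed, a
+	// stale lock can be taken over by a new lockElement call, which refreshes the timestamp
+	// stored on the lock vertex (time2 > time1).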
+
+ @Test
+ public void testBoolean() {
+ ResourceMetadataData resourceData = new ResourceMetadataData();
+
+ resourceData.getMetadataDataDefinition().setName("resourceForLock");
+ resourceData.getMetadataDataDefinition().setVersion("0.1");
+ resourceData.getMetadataDataDefinition().setState("NOT_CERTIFIED_CHECKOUT");
+ resourceData.getMetadataDataDefinition().setHighestVersion(true);
+ resourceData.getMetadataDataDefinition().setUniqueId(resourceData.getMetadataDataDefinition().getName() + "."
+ + resourceData.getMetadataDataDefinition().getVersion());
+
+ Either<ResourceMetadataData, TitanOperationStatus> resource1 = titanDao.createNode(resourceData,
+ ResourceMetadataData.class);
+ assertTrue(resource1.isLeft());
+
+ resourceData = new ResourceMetadataData();
+
+ resourceData.getMetadataDataDefinition().setName("resourceForLock");
+ resourceData.getMetadataDataDefinition().setVersion("0.2");
+ resourceData.getMetadataDataDefinition().setState("NOT_CERTIFIED_CHECKOUT");
+ resourceData.getMetadataDataDefinition().setHighestVersion(false);
+ resourceData.getMetadataDataDefinition().setUniqueId(resourceData.getMetadataDataDefinition().getName() + "."
+ + resourceData.getMetadataDataDefinition().getVersion());
+
+ Either<ResourceMetadataData, TitanOperationStatus> resource2 = titanDao.createNode(resourceData,
+ ResourceMetadataData.class);
+ titanDao.commit();
+
+		Map<String, Object> props = new HashMap<>();
+
+ props.put(GraphPropertiesDictionary.STATE.getProperty(), "NOT_CERTIFIED_CHECKOUT");
+ props.put("name", "resourceForLock");
+ props.put(GraphPropertiesDictionary.IS_HIGHEST_VERSION.getProperty(), false);
+
+ // get by criteria. must be 1
+ Either<List<ResourceMetadataData>, TitanOperationStatus> byCriteria = titanDao
+ .getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isLeft());
+
+ titanDao.deleteNode(resource1.left().value(), ResourceMetadataData.class);
+
+ titanDao.deleteNode(resource2.left().value(), ResourceMetadataData.class);
+ titanDao.commit();
+ }
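+
+	// Hypothetical helper (not wired into the tests above): several tests build a
+	// ResourceMetadataData the same way, deriving uniqueId as name + "." + version.
+	// A sketch of how that setup could be factored out:
+	private ResourceMetadataData buildResourceMetadata(String name, String version, String state) {
+		ResourceMetadataData resourceData = new ResourceMetadataData();
+		resourceData.getMetadataDataDefinition().setName(name);
+		resourceData.getMetadataDataDefinition().setVersion(version);
+		resourceData.getMetadataDataDefinition().setState(state);
+		resourceData.getMetadataDataDefinition().setUniqueId(name + "." + version);
+		return resourceData;
+	}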
+
+ // @Test
+ public void testStringSearch() {
+ TitanGraph graph;
+
+ BaseConfiguration conf = new BaseConfiguration();
+ conf.setProperty("storage.backend", "inmemory");
+ graph = TitanFactory.open(conf);
+
+ // TitanManagement graphMgt = graph.getManagementSystem();
+ TitanManagement graphMgt = graph.openManagement();
+ PropertyKey propKey = graphMgt.makePropertyKey("string1").dataType(String.class).make();
+ graphMgt.buildIndex("string1", Vertex.class).addKey(propKey).unique().buildCompositeIndex();
+
+ propKey = graphMgt.makePropertyKey("string2").dataType(String.class).make();
+
+ // graphMgt.buildIndex("string2", Vertex.class).addKey(propKey,
+ // Mapping.TEXT.getParameter()).buildMixedIndex("search");
+ graphMgt.buildIndex("string2", Vertex.class).addKey(propKey).unique().buildCompositeIndex();
+ graphMgt.commit();
+
+ // TitanVertex v = graph.addVertex();
+ // v.addProperty("string1", "My new String 1");
+ // v.addProperty("string2", "String11");
+ // graph.commit();
+ //
+ // v = graph.addVertex();
+ // v.addProperty("string1", "my new string 1");
+ // v.addProperty("string2", "string11");
+ // graph.commit();
+ //
+ // System.out.println("First index search - case");
+ //
+ // Iterable<Vertex> vertices = graph.getVertices("string1", "My new
+ // String 1");
+ // Iterator<Vertex> iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+ // System.out.println("First index search non case");
+ //
+ // vertices = graph.getVertices("string1", "my new string 1");
+ // iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+ // System.out.println("Second index search case");
+ //
+ // vertices = graph.getVertices("string2", "String11");
+ // iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+ // System.out.println("second index search non case");
+ //
+ // vertices = graph.getVertices("string2", "string11");
+ // iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+ // System.out.println("Query index search case");
+ // vertices = graph.query().has("string1", "My new String
+ // 1").vertices();
+ // iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+ // System.out.println("Query index search non case");
+ // vertices = graph.query().has("string1", "my new string
+ // 1").vertices();
+ // iter = vertices.iterator();
+ // while ( iter.hasNext() ){
+ // Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ // }
+
+		log.debug("**** predicate index search, case-insensitive");
+ Iterable<TitanVertex> vertices = graph.query().has("string1", Text.REGEX, "my new string 1").vertices();
+ Iterator<TitanVertex> iter = vertices.iterator();
+ while (iter.hasNext()) {
+ Vertex ver = iter.next();
+ // System.out.println(com.tinkerpop.blueprints.util.ElementHelper.getProperties(ver));
+ log.debug("{}", titanDao.getProperties(ver));
+ }
+
+ }
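+
+	// Background for the disabled test above (an assumption about Titan behaviour, not verified
+	// here): buildCompositeIndex() creates an exact-match index, so predicate queries such as
+	// Text.REGEX are not served by it and fall back to a graph scan; full-text/regex lookups
+	// would normally need a mixed index backed by an external search engine (see the
+	// commented-out Mapping.TEXT / buildMixedIndex("search") variant).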
+
+ @Test
+ public void testDuplicateResultDueToTitanBug() {
+
+ // TitanGraph titanGraph = titanDao.getGraph().left().value();
+ // TitanManagement managementSystem = titanGraph.getManagementSystem();
+
+ // GraphPropertiesDictionary[] properties = {
+ // GraphPropertiesDictionary.IS_ABSTRACT,
+ // GraphPropertiesDictionary.ADDITIONAL_INFO_ID_TO_KEY,
+ // GraphPropertiesDictionary.POSITION_X,
+ // GraphPropertiesDictionary.ARTIFACT_TIMEOUT };
+ //
+ // for (GraphPropertiesDictionary property : properties) {
+ // if (false ==
+ // managementSystem.containsGraphIndex(property.getProperty())) {
+ // PropertyKey propKey1 =
+ // managementSystem.makePropertyKey(property.getProperty()).dataType(property.getClazz()).make();
+ // managementSystem.buildIndex(property.getProperty(),
+ // Vertex.class).addKey(propKey1).unique().buildCompositeIndex();
+ // }
+ // }
+
+ // managementSystem.commit();
+
+ ResourceMetadataData resourceData1 = new ResourceMetadataData();
+ resourceData1.getMetadataDataDefinition().setUniqueId("A");
+ ((ResourceMetadataDataDefinition) resourceData1.getMetadataDataDefinition()).setAbstract(true);
+ resourceData1.getMetadataDataDefinition().setName("aaaa");
+
+ Either<ResourceMetadataData, TitanOperationStatus> newNode1 = titanDao.createNode(resourceData1,
+ ResourceMetadataData.class);
+ assertTrue(newNode1.isLeft());
+ log.debug("{}", newNode1.left().value());
+ // titanDao.commit();
+
+ Map<String, Object> props = new HashMap<>();
+ props.put(GraphPropertiesDictionary.IS_ABSTRACT.getProperty(), true);
+ Either<List<ResourceMetadataData>, TitanOperationStatus> byCriteria = titanDao
+ .getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isLeft());
+ assertEquals("check one result returned", 1, byCriteria.left().value().size());
+ // titanDao.commit();
+
+ ResourceMetadataData resourceToUpdate = new ResourceMetadataData();
+ ((ResourceMetadataDataDefinition) resourceToUpdate.getMetadataDataDefinition()).setAbstract(false);
+ resourceToUpdate.getMetadataDataDefinition().setUniqueId("A");
+ Either<ResourceMetadataData, TitanOperationStatus> updateNode = titanDao.updateNode(resourceToUpdate,
+ ResourceMetadataData.class);
+ assertTrue(updateNode.isLeft());
+ // titanDao.commit();
+
+ byCriteria = titanDao.getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria.right().value());
+
+ AdditionalInfoParameterData infoParameterData = new AdditionalInfoParameterData();
+ infoParameterData.getAdditionalInfoParameterDataDefinition().setUniqueId("123");
+ Map<String, String> idToKey = new HashMap<>();
+ idToKey.put("key1", "value1");
+ infoParameterData.setIdToKey(idToKey);
+
+ Either<AdditionalInfoParameterData, TitanOperationStatus> newNode2 = titanDao.createNode(infoParameterData,
+ AdditionalInfoParameterData.class);
+ assertTrue(newNode2.isLeft());
+ log.debug("{}", newNode2.left().value());
+ // titanDao.commit();
+
+ Map<String, String> idToKey2 = new HashMap<>();
+ idToKey2.put("key1", "value2");
+
+ Map<String, Object> props2 = new HashMap<>();
+ props2.put(GraphPropertiesDictionary.UNIQUE_ID.getProperty(), "123");
+ Gson gson = new Gson();
+ props2.put(GraphPropertiesDictionary.ADDITIONAL_INFO_ID_TO_KEY.getProperty(), gson.toJson(idToKey2));
+ // props2.put(GraphPropertiesDictionary.ADDITIONAL_INFO_ID_TO_KEY.getProperty(),
+ // idToKey2);
+
+ Either<List<AdditionalInfoParameterData>, TitanOperationStatus> byCriteria2 = titanDao
+ .getByCriteria(NodeTypeEnum.AdditionalInfoParameters, props2, AdditionalInfoParameterData.class);
+ assertTrue(byCriteria2.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria2.right().value());
+
+ infoParameterData.setIdToKey(idToKey2);
+
+ Either<AdditionalInfoParameterData, TitanOperationStatus> updateNode2 = titanDao.updateNode(infoParameterData,
+ AdditionalInfoParameterData.class);
+ assertTrue(updateNode2.isLeft());
+ // titanDao.commit();
+
+ props2.put(GraphPropertiesDictionary.ADDITIONAL_INFO_ID_TO_KEY.getProperty(), idToKey);
+ byCriteria2 = titanDao.getByCriteria(NodeTypeEnum.AdditionalInfoParameters, props2,
+ AdditionalInfoParameterData.class);
+ assertTrue(byCriteria2.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria2.right().value());
+
+ ComponentInstanceData resourceInstanceData = new ComponentInstanceData();
+ resourceInstanceData.getComponentInstDataDefinition().setUniqueId("ri123");
+ resourceInstanceData.getComponentInstDataDefinition().setPosX("22");
+ resourceInstanceData.getComponentInstDataDefinition().setName("myresource_1");
+
+ Either<ComponentInstanceData, TitanOperationStatus> newNode3 = titanDao.createNode(resourceInstanceData,
+ ComponentInstanceData.class);
+ assertTrue(newNode3.isLeft());
+ log.debug("{}", newNode3.left().value());
+ // titanDao.commit();
+
+ resourceInstanceData.getComponentInstDataDefinition().setPosX("50");
+ Either<ComponentInstanceData, TitanOperationStatus> updateNode3 = titanDao.updateNode(resourceInstanceData,
+ ComponentInstanceData.class);
+ assertTrue(updateNode3.isLeft());
+ // titanDao.commit();
+
+ resourceInstanceData.getComponentInstDataDefinition().setName("myresource_2");
+ updateNode3 = titanDao.updateNode(resourceInstanceData, ComponentInstanceData.class);
+ assertTrue(updateNode3.isLeft());
+ // titanDao.commit();
+
+ Map<String, Object> props3 = new HashMap<>();
+ props3.put("positionX", "22");
+ Either<List<ComponentInstanceData>, TitanOperationStatus> byCriteria3 = titanDao
+ .getByCriteria(NodeTypeEnum.ResourceInstance, props3, ComponentInstanceData.class);
+ assertTrue(byCriteria3.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria3.right().value());
+
+ props3.put("positionX", "50");
+ byCriteria3 = titanDao.getByCriteria(NodeTypeEnum.ResourceInstance, props3, ComponentInstanceData.class);
+ assertTrue(byCriteria3.isLeft());
+
+ /////////////////////////// check integer ////////////////////////
+
+ ArtifactData artifactData = new ArtifactData();
+ artifactData.getArtifactDataDefinition().setUniqueId("ad234");
+ artifactData.getArtifactDataDefinition().setTimeout(100);
+
+ Either<ArtifactData, TitanOperationStatus> newNode4 = titanDao.createNode(artifactData, ArtifactData.class);
+ assertTrue(newNode4.isLeft());
+ log.debug("{}", newNode4.left().value());
+ // titanDao.commit();
+
+ artifactData.getArtifactDataDefinition().setTimeout(50);
+ Either<ArtifactData, TitanOperationStatus> updateNode4 = titanDao.updateNode(artifactData, ArtifactData.class);
+ assertTrue(updateNode4.isLeft());
+ // titanDao.commit();
+
+ Map<String, Object> props4 = new HashMap<>();
+ props4.put("timeout", 100);
+ Either<List<ArtifactData>, TitanOperationStatus> byCriteria4 = titanDao.getByCriteria(NodeTypeEnum.ArtifactRef,
+ props4, ArtifactData.class);
+ assertTrue(byCriteria4.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria4.right().value());
+
+ props4.put("timeout", 50);
+ byCriteria4 = titanDao.getByCriteria(NodeTypeEnum.ArtifactRef, props4, ArtifactData.class);
+ assertTrue(byCriteria4.isLeft());
+
+ titanDao.rollback();
+ }
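+
+	// Note on the NOT_FOUND expectations above (interpretation only): each lookup targets an
+	// indexed property that was just updated inside the same, still-open transaction; because of
+	// the Titan index bug referenced in the test name, querying by the pre-update value is
+	// expected to come back as NOT_FOUND rather than as a stale or duplicate row, and the
+	// transaction is rolled back at the end so none of this intermediate state is persisted.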
+
+ @Test
+	public void testDuplicateResultUseHasNotQueryDueToTitanBug() {
+
+ String name = "bbbb";
+
+ ResourceMetadataData resourceData1 = new ResourceMetadataData();
+ resourceData1.getMetadataDataDefinition().setUniqueId("A");
+ ((ResourceMetadataDataDefinition) resourceData1.getMetadataDataDefinition()).setAbstract(true);
+ resourceData1.getMetadataDataDefinition().setName(name);
+
+ Either<ResourceMetadataData, TitanOperationStatus> newNode1 = titanDao.createNode(resourceData1,
+ ResourceMetadataData.class);
+ assertTrue(newNode1.isLeft());
+ log.debug("{}", newNode1.left().value());
+ // titanDao.commit();
+
+ Map<String, Object> props = new HashMap<>();
+ props.put(GraphPropertiesDictionary.IS_ABSTRACT.getProperty(), true);
+ Either<List<ResourceMetadataData>, TitanOperationStatus> byCriteria = titanDao
+ .getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isLeft());
+ assertEquals("check one result returned", 1, byCriteria.left().value().size());
+ // titanDao.commit();
+
+ ResourceMetadataData resourceToUpdate = new ResourceMetadataData();
+ ((ResourceMetadataDataDefinition) resourceToUpdate.getMetadataDataDefinition()).setAbstract(false);
+ resourceToUpdate.getMetadataDataDefinition().setUniqueId("A");
+ Either<ResourceMetadataData, TitanOperationStatus> updateNode = titanDao.updateNode(resourceToUpdate,
+ ResourceMetadataData.class);
+ assertTrue(updateNode.isLeft());
+ // titanDao.commit();
+
+ // no result where isAbstract = true
+ byCriteria = titanDao.getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria.right().value());
+
+ // one result where isAbstract != true
+ byCriteria = titanDao.getByCriteria(NodeTypeEnum.Resource, null, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isLeft());
+ assertEquals("check one result returned", 1, byCriteria.left().value().size());
+
+ props.put(GraphPropertiesDictionary.IS_ABSTRACT.getProperty(), false);
+ byCriteria = titanDao.getByCriteria(NodeTypeEnum.Resource, null, props, ResourceMetadataData.class);
+ assertTrue(byCriteria.isRight());
+		assertEquals("expect NOT_FOUND due to titan index bug", TitanOperationStatus.NOT_FOUND,
+				byCriteria.right().value());
+
+ titanDao.rollback();
+
+ }
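+
+	// The four-argument getByCriteria(type, hasProps, hasNotProps, clazz) overload used above
+	// appears to treat the third map as exclusion ("has not") criteria: with hasProps == null and
+	// hasNotProps == {isAbstract=true}, the one resource whose isAbstract flag was just cleared is
+	// returned, whereas excluding isAbstract=false yields NOT_FOUND.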
+
+}
diff --git a/catalog-dao/src/test/resources/application-context-test.xml b/catalog-dao/src/test/resources/application-context-test.xml
new file mode 100644
index 0000000..468dab3
--- /dev/null
+++ b/catalog-dao/src/test/resources/application-context-test.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
+ xmlns:util="http://www.springframework.org/schema/util"
+ xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
+ http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.0.xsd
+ http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.0.xsd">
+
+ <util:properties id="elasticsearchConfig" location="classpath:elasticsearch.yml" />
+
+
+ <context:component-scan
+ base-package="
+ org.openecomp.sdc.be.dao.impl,
+ org.openecomp.sdc.be.dao.es,
+ org.openecomp.sdc.be.dao.neo4j,
+ org.openecomp.sdc.be.dao.titan,
+ org.openecomp.sdc.be.resources.impl
+ ">
+
+ </context:component-scan>
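+
+	<!-- The titan-generic-dao bean injected into TitanGenericDaoTest via
+	     @Resource(name = "titan-generic-dao") is expected to be discovered by this scan from the
+	     org.openecomp.sdc.be.dao.titan package. -->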
+
+</beans>
diff --git a/catalog-dao/src/test/resources/cassandra.yaml b/catalog-dao/src/test/resources/cassandra.yaml
new file mode 100644
index 0000000..39f9871
--- /dev/null
+++ b/catalog-dao/src/test/resources/cassandra.yaml
@@ -0,0 +1,801 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial token is set.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# Maximum throttle in KBs per second, per delivery thread. This will be
+# reduced proportionally to the number of nodes in the cluster. (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: AllowAllAuthorizer
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 1000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk. Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+# If not set, the default directory is $CASSANDRA_HOME/data/data.
+# data_file_directories:
+# - /var/lib/cassandra/data
+
+# Commit log directory. When running on magnetic HDD, this should be on a
+# separate spindle from the data directories.
+# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
+# commitlog_directory: /var/lib/cassandra/commitlog
+
+# policy for data disk failures:
+# die: shut down gossip and Thrift and kill the JVM for any fs errors or
+# single-sstable errors, so the node can be replaced.
+# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# policy for commit disk failures:
+# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+# stop_commit: shutdown the commit log, letting writes collect but
+# continuing to service reads, as in pre-2.0.5 Cassandra
+# ignore: ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# The off-heap memory allocator. Affects storage engine metadata as
+# well as caches. Experiments show that JEMAlloc saves some memory
+# compared to the native GCC allocator (i.e., JEMalloc is more
+# fragmentation-resistant).
+#
+# Supported values are: NativeAllocator, JEMallocAllocator
+#
+# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
+# modify cassandra-env.sh as directed in the file.
+#
+# Defaults to NativeAllocator
+# memory_allocator: NativeAllocator
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+# saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32 MB, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+
+# Total memory to use for sstable-reading buffers. Defaults to
+# the smaller of 1/4 of heap or 512MB.
+# file_cache_size_in_mb: 512
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. A larger memtable_cleanup_threshold will
+# mean larger flushes and hence less compaction, but also less concurrent
+# flush activity which can make it difficult to keep your disks fed
+# under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+# heap_buffers: on heap nio buffers
+# offheap_buffers: off heap (direct) nio buffers
+# offheap_objects: native memory, eliminating nio buffer heap overhead
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32 MB
+# on 32-bit JVMs, and 8192 MB on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 8192
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked.
+#
+# memtable_flush_writers defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#memtable_flush_writers: 8
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+listen_address: localhost
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: 0.0.0.0
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+broadcast_rpc_address: 127.0.0.1
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync -> One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the amount
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+# 1) a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# 2) but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# inter_dc_stream_throughput_outbound_megabits_per_sec:
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This _can_ involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which means streams never time out.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+# - GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
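+#
+# Illustration only, not part of this configuration: a production deployment
+# following the guidance above would typically switch this to
+#
+# endpoint_snitch: GossipingPropertyFileSnitch
+#
+# and describe each node's own location in cassandra-rackdc.properties, e.g.
+# (datacenter and rack names below are placeholders):
+#
+# dc=DC1
+# rack=RAC1
+#
+# Remember the warning above: changing the snitch after data has been inserted
+# requires a full repair.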
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter-node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# Use the DHE/ECDHE ciphers if running in FIPS 140-compliant mode.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs.
+# If set to rack, Cassandra will encrypt the traffic between the racks.
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
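+ # Sketch only (not enabled here): to encrypt just the traffic that crosses
+ # datacenter boundaries, using keystore/truststore files generated as per the
+ # JSSE guide linked above. Paths and passwords below are placeholders:
+ #
+ # internode_encryption: dc
+ # keystore: /path/to/server-keystore.jks
+ # keystore_password: <keystore-password>
+ # truststore: /path/to/server-truststore.jks
+ # truststore_password: <truststore-password>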
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
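+ # Sketch only (not enabled here): two-way TLS for clients would combine the
+ # commented options above roughly as follows (paths and passwords are placeholders):
+ #
+ # enabled: true
+ # require_client_auth: true
+ # truststore: /path/to/client-truststore.jks
+ # truststore_password: <truststore-password>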
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
diff --git a/catalog-dao/src/test/resources/config/catalog-dao/configuration.yaml b/catalog-dao/src/test/resources/config/catalog-dao/configuration.yaml
new file mode 100644
index 0000000..0b43e3b
--- /dev/null
+++ b/catalog-dao/src/test/resources/config/catalog-dao/configuration.yaml
@@ -0,0 +1,120 @@
+identificationHeaderFields:
+ - HTTP_IV_USER
+ - HTTP_CSP_FIRSTNAME
+ - HTTP_CSP_LASTNAME
+ - HTTP_IV_REMOTE_ADDRESS
+ - HTTP_CSP_WSTYPE
+
+
+# catalog backend hostname
+beFqdn: 172.20.43.124:8080
+
+
+# catalog backend http port
+beHttpPort: 8080
+
+# catalog backend http context
+beContext: /sdc/rest/config/get
+
+# catalog backend protocol
+beProtocol: http
+
+# catalog backend ssl port
+beSslPort: 8443
+
+version: 1.0
+released: 2012-11-30
+
+titanCfgFile: src/main/resources/config/titan.properties
+titanInMemoryGraph: true
+titanLockTimeout: 30
+
+# Protocols
+protocols:
+ - http
+ - https
+
+# Users
+users:
+ tom: passwd
+ bob: passwd
+#Neo4j
+neo4j:
+ host: neo4jhost
+ port: 7474
+ user: neo4j
+ password: "12345"
+ maxHttpConnection: 100
+ maxHttpPerRoute: 20
+
+cassandraConfig:
+ cassandraHosts: ['mtanjv9sdcg44']
+ localDataCenter:
+ reconnectTimeout : 30000
+ authenticate: false
+ username: koko
+ password: bobo
+ ssl: false
+ truststorePath : /path/path
+ truststorePassword : 123123
+ keySpaces:
+ - { name: sdcaudit, replicationStrategy: SimpleStrategy, replicationInfo: ['1']}
+ - { name: sdcartifact, replicationStrategy: SimpleStrategy, replicationInfo: ['1']}
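+ # Hypothetical example only (commented out): a keyspace replicated across two
+ # datacenters, assuming replicationInfo is read as alternating
+ # datacenter-name / replica-count pairs when NetworkTopologyStrategy is used.
+ # - { name: sdcexample, replicationStrategy: NetworkTopologyStrategy, replicationInfo: ['DC1', '1', 'DC2', '1']}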
+
+#Application-specific settings of ES
+elasticSearch:
+ # Mapping of index prefix to time-based frame. For example, if below is configured:
+ #
+ # - indexPrefix: auditingevents
+ # creationPeriod: minute
+ #
+ # then an ES object whose type is mapped to the "auditingevents-*" template and which is created on 2015-12-23 13:24:54 will enter the "auditingevents-2015-12-23-13-24" index.
+ # Another object created on 2015-12-23 13:25:54 will enter the "auditingevents-2015-12-23-13-25" index.
+ # If creationPeriod: month, both of the above will enter the "auditingevents-2015-12" index.
+ #
+ # Legal values for creationPeriod - year, month, day, hour, minute, none (meaning no time-based behaviour).
+ #
+ # If no creationPeriod is configured for an indexPrefix, the default behaviour is creationPeriod: month.
+
+ indicesTimeFrequency:
+ - indexPrefix: auditingevents
+ creationPeriod: month
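+ # Illustration only (commented out): a second, hypothetical prefix that rolls
+ # its index daily would be listed alongside the entry above, e.g.
+ # - indexPrefix: monitoringevents
+ #   creationPeriod: day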
+
+switchoverDetector:
+ gBeFqdn: AIO-BE.ecomp.idns.cip
+ gFeFqdn: AIO-FE.ecomp.idns.cip
+ beVip: 1.2.3.4
+ feVip: 1.2.3.4
+ beResolveAttempts: 3
+ feResolveAttempts: 3
+ enabled: false
+ interval: 60
+ changePriorityUser: ecompasdc
+ changePriorityPassword: ecompasdc123
+ publishNetworkUrl: "http://cora.web/crt/CipDomain.ECOMP-ASDC-DEVST/config/update_network?user=root"
+ publishNetworkBody: '{"note":"publish network"}'
+ groups:
+ beSet: { changePriorityUrl: "http://cora.web/crt/CipDomain.ECOMP-ASDC-DEVST/config/sites/AIO-BE.ecomp.idns.cip?user=root",
+ changePriorityBody: '{"name":"AIO-BE.ecomp.idns.cip","uri":"/crt/CipDomain.ECOMP-ASDC-DEVST/config/sites/AIO-BE.ecomp.idns.cip","no_ad_redirection":false,"v4groups":{"failover_groups":["/crt/CipDomain.ECOMP-ASDC-DEVST/config/groups/group_mg_be","/crt/CipDomain.ECOMP-ASDC-DEVST/config/groups/group_bs_be"],"failover_policy":["FAILALL"]},"comment":"AIO BE G-fqdn","intended_app_proto":"DNS"}'}
+ feSet: { changePriorityUrl: "http://cora.web/crt/CipDomain.ECOMP-ASDC-DEVST/config/sites/AIO-FE.ecomp.idns.cip?user=root",
+ changePriorityBody: '{"comment":"AIO G-fqdn","name":"AIO-FE.ecomp.idns.cip","v4groups":{"failover_groups":["/crt/CipDomain.ECOMP-ASDC-DEVST/config/groups/group_mg_fe","/crt/CipDomain.ECOMP-ASDC-DEVST/config/groups/group_bs_fe"],"failover_policy":["FAILALL"]},"no_ad_redirection":false,"intended_app_proto":"DNS","uri":"/crt/CipDomain.ECOMP-ASDC-DEVST/config/sites/AIO-FE.ecomp.idns.cip"}'}
+
+applicationL1Cache:
+ datatypes:
+ enabled: true
+ firstRunDelay: 10
+ pollIntervalInSec: 60
+
+applicationL2Cache:
+ enabled: true
+ catalogL1Cache:
+ enabled: true
+ resourcesSizeInCache: 300
+ servicesSizeInCache: 200
+ productsSizeInCache: 100
+ queue:
+ syncIntervalInSecondes: 60
+ waitOnShutDownInMinutes: 30
+ numberOfCacheWorkers: 4
+toscaValidators:
+ stringMaxLength: 1024
\ No newline at end of file
diff --git a/catalog-dao/src/test/resources/config/catalog-dao/ecomp-error-configuration.yaml b/catalog-dao/src/test/resources/config/catalog-dao/ecomp-error-configuration.yaml
new file mode 100644
index 0000000..9d7cd74
--- /dev/null
+++ b/catalog-dao/src/test/resources/config/catalog-dao/ecomp-error-configuration.yaml
@@ -0,0 +1,383 @@
+###########################################
+# Note the conventions of the field values:
+# type can be one of: CONFIG_ERROR, SYSTEM_ERROR, DATA_ERROR, CONNECTION_PROBLEM, AUTHENTICATION_PROBLEM
+# severity can be one of: WARN, ERROR, FATAL
+# alarmSeverity can be one of: CRITICAL,MAJOR,MINOR,INFORMATIONAL,NONE
+# code is a unique integer in the range 3003-9999 (3000-3002 are reserved for internal usage)
+# The above enumeration values are provided out-of-the-box and can be changed in code.
+# In case of a mismatch between configuration and code, the appropriate error will be printed to the log
+#
+## Range of BE codes - 3010-7999
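+#
+# For reference, every entry below follows the shape sketched here
+# (SomeNewError and ASDC_XXXX are placeholders, not real entries):
+#
+#  SomeNewError: {
+#    type: SYSTEM_ERROR,
+#    code: ASDC_XXXX,
+#    severity: ERROR,
+#    description: "Human-readable message; %s placeholders are filled in at runtime",
+#    alarmSeverity: MAJOR
+#  }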
+
+errors:
+
+ BeRestApiGeneralError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4000,
+ severity: ERROR,
+ description: "Unexpected error during BE REST API execution",
+ alarmSeverity: CRITICAL
+ }
+
+ BeHealthCheckError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3010,
+ severity: ERROR,
+ description: "Error during BE Health Check",
+ alarmSeverity: CRITICAL
+ }
+
+ BeInitializationError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4019,
+ severity: ERROR,
+ description: "Catalog-BE was not initialized properly",
+ alarmSeverity: CRITICAL
+ }
+
+ BeResourceMissingError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3011,
+ severity: ERROR,
+ description: "Mandatory resource %s cannot be found in repository",
+ alarmSeverity: MAJOR
+ }
+
+ BeServiceMissingError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3012,
+ severity: ERROR,
+ description: "Mandatory service %s cannot be found in repository",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedAddingResourceInstanceError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3013,
+ severity: ERROR,
+ description: "Failed to add resource instance of resource %s to service %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeIncorrectServiceError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3014,
+ severity: ERROR,
+ description: "Service %s is not valid",
+ alarmSeverity: MAJOR
+ }
+
+ BeRepositoryDeleteError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3015,
+ severity: ERROR,
+ description: "Failed to delete object %s from repository",
+ alarmSeverity: CRITICAL
+ }
+
+ BeRepositoryQueryError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3016,
+ severity: ERROR,
+ description: "Failed to fetch from repository %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeInvalidConfigurationError: {
+ type: CONFIG_ERROR,
+ code: ASDC_3017,
+ severity: FATAL,
+ description: "Configuration parameter %s is invalid. Value configured is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeUebConnectionError: {
+ type: CONNECTION_PROBLEM,
+ code: ASDC_4001,
+ severity: ERROR,
+ description: "Connection problem towards U-EB server. Reason: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeUebSystemError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3019,
+ severity: ERROR,
+ description: "Error occurred during access to U-EB Server. Operation: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeUebObjectNotFoundError: {
+ type: DATA_ERROR,
+ code: ASDC_4005,
+ severity: ERROR,
+ description: "Error occurred during access to U-EB Server. Data not found: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeDistributionEngineSystemError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3021,
+ severity: ERROR,
+ description: "Error occurred in Distribution Engine. Failed operation: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeUebAuthenticationError: {
+ type: AUTHENTICATION_PROBLEM,
+ code: ASDC_4003,
+ severity: ERROR,
+ description: "Authentication problem towards U-EB server. Reason: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeUebUnkownHostError: {
+ type: CONNECTION_PROBLEM,
+ code: ASDC_4002,
+ severity: ERROR,
+ description: "Connection problem towards U-EB server. Cannot reach host %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeDistributionEngineInvalidArtifactType: {
+ type: DATA_ERROR,
+ code: ASDC_4006,
+ severity: WARN,
+ description: "The artifact type %s does not appear in the list of valid artifacts %s",
+ alarmSeverity: MAJOR
+ }
+ BeInvalidTypeError: {
+ type: DATA_ERROR,
+ code: ASDC_4008,
+ severity: WARN,
+ description: "The type %s of %s is invalid",
+ alarmSeverity: MAJOR
+ }
+ BeInvalidValueError: {
+ type: DATA_ERROR,
+ code: ASDC_3028,
+ severity: WARN,
+ description: "The value %s of %s from type %s is invalid",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedDeletingResourceInstanceError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_3029,
+ severity: ERROR,
+ description: "Failed to delete resource instance %s from service %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeMissingConfigurationError: {
+ type: CONFIG_ERROR,
+ code: ASDC_3030,
+ severity: FATAL,
+ description: "Configuration parameter %s is missing",
+ alarmSeverity: MAJOR
+ }
+
+ BeConfigurationInvalidListSizeError: {
+ type: CONFIG_ERROR,
+ code: ASDC_3031,
+ severity: FATAL,
+ description: "Configuration parameter %s is invalid. At least %s values shall be configured",
+ alarmSeverity: MAJOR
+ }
+
+ ErrorConfigFileFormat: {
+ type: CONFIG_ERROR,
+ code: ASDC_3032,
+ severity: ERROR,
+ description: "Error element not found in YAML name: %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeMissingArtifactInformationError: {
+ type: DATA_ERROR,
+ code: ASDC_4010,
+ severity: ERROR,
+ description: "Artifact uploaded has missing information. Missing %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeArtifactMissingError: {
+ type: DATA_ERROR,
+ code: ASDC_4011,
+ severity: ERROR,
+ description: "Artifact %s requested is not found",
+ alarmSeverity: MAJOR
+ }
+
+ BeArtifactPayloadInvalid: {
+ type: DATA_ERROR,
+ code: ASDC_4012,
+ severity: ERROR,
+ description: "Payload of artifact uploaded is invalid (invalid MD5 or encryption)",
+ alarmSeverity: MAJOR
+ }
+
+ BeUserMissingError: {
+ type: DATA_ERROR,
+ code: ASDC_4009,
+ severity: ERROR,
+ description: "User %s requested is not found",
+ alarmSeverity: MAJOR
+ }
+
+ BeArtifactInformationInvalidError: {
+ type: DATA_ERROR,
+ code: ASDC_4013,
+ severity: ERROR,
+ description: "Input for artifact metadata is invalid",
+ alarmSeverity: MAJOR
+ }
+ BeFailedAddingCapabilityTypeError: {
+ type: DATA_ERROR,
+ code: ASDC_4015,
+ severity: ERROR,
+ description: "Failed adding capability type",
+ alarmSeverity: CRITICAL
+ }
+
+ BeCapabilityTypeMissingError: {
+ type: DATA_ERROR,
+ code: ASDC_4016,
+ severity: ERROR,
+ description: "Capability Type %s not found",
+ alarmSeverity: CRITICAL
+ }
+
+ BeInterfaceMissingError: {
+ type: DATA_ERROR,
+ code: ASDC_4020,
+ severity: ERROR,
+ description: "Interface %s required is missing",
+ alarmSeverity: MAJOR
+ }
+
+ BeDaoSystemError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4014,
+ severity: ERROR,
+ description: "Operation towards database failed",
+ alarmSeverity: CRITICAL
+ }
+
+ BeSystemError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4017,
+ severity: ERROR,
+ description: "Unexpected error during operation",
+ alarmSeverity: CRITICAL
+ }
+
+ BeFailedLockObjectError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4007,
+ severity: WARN,
+ description: "Failed to lock object for update",
+ alarmSeverity: CRITICAL
+ }
+
+ BeInvalidJsonInput: {
+ type: SYSTEM_ERROR,
+ code: ASDC_4018,
+ severity: ERROR,
+ description: "Failed to convert json input to object",
+ alarmSeverity: MAJOR
+ }
+
+ BeDistributionMissingError: {
+ type: DATA_ERROR,
+ code: ASDC_4021,
+ severity: ERROR,
+ description: "Distribution %s required is missing",
+ alarmSeverity: MAJOR
+ }
+
+ BeHealthCheckRecovery: {
+ type: RECOVERY,
+ code: ASDC_4022,
+ severity: INFO,
+ description: "BE Health Check Recovery",
+ alarmSeverity: INFORMATIONAL
+ }
+ BeFailedCreateNodeError: {
+ type: DATA_ERROR,
+ code: ASDC_6000,
+ severity: ERROR,
+ description: "Failed to create node %s on graph. status is %s",
+ alarmSeverity: MAJOR
+ }
+ BeFailedUpdateNodeError: {
+ type: DATA_ERROR,
+ code: ASDC_6001,
+ severity: ERROR,
+ description: "Failed to update node %s on graph. Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedDeleteNodeError: {
+ type: DATA_ERROR,
+ code: ASDC_6002,
+ severity: ERROR,
+ description: "Failed to delete node %s on graph. Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedRetrieveNodeError: {
+ type: DATA_ERROR,
+ code: ASDC_6003,
+ severity: ERROR,
+ description: "Failed to retrieve node %s from graph. Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeExecuteRollbackError: {
+ type: DATA_ERROR,
+ code: ASDC_6004,
+ severity: ERROR,
+ description: "Going to execute rollback on graph.",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedFindParentError: {
+ type: DATA_ERROR,
+ code: ASDC_6005,
+ severity: ERROR,
+ description: "Failed to find parent node %s on graph. Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedFindAllNodesError: {
+ type: DATA_ERROR,
+ code: ASDC_6006,
+ severity: ERROR,
+ description: "Failed to fetch all nodes with type %s of parent node %s . Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedFindAssociationError: {
+ type: DATA_ERROR,
+ code: ASDC_6007,
+ severity: ERROR,
+ description: "Cannot find node with type %s associated with node %s . Status is %s",
+ alarmSeverity: MAJOR
+ }
+
+ BeFailedFindAssociationError: {
+ type: DATA_ERROR,
+ code: ASDC_6008,
+ severity: ERROR,
+ description: "Cannot find node with type %s associated with node %s . Status is %s",
+ alarmSeverity: MAJOR
+ }
+ BeComponentCleanerSystemError: {
+ type: SYSTEM_ERROR,
+ code: ASDC_6009,
+ severity: ERROR,
+ description: "Error occurred in Component Cleaner Task. Failed operation: %s",
+ alarmSeverity: MAJOR
+ }
+
\ No newline at end of file
diff --git a/catalog-dao/src/test/resources/elasticsearch.yml b/catalog-dao/src/test/resources/elasticsearch.yml
new file mode 100644
index 0000000..e1808ad
--- /dev/null
+++ b/catalog-dao/src/test/resources/elasticsearch.yml
@@ -0,0 +1,392 @@
+
+elasticSearch.local: true
+elasticSearch.transportclient: false
+cluster.name: elasticsearch_1_5_2222
+
+discovery.zen.ping.multicast.enabled: false
+discovery.zen.ping.unicast.enabled: true
+discovery.zen.ping.unicast.hosts: 1.2.3.4
+transport.client.initial_nodes:
+ - 1.2.3.4:9300
+
+
+#plugin.types: "DeleteByQueryPlugin"
+
+##################### Elasticsearch Configuration Example #####################
+
+# This file contains an overview of various configuration settings,
+# targeted at operations staff. Application developers should
+# consult the guide at <http://elasticsearch.org/guide>.
+#
+# The installation procedure is covered at
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
+#
+# Elasticsearch comes with reasonable defaults for most settings,
+# so you can try it out without bothering with configuration.
+#
+# Most of the time, these defaults are just fine for running a production
+# cluster. If you're fine-tuning your cluster, or wondering about the
+# effect of a certain configuration option, please _do ask_ on the
+# mailing list or IRC channel [http://elasticsearch.org/community].
+
+# Any element in the configuration can be replaced with environment variables
+# by placing them in ${...} notation. For example:
+#
+# node.rack: ${RACK_ENV_VAR}
+
+# For information on supported formats and syntax for the config file, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
+
+
+################################### Cluster ###################################
+
+# Cluster name identifies your cluster for auto-discovery. If you're running
+# multiple clusters on the same network, make sure you're using unique names.
+#
+# cluster.name: elasticsearch
+
+
+#################################### Node #####################################
+
+# Node names are generated dynamically on startup, so you're relieved
+# from configuring them manually. You can tie this node to a specific name:
+#
+# node.name: "Franz Kafka"
+
+# Every node can be configured to allow or deny being eligible as the master,
+# and to allow or deny to store the data.
+#
+# Allow this node to be eligible as a master node (enabled by default):
+#
+# node.master: true
+#
+# Allow this node to store data (enabled by default):
+#
+# node.data: true
+
+# You can exploit these settings to design advanced cluster topologies.
+#
+# 1. You want this node to never become a master node, only to hold data.
+# This will be the "workhorse" of your cluster.
+#
+# node.master: false
+# node.data: true
+#
+# 2. You want this node to only serve as a master: to not store any data and
+# to have free resources. This will be the "coordinator" of your cluster.
+#
+# node.master: true
+# node.data: false
+#
+# 3. You want this node to be neither master nor data node, but
+# to act as a "search load balancer" (fetching data from nodes,
+# aggregating results, etc.)
+#
+# node.master: false
+# node.data: false
+
+# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
+# Node Info API [http://localhost:9200/_nodes] or GUI tools
+# such as <http://www.elasticsearch.org/overview/marvel/>,
+# <http://github.com/karmi/elasticsearch-paramedic>,
+# <http://github.com/lukas-vlcek/bigdesk> and
+# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
+
+# A node can have generic attributes associated with it, which can later be used
+# for customized shard allocation filtering, or allocation awareness. An attribute
+# is a simple key value pair, similar to node.key: value, here is an example:
+#
+# node.rack: rack314
+
+# By default, multiple nodes are allowed to start from the same installation location.
+# To disable this, set the following:
+# node.max_local_storage_nodes: 1
+
+
+#################################### Index ####################################
+
+# You can set a number of options (such as shard/replica options, mapping
+# or analyzer definitions, translog settings, ...) for indices globally,
+# in this file.
+#
+# Note that it makes more sense to configure index settings specifically for
+# a certain index, either when creating it or by using the index templates API.
+#
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
+# for more information.
+
+# Set the number of shards (splits) of an index (5 by default):
+#
+# index.number_of_shards: 5
+
+# Set the number of replicas (additional copies) of an index (1 by default):
+#
+# index.number_of_replicas: 1
+
+# Note that for development on a local machine, with small indices, it usually
+# makes sense to "disable" the distributed features:
+#
+index.number_of_shards: 1
+index.number_of_replicas: 0
+
+# These settings directly affect the performance of index and search operations
+# in your cluster. Assuming you have enough machines to hold shards and
+# replicas, the rule of thumb is:
+#
+# 1. Having more *shards* enhances the _indexing_ performance and allows you to
+# _distribute_ a big index across machines.
+# 2. Having more *replicas* enhances the _search_ performance and improves the
+# cluster _availability_.
+#
+# The "number_of_shards" is a one-time setting for an index.
+#
+# The "number_of_replicas" can be increased or decreased anytime,
+# by using the Index Update Settings API.
+#
+# Elasticsearch takes care of load balancing, relocating, gathering the
+# results from nodes, etc. Experiment with different settings to fine-tune
+# your setup.
+
+# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
+# the index status.
+
+
+#################################### Paths ####################################
+path.home: /src/test/resources
+# Path to directory containing configuration (this file and logging.yml):
+#
+path.conf: /src/test/resources
+
+# Path to directory where to store index data allocated for this node.
+#
+path.data: target/esdata
+#
+# Can optionally include more than one location, causing data to be striped across
+# the locations (a la RAID 0) on a file level, favouring locations with most free
+# space on creation. For example:
+#
+# path.data: /path/to/data1,/path/to/data2
+
+# Path to temporary files:
+#
+path.work: /target/eswork
+
+# Path to log files:
+#
+path.logs: /target/eslogs
+
+# Path to where plugins are installed:
+#
+# path.plugins: /path/to/plugins
+
+
+#################################### Plugin ###################################
+
+# If a plugin listed here is not installed for the current node, the node will not start.
+#
+# plugin.mandatory: mapper-attachments,lang-groovy
+
+
+################################### Memory ####################################
+
+# Elasticsearch performs poorly when the JVM starts swapping: you should ensure that
+# it _never_ swaps.
+#
+# Set this property to true to lock the memory:
+#
+# bootstrap.mlockall: true
+
+# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
+# to the same value, and that the machine has enough memory to allocate
+# for Elasticsearch, leaving enough memory for the operating system itself.
+#
+# You should also make sure that the Elasticsearch process is allowed to lock
+# the memory, e.g. by using `ulimit -l unlimited`.
+
+
+############################## Network And HTTP ###############################
+
+# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
+# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
+# communication. (The range means that if the port is busy, it will automatically
+# try the next port.)
+
+# Set the bind address specifically (IPv4 or IPv6):
+#
+# network.bind_host: 192.168.0.1
+
+# Set the address other nodes will use to communicate with this node. If not
+# set, it is automatically derived. It must point to an actual IP address.
+#
+# network.publish_host: 192.168.0.1
+
+# Set both 'bind_host' and 'publish_host':
+#
+# network.host: 192.168.0.1
+
+# Set a custom port for the node to node communication (9300 by default):
+#
+# transport.tcp.port: 9300
+
+# Enable compression for all communication between nodes (disabled by default):
+#
+# transport.tcp.compress: true
+
+# Set a custom port to listen for HTTP traffic:
+#
+# http.port: 9200
+
+# Set a custom allowed content length:
+#
+# http.max_content_length: 100mb
+
+# Disable HTTP completely:
+#
+# http.enabled: false
+
+
+################################### Gateway ###################################
+
+# The gateway allows for persisting the cluster state between full cluster
+# restarts. Every change to the state (such as adding an index) will be stored
+# in the gateway, and when the cluster starts up for the first time,
+# it will read its state from the gateway.
+
+# There are several types of gateway implementations. For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
+
+# The default gateway type is the "local" gateway (recommended):
+#
+# gateway.type: local
+
+# Settings below control how and when to start the initial recovery process on
+# a full cluster restart (to reuse as much local data as possible when using shared
+# gateway).
+
+# Allow recovery process after N nodes in a cluster are up:
+#
+gateway.recover_after_nodes: 1
+
+# Set the timeout to initiate the recovery process, once the N nodes
+# from previous setting are up (accepts time value):
+#
+# gateway.recover_after_time: 5m
+
+# Set how many nodes are expected in this cluster. Once these N nodes
+# are up (and recover_after_nodes is met), begin recovery process immediately
+# (without waiting for recover_after_time to expire):
+#
+gateway.expected_nodes: 1
+
+
+############################# Recovery Throttling #############################
+
+# These settings allow you to control the process of shard allocation between
+# nodes during initial recovery, replica allocation, rebalancing,
+# or when adding and removing nodes.
+
+# Set the number of concurrent recoveries happening on a node:
+#
+# 1. During the initial recovery
+#
+# cluster.routing.allocation.node_initial_primaries_recoveries: 4
+#
+# 2. During adding/removing nodes, rebalancing, etc
+#
+# cluster.routing.allocation.node_concurrent_recoveries: 2
+
+# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
+#
+# indices.recovery.max_bytes_per_sec: 20mb
+
+# Set to limit the number of open concurrent streams when
+# recovering a shard from a peer:
+#
+# indices.recovery.concurrent_streams: 5
+
+
+################################## Discovery ##################################
+
+# Discovery infrastructure ensures nodes can be found within a cluster
+# and a master node is elected. Multicast discovery is the default.
+
+# Set to ensure a node sees N other master eligible nodes to be considered
+# operational within the cluster. It's recommended to set it to a higher value
+# than 1 when running more than 2 nodes in the cluster.
+#
+# discovery.zen.minimum_master_nodes: 1
+
+# Set the time to wait for ping responses from other nodes when discovering.
+# Set this option to a higher value on a slow or congested network
+# to minimize discovery failures:
+#
+# discovery.zen.ping.timeout: 3s
+
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
+
+# Unicast discovery allows you to explicitly control which nodes will be used
+# to discover the cluster. It can be used when multicast is not present,
+# or to restrict the cluster communication-wise.
+#
+# 1. Disable multicast discovery (enabled by default):
+#
+# discovery.zen.ping.multicast.enabled: false
+#
+# 2. Configure an initial list of master nodes in the cluster
+# to perform discovery when new nodes (master or data) are started:
+#
+# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
+
+# EC2 discovery allows using the AWS EC2 API in order to perform discovery.
+#
+# You have to install the cloud-aws plugin to enable EC2 discovery.
+#
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
+#
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
+# for a step-by-step tutorial.
+
+# GCE discovery allows using the Google Compute Engine API in order to perform discovery.
+#
+# You have to install the cloud-gce plugin to enable GCE discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
+
+# Azure discovery allows using the Azure API in order to perform discovery.
+#
+# You have to install the cloud-azure plugin to enable Azure discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
+
+################################## Slow Log ##################################
+
+# Shard level query and fetch threshold logging.
+
+#index.search.slowlog.threshold.query.warn: 10s
+#index.search.slowlog.threshold.query.info: 5s
+#index.search.slowlog.threshold.query.debug: 2s
+#index.search.slowlog.threshold.query.trace: 500ms
+
+#index.search.slowlog.threshold.fetch.warn: 1s
+#index.search.slowlog.threshold.fetch.info: 800ms
+#index.search.slowlog.threshold.fetch.debug: 500ms
+#index.search.slowlog.threshold.fetch.trace: 200ms
+
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+
+################################## GC Logging ################################
+
+#monitor.jvm.gc.young.warn: 1000ms
+#monitor.jvm.gc.young.info: 700ms
+#monitor.jvm.gc.young.debug: 400ms
+
+#monitor.jvm.gc.old.warn: 10s
+#monitor.jvm.gc.old.info: 5s
+#monitor.jvm.gc.old.debug: 2s
+
diff --git a/catalog-dao/src/test/resources/images/apache.png b/catalog-dao/src/test/resources/images/apache.png
new file mode 100644
index 0000000..8e9f402
--- /dev/null
+++ b/catalog-dao/src/test/resources/images/apache.png
Binary files differ
diff --git a/catalog-dao/src/test/resources/log4j.properties b/catalog-dao/src/test/resources/log4j.properties
new file mode 100644
index 0000000..c18c3da
--- /dev/null
+++ b/catalog-dao/src/test/resources/log4j.properties
@@ -0,0 +1,8 @@
+# Root logger option
+log4j.rootLogger=info, stdout
+
+# Direct log messages to stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
diff --git a/catalog-dao/src/test/resources/logback-test.xml b/catalog-dao/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..4b7eb95
--- /dev/null
+++ b/catalog-dao/src/test/resources/logback-test.xml
@@ -0,0 +1,13 @@
+<!-- only one line, shut up logback ! -->
+<configuration >
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <Pattern>
+ %d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n
+ </Pattern>
+ </encoder>
+ </appender>
+ <root level="OFF">
+ <appender-ref ref="STDOUT" />
+ </root>
+</configuration>