Table of Contents
1. Description
Executes arbitrary groovy script
2. Connection
2.1. Attributes
There are no special attributes supported for this driver.
2.2. Parameters
The only required parameter is:
language=groovy
3. Query
Executes the groovy query. Used for read statements.
The entire code should be enclosed in a <![CDATA[ ]]> section.
In order to proceed to the next query processor, one needs to explicitly call
query.next()
You can use any <script> element in the body of the query to process every record returned by the query one by one. The easiest way to return/forward the current entry into another subquery/script is to use the etl.globals object, for example:
etl.globals['my_value'] = "some value"
In order to use results from previous <query> element, use:
etl.getParameter('name_of_property')
4. Script
Executes any read or write statements. Used mainly for "write" statements.
5. Examples
Query example: Reads a GraphSON document, parses each vertex into a hashmap and processes the entries one by one. After that, parses each edge into a hashmap and processes the entries one by one.
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE etl SYSTEM "https://scriptella.org/dtd/etl.dtd">
<etl>
    <description>Graphlytic job</description>
    <properties>
        job_name=Graphlytic job
    </properties>

    <!-- CONNECTIONS -->
    <connection id="graphdb" driver="graphConnection">
        project_id=1
    </connection>
    <connection id="groovy" driver="script">language=groovy</connection>
    <connection id="logInfo" driver="log">
        level=INFO
    </connection>

    <!-- JOB STEPS -->
    <script connection-id="logInfo">
        STARTING JOB "$job_name"
    </script>

    <!-- CLEAR DB PRIOR TO IMPORT -->
    <script connection-id="graphdb">
        MATCH (n) DETACH DELETE n
    </script>

    <!-- Parse the GraphSON file; each query.next() call runs the nested
         <script> elements once for the record stored in etl.globals -->
    <query connection-id="groovy">
        <![CDATA[
        import com.fasterxml.jackson.databind.JsonNode
        import com.fasterxml.jackson.databind.ObjectMapper
        import com.fasterxml.jackson.core.type.TypeReference
        import java.nio.file.Files
        import java.nio.file.Path
        import java.nio.file.Paths

        Path path = Paths.get("C:/path/to/data/dataset.graphson")
        String jsonData = new String(Files.readAllBytes(path), "UTF-8")
        ObjectMapper mapper = new ObjectMapper()
        JsonNode tree = mapper.readTree(jsonData)

        // Import vertices first so that the edge MATCH below can find both endpoints.
        if (tree.has("vertices")) {
            tree.get("vertices").elements().forEachRemaining((entity)-> {
                etl.globals['isEdge'] = false
                etl.globals['entity'] = mapper.convertValue(entity, new TypeReference<Map<String, Object>>(){})
                query.next()
            })
        }
        if (tree.has("edges")) {
            tree.get("edges").elements().forEachRemaining((entity)-> {
                etl.globals['isEdge'] = true
                etl.globals['entity'] = mapper.convertValue(entity, new TypeReference<Map<String, Object>>(){})
                query.next()
            })
        }
        ]]>

        <script connection-id="logInfo" if="etl.globals['isEdge'] == false">
            Importing VERTEX ${etl.globals['entity']};
        </script>
        <script connection-id="graphdb" if="etl.globals['isEdge'] == false">
            CREATE (n:IMPORTED {name: '${etl.globals['entity']['name']}', age: ${etl.globals['entity']['age']}, uid: '${etl.globals['entity']['_id']}'}) RETURN n
        </script>

        <script connection-id="logInfo" if="etl.globals['isEdge'] == true">
            Importing EDGE ${etl.globals['entity']};
        </script>
        <!-- A GraphSON edge runs from _outV to _inV: n matches _outV and m matches _inV,
             so the relationship must be created as (n)-[r]->(m), not (m)-[r]->(n) -->
        <script connection-id="graphdb" if="etl.globals['isEdge'] == true">
            MATCH (n), (m) WHERE n.uid='${etl.globals['entity']['_outV']}' and m.uid='${etl.globals['entity']['_inV']}'
            CREATE (n)-[r:${etl.globals['entity']['_label']}]->(m)
            RETURN n,r,m
        </script>
    </query>
</etl>