#!/bin/bash

#set -x # uncomment for bash script debugging

### ============================================================================
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
### ============LICENSE_END=====================================================


###
### c2m
###
### AUTHOR(S):
### Thomas Kulik, Deutsche Telekom AG, 2020
###
### DESCRIPTION:
### c2m automates additional tasks required in case you want to export and
### convert a set of wiki pages. the export and the first conversion to markdown
### are done by confluence2md, provided by viaboxx.
### c2m processes a list of (to be exported) wiki pages, creates corresponding
### export directories, exports and converts pages (in various formats if
### required), opens an editor and cleans up afterwards.
### c2m also checks for problematic content in the export and creates a warning
### in case of detection.
###
### ISSUES:
### - markdown (md) output of confluence2md sometimes contains tags that are
###   somehow "merged" with the topic headline; manual editing is required here
###
### OPEN:
### - confluence2md does not support all of the currently used confluence page
###   types (structured-macros) - the result for unsupported pages is
###   "not satisfying"; enhancements (java) are required
### - opt: toc creation in root document in case you export a tree of documents
###   to separate files
### - opt: remove wiki credentials from script
###
### REQUIRED:
### - pandoc, retext, confluence2md, java (older version for confluence2md),
###   login for the confluence wiki
###
### SEE ALSO:
### - https://www.viaboxx.de/code/confluence2md/
### - https://github.com/viaboxxsystems/confluence2md
###


###
### CHANGELOG (LATEST ON TOP)
###
### 1.1.0 (2020-03-10) added support for http/https proxy and anonymous wiki
###                    access. thx to eric, nicolas and sylvain (orange, france)
###                    confluence2md jar file now has to be in the same path as
###                    c2m.
### 1.0.0 (2020-03-09) initial release
###


###
### c2m example pagelist
###
### example pagelist (field descriptions below); it uses the delimiter "|" for
### the four fields per line.
### copy/paste page id and title from the wiki; to get the wiki page_id you have
### to log in to the wiki, open the page and choose e.g. the history.
### depth: use depth to follow down the child-pages hierarchy if required:
### -1=infinite, 0=no children, #=number of child-page levels to follow.
### every hierarchy "0" entry will lead to the creation of a dedicated working
### directory where the page and its child-pages are stored.
### for better readability you can add spaces to the list, but use "|" as a
### delimiter. lines starting with a # are filtered by c2m.
###
### hierarchy | page_id  | page_title                      | depth
###
### 0         | 1018748  | ONAP Portal                     | 0
### 1.1       | 1018759  | ONAP Portal for users           | 0
### 1.2       | 1018762  | ONAP Portal for administrators  | 0
### 1.2.1     | 1018764  | Admins                          | 0
### 1.2.2     | 1018811  | Users                           | 0
### 1.2.3     | 1018821  | Portal Admins                   | 0
### 1.2.4     | 1018826  | Application Onboarding          | 0
### 1.2.5     | 1018832  | Widget Onboarding               | 0
### 1.2.6     | 1018835  | Edit Functional Menu            | 0
### 1.2.7     | 16004953 | Portal Microservices Onboarding | 0
###
### in case you want to export to only one single output page (that contains all
### child-pages of the above example) use:
###
### 0         | 1018748  | ONAP Portal                     | -1
###


###
### some initial variables
###

script_version="1.1.0 (2020-03-10)"

  user="*****";                      # replace ***** with your wiki login name
  passwd="*****";                    # replace ***** with your wiki password
  credentials="${user}":"${passwd}";
  server="https://wiki.onap.org";
  rst_editor="retext --preview";

# remove credentials for those using anonymous access
test "${credentials}" = "*****:*****" && credentials=""
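
# example: leaving user and passwd at their "*****" placeholders results in an
# empty credentials string, i.e. the wiki pages are exported via anonymous access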

# explicit script dir to locate jar file
basedir="$(cd "$(dirname "$0")"; pwd)"

###
### some initial tasks after the script has been started
###

###
### print script version, date and time
###

echo "INFO ***************************************************************************"
echo "INFO c2m Version ${script_version}, started $(date)";

###
### simple script argument handling
###

page_list=$1;

# check if there is an argument at all
if [[ "$page_list" == "" ]] ; then
  echo 'Usage: c2m [PAGELIST]'
  exit 1
fi

# check if argument is a file
if [ ! -f "$page_list" ] ; then
  echo "Error: can't find pagelist \"$page_list\""
  exit 1
fi
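
# usage example (file names are only placeholders, assuming the script is saved
# as "c2m" and a pagelist file "pagelist.txt" exists in the current directory):
#   ./c2m pagelist.txt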

###
### declare the functions of this script
###

###
### function: create working directory; save (only the last) existing one; remove older versions; do some error handling
###

function create_working_dir {

  # compose name for working directory
  #working_dir="${page_id}-${page_title}";
  #working_dir="${page_title}-id${page_id}";
  working_dir="${page_title}";
  echo "INFO ***************************************************************************"
  echo "INFO working directory \"$working_dir\" will be created"

  # check if current working directory is already in the list
  if [[ " ${existing_working_dirs[@]} " =~ " ${working_dir} " ]]; then
    echo "ERRR ***************************************************************************"
    echo "ERRR working directory \"${working_dir}\" already exists - check entries in page_list for duplicates"
    echo "ERRR exiting ..."
    exit 1
  else
    # store working_dir name for error handling
    existing_working_dirs+=("${working_dir}")
  fi

  # sample code
  #if [[ ! " ${array[@]} " =~ " ${value} " ]]; then
  #  # whatever you want to do when arr doesn't contain value
  #fi

  # check existence of working directory
  if [ -d "$working_dir" ]; then
    # check existence of old saved working directory
    if [ -d "${working_dir}.old" ]; then
      # remove the old saved working directory
      rm -r "${working_dir}.old";
    fi
    # save (only) the latest working directory
    mv "$working_dir" "$working_dir.old";
  fi
  # finally create the working directory and cd into it
  mkdir "$working_dir";
  cd "$working_dir" || exit 1
}
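
# note on the function above: when c2m is re-run, an already existing working
# directory is kept once as "<page_title>.old"; any older ".old" copy is
# removed before the latest one is saved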

###
### function: pull pages from wiki - currently we are testing some export variations
###

function pull_pages_from_wiki {

  # define outfile name
  #out_file="${page_title}-id${page_id}";
  out_file="${page_title}";

  # set proxy for those who need it
  test -n "${http_proxy}" && proxy="$(echo $http_proxy | sed -e 's,http://,-Dhttp.proxyHost=,' -e 's/:/ -Dhttp.proxyPort=/' -e 's:/$::')"
  test -n "${https_proxy}" && proxy="$proxy $(echo $https_proxy | sed -e 's,http://,-Dhttps.proxyHost=,' -e 's/:/ -Dhttps.proxyPort=/' -e 's:/$::')"
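
  # example (hypothetical values): with http_proxy="http://proxy.example.com:8080/"
  # the sed expression above should yield
  # "-Dhttp.proxyHost=proxy.example.com -Dhttp.proxyPort=8080"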

  # pull pages from wiki and convert to markdown (as a source for conversion by pandoc)
  java $proxy -jar "${basedir}"/confluence2md-2.1-fat.jar +H true +T false +RootPageTitle false +FootNotes true -maxHeaderDepth 7 -depth "${depth}" -v true -o "${out_file}.md" -u "${credentials}" -server "${server}" "${page_id}"
}

###
### function: simple search and (red colored) warning if special terms are detected in the md output file
###

function detect_unwanted_content_in_md_outfile {
  for search_term in "ecomp" "wiki.onap.com" "10.53.199.7" "at&t"
  do
    if grep "${search_term}" "${out_file}.md"; then
      echo -e "\e[31mWARN ***************************************************************************\e[39m";
      echo -e "\e[31mWARN term \"${search_term}\" detected in ${out_file}.md\e[39m";
    fi
  done
}

###
### function: pandoc conversion from md (variants) to rst - currently testing some conversion formats
###

function convert_md_outfile_to_rst {
  # depending on the given source format (--from) the results may vary
  #pandoc -s --toc --toc-depth=5 --from markdown_mmd --to rst "${out_file}.md" -o "${out_file}-markdown_mmd.rst"
  #pandoc -s --toc --toc-depth=5 --from markdown_strict --to rst "${out_file}.md" -o "${out_file}-markdown_strict.rst"
  #pandoc -s --toc --toc-depth=5 --from markdown_phpextra --to rst "${out_file}.md" -o "${out_file}-markdown_phpextra.rst"
  #pandoc -s --toc-depth=5 --from markdown_phpextra --to rst "${out_file}.md" -o "${out_file}-markdown_phpextra.rst"
  pandoc -s --toc-depth=5 --from markdown_phpextra --to rst "${out_file}.md" -o "${out_file}.rst"
}

###
### function: check results in rst editor
###

function open_rst_editor {
  #echo "DBUG ***************************************************************************"
  #echo "DBUG open \"${out_file}\*.rst\" with rst editor"
  $rst_editor ${out_file}*.rst &
}

###
### function: clean up export directories from files no longer needed
###

function clean_up {
  rm *.md 2>/dev/null
  rm attachments/*.json 2>/dev/null
  rm attachments/.*.json 2>/dev/null
}

###
### main: let's start the work ...
###

# read in the pagelist file, filter lines starting with a comment and create an array that contains all (uncommented) lines of the file

# sample code
# IFS=',' read -r -a page_array <<< "$page_list" # in case $page_list was defined as a variable in this script; use "," as the delimiter
#readarray -t page_array < $page_list; # old version

readarray -t page_array < <(grep -v "^#" "$page_list"); # new version which skips lines with comments
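
# example: for a pagelist like the one shown in the header above, page_array[0]
# holds the first uncommented line, e.g. "0 | 1018748 | ONAP Portal | 0"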

# INFO: show list of pages by printing every line of the array
echo "INFO ***************************************************************************"
for line in "${page_array[@]}"
do
  echo "INFO $line"
done

# the main loop reads the page_array line by line and processes the content
for line in "${page_array[@]}"
do

  # cut out values from the current line (delimiter is now the "|") and assign them to the correct variables
  hierarchy=$(echo "$line" | cut -f1 -d\|)
  page_id=$(echo "$line" | cut -f2 -d\|)
  page_title=$(echo "$line" | cut -f3 -d\|)
  depth=$(echo "$line" | cut -f4 -d\|)

  # remove leading and trailing spaces from variables
  hierarchy="$(echo -e "${hierarchy}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')";
  page_id="$(echo -e "${page_id}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')";
  page_title="$(echo -e "${page_title}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')";
  depth="$(echo -e "${depth}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')";

  # substitute all blanks in page_title with a minus sign
  page_title=$(echo -e "${page_title}" | tr '[:blank:]' '-');
  echo "DBUG page_title=\"$page_title\""

  # convert page_title to lowercase
  page_title=$(echo -e "${page_title}" | tr '[:upper:]' '[:lower:]');
  #echo "DBUG page_title=\"$page_title\""

  # remove all characters from page_title which may cause problems in the shell ... or are reserved by conventions of this script
  #page_title="$(echo -e "${page_title}" | sed -e 's/[^A-Za-z0-9._-]//g')"; # a less strict version
  page_title="$(echo -e "${page_title}" | sed -e 's/[^A-Za-z0-9-]//g')";
  echo "DBUG page_title=\"$page_title\""
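
  # example: a page_title of "ONAP Portal for users" (as in the pagelist in the
  # header) becomes "onap-portal-for-users" after the three steps above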

  # INFO: print variables to check content
  echo "INFO ***************************************************************************"
  echo "INFO hierarchy = \"$hierarchy\""
  echo "INFO page_id = \"$page_id\""
  echo "INFO page_title = \"$page_title\""
  echo "INFO depth = \"$depth\""

  # create working directory - done for every "hierarchy 0" entry of page_list
  if [ "$hierarchy" == "0" ]
  then
    create_working_dir
  fi

  # call functions to process page
  pull_pages_from_wiki
  detect_unwanted_content_in_md_outfile
  convert_md_outfile_to_rst
  open_rst_editor
  clean_up

# main loop end
done

# bye!
echo "INFO ***************************************************************************"
echo "INFO c2m Version ${script_version}, ended $(date)"
echo ""
exit 0