Coverage for gws-app/gws/test/inspire_validator/main.py: 0%
187 statements
« prev ^ index » next coverage.py v7.11.0, created at 2025-10-16 23:09 +0200
« prev ^ index » next coverage.py v7.11.0, created at 2025-10-16 23:09 +0200
1import os
2import sys
3import re
5import jq
7LOCAL_APP_DIR = os.path.abspath(os.path.dirname(__file__) + '/../../..')
8sys.path.insert(0, LOCAL_APP_DIR)
10import gws
11import gws.lib.cli as cli
12import gws.lib.jsonx as jsonx
13import gws.lib.net
15USAGE = """
16GWS INSPIRE validator
17~~~~~~~~~~~~~~~~~~~~~
19Commands:
21 main.py dir
22 - show available test suites
24 main.py run <suite-filter> <url>
25 - run suites matching the filter against the given URL
27Options:
28 --host - validator host (default: 127.0.0.1)
29 --port - validator port (default: 8090)
30 --all - report all tests (default: only failed)
31 --save-parsed <path> - save parsed output
32 --save-raw <path> - save raw output
34The validator docker container must be started before running this script (see `inspire_validator/docker-compose.yml`).
36"""
38OPTIONS = {}
def main(args):
    """Entry point: parse CLI options and dispatch the `dir` or `run` command.

    Returns a process exit code: 0 on success, 1 when any test failed.
    Aborts via cli.fatal on bad arguments.
    """
    OPTIONS['host'] = args.get('host', '127.0.0.1')
    OPTIONS['port'] = args.get('port', '8090')
    OPTIONS['save-raw'] = args.get('save-raw', '')
    OPTIONS['save-parsed'] = args.get('save-parsed', '')
    OPTIONS['all'] = args.get('all', False)

    cmd = args.get(1)

    if cmd == 'dir':
        _show_suites()
        return 0

    if cmd == 'run':
        uids = list(_get_suite_uids(args.get(2, '')))
        if not uids:
            cli.fatal('no test suites found')
        url = args.get(3)
        if not url:
            # BUG FIX: <url> is the third positional argument (see USAGE),
            # the old message 'no --url option' pointed users at a non-existent option
            cli.fatal('no url given')
        ok = _run_tests(uids, url)
        return 0 if ok else 1

    # BUG FIX: an unknown command used to fall through and implicitly
    # return None, which exits with status 0 as if everything succeeded
    cli.fatal(f'invalid command {cmd!r}')
65##
def _get_object_types():
    """Yield (id, label, description) for every TestObjectType the validator knows."""
    data = _invoke('TestObjectTypes.json')
    for obj in _get_list('.. | .TestObjectType?', data):
        yield obj['id'], obj['label'], obj['description']
def _get_suite_uids(search):
    """Yield ids of suites whose id equals `search` or whose label contains it (case-insensitive)."""
    needle = search.lower()
    for suite in _get_suites():
        if suite['id'] == search or needle in suite['label'].lower():
            yield suite['id']
def _show_suites():
    """Print all available test suites as a text table."""
    table = cli.text_table(_get_suites(), header='auto')
    print(table)
def _get_suites():
    """Yield dict(id, label, type) for every executable test suite.

    The suite's object type label is resolved from the `supportedTestObjectTypes`
    href, which looks like
    "http://host:port/validator/v2/TestObjectTypes/<uid>.json";
    the trailing uid maps to an 'EID'-prefixed key in the TestObjectTypes listing.
    """
    types = {t[0]: t[1] for t in _get_object_types()}

    res = _invoke('ExecutableTestSuites.json')

    for q in _get_list('.. | .ExecutableTestSuite?', res):
        href = jq.first('.supportedTestObjectTypes.testObjectType.href', q)
        typ = ''
        m = re.search(r'([^/]+)\.json$', href)
        if m:
            # BUG FIX: default to '' for unknown uids; the plain .get() could
            # return None here, silently replacing the '' initializer above
            typ = types.get('EID' + m.group(1), '')
        yield dict(id=q['id'], label=q['label'], type=typ)
def _run_tests(suite_uids, url):
    """Execute the given suites against `url`; return True when all tests passed."""
    cli.info(f'running suites {suite_uids}')
    return _save_and_report(_invoke_tests(suite_uids, url))
def _invoke_tests(suite_uids, url):
    """Start a test run on the validator and poll it every 3 seconds.

    Returns the final TestRun response once its status leaves 'UNDEFINED'.
    Aborts via cli.fatal on any error response or a missing run uid.
    """
    payload = {
        "executableTestSuiteIds": suite_uids,
        "label": "test",
        "arguments": {},
        "testObject": {
            "resources": {
                "serviceEndpoint": url
            }
        }
    }

    res = _invoke('TestRuns', method='POST', json=payload)
    if res.get('error'):
        cli.fatal(res['error'])

    uid = jq.first('.EtfItemCollection.testRuns.TestRun.id', res)
    if not uid:
        cli.fatal('no test run uid')

    status = 'UNDEFINED'
    while status == 'UNDEFINED':
        gws.u.sleep(3)
        res = _invoke(f'TestRuns/{uid}')
        if res.get('error'):
            cli.fatal(res['error'])
        status = jq.first('.EtfItemCollection.testRuns.TestRun.status', res)
        if status == 'UNDEFINED':
            cli.info(f'test {uid}: waiting...')
    return res
def _save_and_report(res):
    """Parse the TestRun response, optionally save raw/parsed output, and print a report.

    Returns True when every parsed test passed.
    """
    if OPTIONS['save-raw']:
        jsonx.to_path(OPTIONS['save-raw'], res, pretty=True)

    results = _parse_test_results(res)
    total = len(results)
    stats = f'TOTAL={total}'

    by_status = {}
    for r in results:
        by_status[r['status']] = by_status.get(r['status'], 0) + 1
    for k, v in sorted(by_status.items()):
        stats += f' {k}={v}'

    if not OPTIONS['all']:
        # by default, report only tests that did not pass
        results = [r for r in results if r['status'] != 'passed']

    if OPTIONS['save-parsed']:
        jsonx.to_path(OPTIONS['save-parsed'], results, pretty=True)

    cli.info('')
    cli.info(stats)
    cli.info('')

    for r in results:
        _report_result(r)

    # BUG FIX: compare the passed count against the pre-filter total.
    # The original compared against len(results) AFTER the failed-only filter,
    # so a fully passing run (filtered results == []) was reported as a failure.
    return by_status.get('passed', 0) == total
def _report_result(r):
    """Print one parsed test result; FAILED results go through cli.error."""
    out = cli.error if r['status'] == 'FAILED' else cli.info

    out(r['id'])

    message = r['status']
    if r['message']:
        message = message + ': ' + r['message']
    out(message)

    specs = r['specs']
    if specs:
        _report_expressions(specs[0], out)

    for spec in specs:
        out('>> ' + spec.get('label', 'UNKNOWN'))

    out('')
    out('=' * 80)
    out('')
189def _report_expressions(s, fn):
190 q = s.get('expression', '')
191 if q:
192 fn('| ')
193 for ln in str(q).splitlines():
194 fn('| ' + ln)
196 q = s.get('statementForExecution', '')
197 if q and q != 'NOT_APPLICABLE':
198 fn('| ')
199 for ln in str(q).splitlines():
200 fn('| ' + ln)
202 q = s.get('expectedResult', '')
203 if q and q != 'NOT_APPLICABLE':
204 fn('| ')
205 fn('| EXPECTED=' + str(q))
207 fn('| ')
def _parse_test_results(res):
    """Flatten a TestRun response into a list of parsed result dicts.

    Translation templates are collected first so that result messages can be
    expanded; step results that carry assertion results are expanded into
    those, otherwise the step itself is reported.
    """
    messages = {}
    for tpl in jq.all('.. | .LangTranslationTemplateCollection? | .[]?', res):
        messages[tpl['name']] = jq.first('.translationTemplates.TranslationTemplate["$"]', tpl)
    # the default is just silly
    messages['TR.fallbackInfo'] = '{INFO}'

    collected = []
    for step in _get_list('.. | .TestStepResult?', res):
        if 'testAssertionResults' in step:
            collected.extend(_get_list('.. | .TestAssertionResult?', step))
        else:
            collected.append(step)

    return [_parse_single_result(q, res, messages) for q in collected]
def _parse_single_result(q, res, all_messages):
    """Convert one TestAssertionResult/TestStepResult node into a flat dict.

    Returns a dict with:
      id      - result uid
      status  - 'passed' for any status starting with PASSED, else the raw status
      message - expanded translation-template messages, concatenated
      specs   - chain of spec dicts: the direct `resultedFrom` href (if any),
                then each referenced node up through its `parent` refs
    """
    r = dict(
        id=q['id'],
        status='passed' if q['status'].startswith('PASSED') else q['status'],
        message='',
        specs=[],
    )

    # expand each message via its translation template, substituting {token} arguments
    for m in _get_list('.messages.message', q):
        msg = all_messages.get(m['ref'], '')
        for a in _get_list('.. | .argument?', m):
            msg = msg.replace('{' + a['token'] + '}', str(a['$']))
        r['message'] += m['ref'] + ': ' + msg

    # a plain href (external spec document) becomes a label-only first entry
    ref = jq.first('. | .resultedFrom? | .href', q)
    if ref:
        r['specs'] = [dict(label=ref)]

    # walk the resultedFrom/parent ref chain; note that `q` is rebound to each
    # referenced node as we ascend
    ref = jq.first('. | .resultedFrom? | .ref', q)
    while ref:
        q = jq.first(f'.. | select(.id? == "{ref}")', res)
        r['specs'].append(_pick(q, 'id', 'label', 'description', 'statementForExecution', 'expression', 'expectedResult'))
        ref = jq.first('. | .parent? | .ref', q)

    return r
def _invoke(path, **kwargs):
    """Request `path` from the validator API and return the decoded JSON response.

    Extra keyword args (e.g. method, json) are passed to http_request.
    On an HTTP error, the (JSON) error body is printed if possible and the
    script aborts via cli.fatal.
    """
    url = f'http://{OPTIONS["host"]}:{OPTIONS["port"]}/validator/v2/{path}'
    cli.info(f'>> {url}')
    try:
        res = gws.lib.net.http_request(url, headers={'Accept': 'application/json'}, **kwargs)
        res.raise_if_failed()
        return jsonx.from_string(res.text)
    except gws.lib.net.Error as exc:
        try:
            body = jsonx.from_string(exc.args[1])
            cli.error(jsonx.to_pretty_string(body))
        # FIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        except Exception:
            # error body was not JSON (or missing) - report the exception itself
            cli.error(exc)
        cli.fatal('HTTP ERROR')
def _get_list(q, where):
    """Run jq query `q` over `where`, flattening list results and dropping falsy ones."""
    for item in jq.all(q, where):
        if not item:
            continue
        if isinstance(item, list):
            yield from item
        else:
            yield item
285def _pick(d, *keys):
286 o = {}
287 for k in keys:
288 if k not in d or d[k] is None:
289 o[k] = ''
290 else:
291 o[k] = d[k]
292 return o
295def _unhtml(s):
296 s = re.sub(r'<[^>]+>', '', s)
297 return re.sub(r'\s+', ' ', s.strip())
def _pr(r):
    """Debug helper: pretty-print `r` as JSON to stdout."""
    text = jsonx.to_pretty_string(r)
    print(text)
304##
# Script entry point: run `main` through the gws CLI harness with USAGE as help text.
if __name__ == '__main__':
    cli.main('test', main, USAGE)