import io
import re
import logging
-from tidylib import tidy_document
import xml.etree.ElementTree as ET
import subprocess
from urllib.parse import urlencode
from collections import OrderedDict
-from nose.tools import * # for assert functions
+
+from check_functions import Almost
logger = logging.getLogger(__name__)
if h == 'ID':
pass
elif h == 'osm':
- assert_equal(res['osm_type'], row[h][0])
- assert_equal(res['osm_id'], row[h][1:])
+ assert res['osm_type'] == row[h][0]
+ assert res['osm_id'] == int(row[h][1:])
elif h == 'centroid':
x, y = row[h].split(' ')
- assert_almost_equal(float(y), float(res['lat']))
- assert_almost_equal(float(x), float(res['lon']))
+ assert Almost(float(y)) == float(res['lat'])
+ assert Almost(float(x)) == float(res['lon'])
elif row[h].startswith("^"):
- assert_in(h, res)
- assert_is_not_none(re.fullmatch(row[h], res[h]),
- "attribute '%s': expected: '%s', got '%s'"
- % (h, row[h], res[h]))
+ assert h in res
+ assert re.fullmatch(row[h], res[h]) is not None, \
+ "attribute '%s': expected: '%s', got '%s'" % (h, row[h], res[h])
else:
- assert_in(h, res)
- assert_equal(str(res[h]), str(row[h]))
+ assert h in res
+ assert str(res[h]) == str(row[h])
def property_list(self, prop):
return [ x[prop] for x in self.result ]
self.header['json_func'] = m.group(1)
self.result = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(code)
- def parse_html(self):
- content, errors = tidy_document(self.page,
- options={'char-encoding' : 'utf8'})
- #eq_(len(errors), 0 , "Errors found in HTML document:\n%s" % errors)
-
- b = content.find('nominatim_results =')
- e = content.find('</script>')
- content = content[b:e]
- b = content.find('[')
- e = content.rfind(']')
-
- self.result = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(content[b:e+1])
+ def parse_geojson(self):
+ self.parse_json()
+ self.result = geojson_results_to_json_results(self.result)
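+ # geocodejson nests the result attributes in a 'geocoding' object
+ # inside each feature's properties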
+ def parse_geocodejson(self):
+ self.parse_geojson()
+ if self.result is not None:
+ self.result = [r['geocoding'] for r in self.result]
def parse_xml(self):
et = ET.fromstring(self.page)
self.header = dict(et.attrib)
for child in et:
- assert_equal(child.tag, "place")
+ assert child.tag == "place"
self.result.append(dict(child.attrib))
address = {}
if errorcode == 200:
getattr(self, 'parse_' + fmt)()
- def parse_html(self):
- content, errors = tidy_document(self.page,
- options={'char-encoding' : 'utf8'})
- #eq_(len(errors), 0 , "Errors found in HTML document:\n%s" % errors)
-
- b = content.find('nominatim_results =')
- e = content.find('</script>')
- content = content[b:e]
- b = content.find('[')
- e = content.rfind(']')
-
- self.result = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(content[b:e+1])
-
def parse_json(self):
m = re.fullmatch(r'([\w$][^(]*)\((.*)\)', self.page)
if m is None:
self.header['json_func'] = m.group(1)
self.result = [json.JSONDecoder(object_pairs_hook=OrderedDict).decode(code)]
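+ # parse_json wraps the decoded page in a one-element list, so the
+ # GeoJSON document itself is self.result[0]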
+ def parse_geojson(self):
+ self.parse_json()
+ if 'error' in self.result[0]:
+ self.result = None
+ return
+ self.result = geojson_results_to_json_results(self.result[0])
+
+ def parse_geocodejson(self):
+ self.parse_geojson()
+ if self.result is not None:
+ self.result = [r['geocoding'] for r in self.result]
+
def parse_xml(self):
et = ET.fromstring(self.page)
for child in et:
if child.tag == 'result':
- eq_(0, len(self.result), "More than one result in reverse result")
+ assert len(self.result) == 0, "More than one result in reverse result"
self.result.append(dict(child.attrib))
elif child.tag == 'addressparts':
address = {}
if errorcode == 200:
getattr(self, 'parse_' + fmt)()
- def parse_html(self):
- content, errors = tidy_document(self.page,
- options={'char-encoding' : 'utf8'})
- self.result = {}
+ def parse_json(self):
+ self.result = [json.JSONDecoder(object_pairs_hook=OrderedDict).decode(self.page)]
+
+
+class StatusResponse(GenericResponse):
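+ """ Response of the /status API call. The default format is plain
+ text, which is left unparsed; structured formats such as json are
+ dispatched to the matching parse_* method.
+ """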
+
+ def __init__(self, page, fmt='text', errorcode=200):
+ self.page = page
+ self.format = fmt
+ self.errorcode = errorcode
+
+ if errorcode == 200 and fmt != 'text':
+ getattr(self, 'parse_' + fmt)()
def parse_json(self):
self.result = [json.JSONDecoder(object_pairs_hook=OrderedDict).decode(self.page)]
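+# Convert a single GeoJSON feature into the classic JSON result layout:
+# the feature properties become the top-level attributes, the geometry is
+# kept under 'geojson' and an optional bbox is reordered into 'boundingbox'.
+# Example: {"properties": {"osm_type": "W"}, "geometry": G, "bbox": [7.0, 51.0, 7.1, 51.2]}
+# becomes: {"osm_type": "W", "geojson": G, "boundingbox": [51.0, 51.2, 7.0, 7.1]}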
+def geojson_result_to_json_result(geojson_result):
+ result = geojson_result['properties']
+ result['geojson'] = geojson_result['geometry']
+ if 'bbox' in geojson_result:
+ # bbox is minlon, minlat, maxlon, maxlat
+ # boundingbox is minlat, maxlat, minlon, maxlon
+ result['boundingbox'] = [
+ geojson_result['bbox'][1],
+ geojson_result['bbox'][3],
+ geojson_result['bbox'][0],
+ geojson_result['bbox'][2]
+ ]
+ return result
+
+
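+# Flatten a GeoJSON FeatureCollection into a list of classic JSON results.
+# Returns None when the document is an error response, so that callers can
+# skip further processing.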
+def geojson_results_to_json_results(geojson_results):
+ if 'error' in geojson_results:
+ return
+ return list(map(geojson_result_to_json_result, geojson_results['features']))
+
+
@when(u'searching for "(?P<query>.*)"(?P<dups> with dups)?')
def query_cmd(context, query, dups):
""" Query directly via PHP script.
"""
cmd = ['/usr/bin/env', 'php']
cmd.append(os.path.join(context.nominatim.build_dir, 'utils', 'query.php'))
- cmd.extend(['--search', query])
+ if query:
+ cmd.extend(['--search', query])
# add more parameters in table form
if context.table:
for h in context.table.headings:
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outp, err) = proc.communicate()
- assert_equals (0, proc.returncode, "query.php failed with message: %s\noutput: %s" % (err, outp))
+ assert proc.returncode == 0, "query.php failed with message: %s\noutput: %s" % (err, outp)
+ logger.debug("run_nominatim_script: %s\n%s\n" % (cmd, outp.decode('utf-8').replace('\\n', '\n')))
context.response = SearchResponse(outp.decode('utf-8'), 'json')
env['SCRIPT_NAME'] = '/%s.php' % endpoint
env['REQUEST_URI'] = '%s?%s' % (env['SCRIPT_NAME'], env['QUERY_STRING'])
- env['CONTEXT_DOCUMENT_ROOT'] = os.path.join(context.nominatim.build_dir, 'website')
+ env['CONTEXT_DOCUMENT_ROOT'] = os.path.join(context.nominatim.website_dir.name, 'website')
env['SCRIPT_FILENAME'] = os.path.join(env['CONTEXT_DOCUMENT_ROOT'],
'%s.php' % endpoint)
- env['NOMINATIM_SETTINGS'] = context.nominatim.local_settings_file
logger.debug("Environment:" + json.dumps(env, sort_keys=True, indent=2))
for k,v in params.items():
cmd.append("%s=%s" % (k, v))
- proc = subprocess.Popen(cmd, cwd=context.nominatim.build_dir, env=env,
+ proc = subprocess.Popen(cmd, cwd=context.nominatim.website_dir.name, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outp, err) = proc.communicate()
outp = outp.decode('utf-8')
+ err = err.decode("utf-8")
logger.debug("Result: \n===============================\n"
+ outp + "\n===============================\n")
- assert_equals(0, proc.returncode,
- "%s failed with message: %s\noutput: %s" % (env['SCRIPT_FILENAME'], err, outp))
+ assert proc.returncode == 0, \
+ "%s failed with message: %s" % (
+ os.path.basename(env['SCRIPT_FILENAME']), err)
- assert_equals(0, len(err), "Unexpected PHP error: %s" % (err))
+ assert len(err) == 0, "Unexpected PHP error: %s" % (err)
if outp.startswith('Status: '):
status = int(outp[8:11])
outp, status = send_api_query('search', params, fmt, context)
- if fmt is None:
- outfmt = 'html'
- elif fmt == 'jsonv2 ':
+ if fmt is None or fmt == 'jsonv2 ':
outfmt = 'json'
else:
outfmt = fmt.strip()
outp, status = send_api_query('details', params, fmt, context)
if fmt is None:
- outfmt = 'html'
+ outfmt = 'json'
else:
outfmt = fmt.strip()
if fmt == 'json ':
outfmt = 'json'
+ elif fmt == 'jsonv2 ':
+ outfmt = 'json'
+ elif fmt == 'geojson ':
+ outfmt = 'geojson'
+ elif fmt == 'geocodejson ':
+ outfmt = 'geocodejson'
else:
outfmt = 'xml'
context.response = SearchResponse(outp, outfmt, status)
+@when(u'sending (?P<fmt>\S+ )?status query')
+def website_status_request(context, fmt):
+ params = {}
+ outp, status = send_api_query('status', params, fmt, context)
+
+ if fmt is None:
+ outfmt = 'text'
+ else:
+ outfmt = fmt.strip()
+
+ context.response = StatusResponse(outp, outfmt, status)
+
@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
def validate_result_number(context, operator, number):
- eq_(context.response.errorcode, 200)
+ assert context.response.errorcode == 200
numres = len(context.response.result)
- ok_(compare(operator, numres, int(number)),
- "Bad number of results: expected %s %s, got %d." % (operator, number, numres))
+ assert compare(operator, numres, int(number)), \
+ "Bad number of results: expected %s %s, got %d." % (operator, number, numres)
@then(u'a HTTP (?P<status>\d+) is returned')
def check_http_return_status(context, status):
- eq_(context.response.errorcode, int(status))
+ assert context.response.errorcode == int(status)
+
+@then(u'the page contents equals "(?P<text>.+)"')
+def check_page_content_equals(context, text):
+ assert context.response.page == text
@then(u'the result is valid (?P<fmt>\w+)')
def step_impl(context, fmt):
context.execute_steps("Then a HTTP 200 is returned")
- eq_(context.response.format, fmt)
+ assert context.response.format == fmt
+
+@then(u'a (?P<fmt>\w+) user error is returned')
+def check_page_error(context, fmt):
+ context.execute_steps("Then a HTTP 400 is returned")
+ assert context.response.format == fmt
+
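+ # the error must be reported as an <error> element in XML output or as
+ # an "error" member in the JSON-based formats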
+ if fmt == 'xml':
+ assert re.search(r'<error>.+</error>', context.response.page, re.DOTALL) is not None
+ else:
+ assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None
@then(u'result header contains')
def check_header_attr(context):
for line in context.table:
- assert_is_not_none(re.fullmatch(line['value'], context.response.header[line['attr']]),
- "attribute '%s': expected: '%s', got '%s'"
- % (line['attr'], line['value'],
- context.response.header[line['attr']]))
+ assert re.fullmatch(line['value'], context.response.header[line['attr']]) is not None, \
+ "attribute '%s': expected: '%s', got '%s'" % (
+ line['attr'], line['value'],
+ context.response.header[line['attr']])
@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)')
def check_header_no_attr(context, neg, attrs):
for attr in attrs.split(','):
if neg:
- assert_not_in(attr, context.response.header)
+ assert attr not in context.response.header
else:
- assert_in(attr, context.response.header)
+ assert attr in context.response.header
@then(u'results contain')
def step_impl(context):
for i in idx:
for attr in attrs.split(','):
if neg:
- assert_not_in(attr, context.response.result[i])
+ assert attr not in context.response.result[i]
else:
- assert_in(attr, context.response.result[i])
+ assert attr in context.response.result[i]
@then(u'result addresses contain')
def step_impl(context):
for h in context.table.headings:
if h != 'ID':
for p in addr_parts:
- assert_in(h, p)
- assert_equal(p[h], line[h], "Bad address value for %s" % h)
+ assert h in p
+ assert p[h] == line[h], "Bad address value for %s" % h
@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address(context, lid, neg, attrs):
for attr in attrs.split(','):
if neg:
- assert_not_in(attr, addr_parts)
+ assert attr not in addr_parts
else:
- assert_in(attr, addr_parts)
+ assert attr in addr_parts
-@then(u'address of result (?P<lid>\d+) is')
-def check_address(context, lid):
+@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
+def check_address(context, lid, complete):
context.execute_steps("then more than %s results are returned" % lid)
addr_parts = dict(context.response.result[int(lid)]['address'])
for line in context.table:
- assert_in(line['type'], addr_parts)
- assert_equal(addr_parts[line['type']], line['value'],
- "Bad address value for %s" % line['type'])
+ assert line['type'] in addr_parts
+ assert addr_parts[line['type']] == line['value'], \
+ "Bad address value for %s" % line['type']
del addr_parts[line['type']]
- eq_(0, len(addr_parts), "Additional address parts found: %s" % str(addr_parts))
+ if complete == 'is':
+ assert len(addr_parts) == 0, "Additional address parts found: %s" % str(addr_parts)
@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
def step_impl(context, lid, coords):
bbox = bbox.split(',')
bbox = [ float(x) for x in bbox ]
- assert_greater_equal(bbox[0], coord[0])
- assert_less_equal(bbox[1], coord[1])
- assert_greater_equal(bbox[2], coord[2])
- assert_less_equal(bbox[3], coord[3])
+ assert bbox[0] >= coord[0]
+ assert bbox[1] <= coord[1]
+ assert bbox[2] >= coord[2]
+ assert bbox[3] <= coord[3]
+
+@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
+def step_impl(context, lid, coords):
+ if lid is None:
+ context.execute_steps("then at least 1 result is returned")
+ centroids = zip(context.response.property_list('lat'),
+ context.response.property_list('lon'))
+ else:
+ context.execute_steps("then more than %sresults are returned" % lid)
+ res = context.response.result[int(lid)]
+ centroids = [ (res['lat'], res['lon']) ]
+
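+ # the expected area is given as minlat,maxlat,minlon,maxlon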
+ coord = [ float(x) for x in coords.split(',') ]
+
+ for lat, lon in centroids:
+ lat = float(lat)
+ lon = float(lon)
+ assert lat >= coord[0]
+ assert lat <= coord[1]
+ assert lon >= coord[2]
+ assert lon <= coord[3]
@then(u'there are(?P<neg> no)? duplicates')
def check_for_duplicates(context, neg):