# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for parsing parameters and outputting data
specifically for the v1 version of the API.
"""
from typing import Tuple, Optional, Any, Dict, Iterable
from itertools import chain
import re

from ..results import SearchResult, SearchResults, SourceTable
from ..types import SearchDetails, GeometryFormat

# Maximum address rank to use for each zoom level of the reverse API
# (index = zoom). The values for zoom levels 3-12, 15 and 18 are assumptions
# based on the documented v1 zoom/rank mapping.
REVERSE_MAX_RANKS = [2, 2, 2,   # 0-2   Continent/Sea
                     4, 4,      # 3-4   Country
                     8,         # 5     State
                     10, 10,    # 6-7   Region
                     12, 12,    # 8-9   County
                     16, 17,    # 10-11 City
                     18,        # 12    Town
                     19,        # 13    Village/Suburb
                     22,        # 14    Hamlet/Neighbourhood
                     25,        # 15    Localities
                     26,        # 16    Major Streets
                     27,        # 17    Minor Streets
                     30         # 18    Building
                     ]


def zoom_to_rank(zoom: int) -> int:
    """ Convert a zoom parameter into a rank according to the v1 API spec.
    """
    return REVERSE_MAX_RANKS[max(0, min(18, zoom))]


# Rank ranges selected by the 'featureType' parameter. The concrete entries
# below are assumptions based on the v1 API feature type documentation.
FEATURE_TYPE_TO_RANK: Dict[Optional[str], Tuple[int, int]] = {
    'country': (4, 4),
    'state': (8, 8),
    'city': (14, 16),
    'settlement': (8, 20)
}


def feature_type_to_rank(feature_type: Optional[str]) -> Tuple[int, int]:
    """ Convert a feature type parameter to a tuple of
        minimum rank and maximum rank.
    """
    return FEATURE_TYPE_TO_RANK.get(feature_type, (0, 30))
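
# Illustrative usage: with the assumed table above, feature_type_to_rank('city')
# yields (14, 16); unknown or missing feature types fall back to (0, 30).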


# pylint: disable=too-many-arguments,too-many-branches
def extend_query_parts(queryparts: Dict[str, Any], details: Dict[str, Any],
                       feature_type: Optional[str],
                       namedetails: bool, extratags: bool,
                       excluded: Iterable[str]) -> None:
    """ Add parameters from the details dictionary to the queryparts
        dictionary, which is suitable for use as a URL parameter dictionary.
    """
    parsed = SearchDetails.from_kwargs(details)
    if parsed.geometry_output != GeometryFormat.NONE:
        if GeometryFormat.GEOJSON in parsed.geometry_output:
            queryparts['polygon_geojson'] = '1'
        if GeometryFormat.KML in parsed.geometry_output:
            queryparts['polygon_kml'] = '1'
        if GeometryFormat.SVG in parsed.geometry_output:
            queryparts['polygon_svg'] = '1'
        if GeometryFormat.TEXT in parsed.geometry_output:
            queryparts['polygon_text'] = '1'
    if parsed.address_details:
        queryparts['addressdetails'] = '1'
    if namedetails:
        queryparts['namedetails'] = '1'
    if extratags:
        queryparts['extratags'] = '1'
    if parsed.geometry_simplification > 0.0:
        queryparts['polygon_threshold'] = f"{parsed.geometry_simplification:.6g}"
    if parsed.max_results != 10:
        queryparts['limit'] = str(parsed.max_results)
    if parsed.countries:
        queryparts['countrycodes'] = ','.join(parsed.countries)
    queryparts['exclude_place_ids'] = \
        ','.join(chain(excluded, map(str, (e for e in parsed.excluded if e > 0))))
    if parsed.viewbox is not None:
        queryparts['viewbox'] = ','.join(f"{c:.7g}" for c in parsed.viewbox.coords)
    if parsed.bounded_viewbox:
        queryparts['bounded'] = '1'
    if not details['dedupe']:
        queryparts['dedupe'] = '0'
    if feature_type in FEATURE_TYPE_TO_RANK:
        queryparts['featureType'] = feature_type


def deduplicate_results(results: SearchResults, max_results: int) -> SearchResults:
    """ Remove results that look like duplicates.

        Two results are considered the same if they have the same OSM ID
        or if they have the same category, display name and rank.
    """
    osm_ids_done = set()
    classification_done = set()
    deduped = SearchResults()
    for result in results:
        if result.source_table == SourceTable.POSTCODE:
            assert result.names and 'ref' in result.names
            if any(_is_postcode_relation_for(r, result.names['ref']) for r in results):
                continue
        if result.source_table == SourceTable.PLACEX:
            # Same OSM type, category, display name and rank count as a
            # duplicate (rank_address is assumed as the rank attribute here).
            classification = (result.osm_object[0] if result.osm_object else None,
                              result.category,
                              result.display_name,
                              result.rank_address)
            if result.osm_object not in osm_ids_done \
               and classification not in classification_done:
                deduped.append(result)
            osm_ids_done.add(result.osm_object)
            classification_done.add(classification)
        else:
            deduped.append(result)
        if len(deduped) >= max_results:
            break

    return deduped


def _is_postcode_relation_for(result: SearchResult, postcode: str) -> bool:
    return result.source_table == SourceTable.PLACEX \
           and result.osm_object is not None \
           and result.osm_object[0] == 'R' \
           and result.category == ('boundary', 'postal_code') \
           and result.names is not None \
           and result.names.get('ref') == postcode
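

# The helpers below build regex fragments for a single coordinate axis in
# degree, degree/minute and degree/minute/second notation. 'axis' is the
# prefix used for the named capture groups ('lat' or 'lon').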
def _deg(axis: str) -> str:
    return f"(?P<{axis}_deg>\\d+\\.\\d+)°?"


def _deg_min(axis: str) -> str:
    return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>[\\d.]+)[′']*"


def _deg_min_sec(axis: str) -> str:
    return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>\\d+)[′'\\s]+(?P<{axis}_sec>[\\d.]+)[\"″]*"
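
# Each compiled pattern matches a complete query consisting of optional free
# text ('pre'), a coordinate in one of the supported notations (N/S and E/W
# prefixed or suffixed, or plain signed decimal degrees), and optional
# trailing free text ('post').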
COORD_REGEX = [re.compile(r'(?:(?P<pre>.*?)\s+)??' + r + r'(?:\s+(?P<post>.*))?') for r in (
    r"(?P<ns>[NS])\s*" + _deg('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg('lon'),
    _deg('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg('lon') + r"\s*(?P<ew>[EW])",
    r"(?P<ns>[NS])\s*" + _deg_min('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min('lon'),
    _deg_min('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min('lon') + r"\s*(?P<ew>[EW])",
    r"(?P<ns>[NS])\s*" + _deg_min_sec('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min_sec('lon'),
    _deg_min_sec('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min_sec('lon') + r"\s*(?P<ew>[EW])",
    r"\[?(?P<lat_deg>[+-]?\d+\.\d+)[\s,]+(?P<lon_deg>[+-]?\d+\.\d+)\]?"
)]


def extract_coords_from_query(query: str) -> Tuple[str, Optional[float], Optional[float]]:
    """ Look for something that is formatted like a coordinate at the
        beginning or end of the query. If found, extract the coordinate and
        return the remaining query (or the empty string if the query
        consisted of nothing but a coordinate).

        Only the first match will be returned.
    """
    for regex in COORD_REGEX:
        match = regex.fullmatch(query)
        if match is None:
            continue
        groups = match.groupdict()
        if not groups['pre'] or not groups['post']:
            x = float(groups['lon_deg']) \
                + float(groups.get('lon_min', 0.0)) / 60.0 \
                + float(groups.get('lon_sec', 0.0)) / 3600.0
            if groups.get('ew') == 'W':
                x = -x
            y = float(groups['lat_deg']) \
                + float(groups.get('lat_min', 0.0)) / 60.0 \
                + float(groups.get('lat_sec', 0.0)) / 3600.0
            if groups.get('ns') == 'S':
                y = -y
            return groups['pre'] or groups['post'] or '', x, y

    return query, None, None
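
# Illustrative example (values are only for demonstration):
#   extract_coords_from_query('40.7128, -74.0060 New York')
#   -> ('New York', -74.006, 40.7128)   # (remaining query, x/longitude, y/latitude)


# Matches a '[key=value]' filter anywhere in the query string.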
CATEGORY_REGEX = re.compile(r'(?P<pre>.*?)\[(?P<cls>[a-zA-Z_]+)=(?P<typ>[a-zA-Z_]+)\](?P<post>.*)')

def extract_category_from_query(query: str) -> Tuple[str, Optional[str], Optional[str]]:
    """ Extract a hidden category specification of the form '[key=value]' from
        the query. If found, extract key and value and
        return the remaining query (or the empty string if the query
        consisted of nothing but a category).

        Only the first match will be returned.
    """
    match = CATEGORY_REGEX.search(query)
    if match is not None:
        return (match.group('pre').strip() + ' ' + match.group('post').strip()).strip(), \
               match.group('cls'), match.group('typ')

    return query, None, None
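

# Illustrative example (values are only for demonstration):
#   extract_category_from_query('bars [amenity=pub] Berlin')
#   -> ('bars Berlin', 'amenity', 'pub')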