# To catch remaining results, look up by name and address.
# We only do this if a reasonable number of results is expected.
- exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
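+ # 2**0 == 1, so dividing unconditionally is a no-op when addr_tokens is empty.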
+ exp_count /= 2**len(addr_tokens)
if exp_count < 10000 and addr_count < 20000:
penalty += 0.35 * max(1 if name_fulls else 0.1,
5 - len(name_partials) - len(addr_tokens))
except ValueError as exc:
raise UsageError('Point parameter needs to be numbers.') from exc
- if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0:
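+ # Reject coordinates outside the valid longitude/latitude range.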
+ if not -180.0 <= x <= 180.0 or not -90.0 <= y <= 90.0:
raise UsageError('Point coordinates invalid.')
return Point(x, y)
elif rank < 26 and extratags and 'linked_place' in extratags:
label = extratags['linked_place']
elif category == ('boundary', 'administrative'):
- label = ADMIN_LABELS.get((country or '', int(rank/2)))\
- or ADMIN_LABELS.get(('', int(rank/2)))\
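+ # Try the country-specific label first, then the generic table, then a default.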
+ label = ADMIN_LABELS.get((country or '', rank // 2))\
+ or ADMIN_LABELS.get(('', rank // 2))\
or 'Administrative'
elif category[1] == 'postal_code':
label = 'postcode'
Converts correctly for pre-10 and post-10 PostgreSQL versions.
"""
version = conn.info.server_version
- if version < 100000:
- return (int(version / 10000), int((version % 10000) / 100))
-
- return (int(version / 10000), version % 10000)
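+ # server_version is an integer: 9.6.3 -> 90603, 12.4 -> 120004;
+ # pre-10 releases pack the minor version into the hundreds digit.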
+ major, minor = divmod(version, 10000)
+ if major < 10:
+ minor //= 100
+ return major, minor
def postgis_version_tuple(conn: Connection) -> Tuple[int, int]:
places_per_sec = self.done_places / done_time
eta = (self.total_places - self.done_places) / places_per_sec
- LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f",
- self.done_places, int(done_time),
+ LOG.warning("Done %d in %.0f @ %.3f per second - %s ETA (seconds): %.2f",
+ self.done_places, done_time,
places_per_sec, self.name, eta)
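+ # Schedule the next progress report one log interval ahead at the current rate.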
self.next_info += int(places_per_sec) * self.log_interval
diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
places_per_sec = self.done_places / diff_seconds
- LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
- self.done_places, self.total_places, int(diff_seconds),
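+ # Final report with totals and the overall rate.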
+ LOG.warning("Done %d/%d in %.0f @ %.3f per second - FINISHED %s\n",
+ self.done_places, self.total_places, diff_seconds,
places_per_sec, self.name)
return self.done_places
async with QueryPool(dsn, place_threads, autocommit=True) as pool:
with tokenizer.name_analyzer() as analyzer:
- lines = 0
- for row in tar:
+ for lineno, row in enumerate(tar, 1):
try:
address = dict(street=row['street'], postcode=row['postcode'])
args = ('SRID=4326;' + row['geometry'],
%s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""",
args)
- lines += 1
- if lines == 1000:
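+ # enumerate() supplies the running count; emit a progress dot every 1000 rows.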
+ if not lineno % 1000:
print('.', end='', flush=True)
- lines = 0
print('', flush=True)
if self.count == 0:
raise ValueError("No points available for centroid.")
- return (float(self.sum_x/self.count)/10000000,
- float(self.sum_y/self.count)/10000000)
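+ # Sums are stored as 1e7-scaled integers; convert back to degrees here.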
+ return (self.sum_x / self.count / 10_000_000,
+ self.sum_y / self.count / 10_000_000)
def __len__(self) -> int:
return self.count
if isinstance(other, Collection) and len(other) == 2:
if all(isinstance(p, (float, int)) for p in other):
x, y = other
- self.sum_x += int(x * 10000000)
- self.sum_y += int(y * 10000000)
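+ # Scale to integers (1e-7 degrees, roughly centimetre resolution) so the
+ # running sums stay exact.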
+ self.sum_x += int(x * 10_000_000)
+ self.sum_y += int(y * 10_000_000)
self.count += 1
return self