From 90de37d820b646cfaa4ccf9cded63c78bc747652 Mon Sep 17 00:00:00 2001 From: Sean Gillies Date: Thu, 9 Jun 2022 20:45:25 -0600 Subject: [PATCH] Deprecate use of feature and geometry dicts (#1116) * Deprecate use of feature and geometry dicts And eliminate internal usage. All test modules with changes have been reformatted using black. * Fix fio-cat --precision errors --- CHANGES.txt | 15 + fiona/_geometry.pyx | 43 +- fiona/_transform.pyx | 115 +---- fiona/collection.py | 189 ++++---- fiona/env.py | 16 +- fiona/errors.py | 2 +- fiona/fio/cat.py | 61 ++- fiona/fio/collect.py | 162 ++++--- fiona/fio/helpers.py | 128 +++--- fiona/fio/load.py | 44 +- fiona/model.py | 86 ++-- fiona/ogrext.pyx | 55 ++- fiona/transform.py | 48 +- tests/conftest.py | 378 ++++++++++------ tests/test_bigint.py | 63 +-- tests/test_binary_field.py | 36 +- tests/test_bounds.py | 13 +- tests/test_bytescollection.py | 77 ++-- tests/test_collection.py | 808 ++++++++++++++++++++-------------- tests/test_datetime.py | 645 +++++++++++++++++---------- tests/test_driver_options.py | 39 +- tests/test_env.py | 105 +++-- tests/test_feature.py | 172 +++++--- tests/test_fio_rm.py | 60 ++- tests/test_geojson.py | 103 +++-- tests/test_geometry.py | 152 ++++--- tests/test_geopackage.py | 94 ++-- tests/test_integration.py | 33 +- tests/test_model.py | 3 +- tests/test_multiconxn.py | 45 +- tests/test_open.py | 33 +- tests/test_props.py | 106 +++-- tests/test_remove.py | 32 +- tests/test_schema.py | 461 ++++++++++--------- tests/test_schema_geom.py | 70 ++- tests/test_slice.py | 189 ++++---- tests/test_subtypes.py | 33 +- tests/test_transactions.py | 35 +- tests/test_transform.py | 150 ++++--- tests/test_unicode.py | 141 +++--- tests/test_vfs.py | 125 +++--- tests/test_write.py | 39 +- 42 files changed, 3043 insertions(+), 2161 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 90a1bb2c2..b66e3f18d 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -6,6 +6,21 @@ All issue numbers are relative to https://github.com/Toblerity/Fiona/issues. 1.9a2 (TBD) ----------- +Deprecations: + +- Fiona's API methods will accept feature and geometry dicts in 1.9.0, but this + usage is deprecated. Instances of Feature and Geometry will be required in + 2.0. +- The precision keyword argument of fiona.transform.transform_geom is + deprecated and will be removed in version 2.0. +- Deprecated usage has been eliminated in the project. Fiona's tests pass when + run with a -Werror::DeprecationWarning filter. + +Changes: + +- Fiona's FionaDeprecationWarning now sub-classes DeprecationWarning. +- Some test modules have been re-formatted using black. 
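To make the headline deprecation concrete, here is a minimal sketch of the before-and-after calling convention under the 1.9 API described above; the file name, schema, and property values are invented for illustration:

    import fiona
    from fiona.model import Feature, Geometry

    # Deprecated in 1.9, to be removed in 2.0: writing a GeoJSON-like dict.
    # collection.write({"type": "Feature", "geometry": {...}, "properties": {...}})

    # 1.9-forward style: construct model objects explicitly.
    feat = Feature(
        geometry=Geometry(type="Point", coordinates=(-105.0, 40.0)),
        properties={"name": "example"},
    )
    schema = {"geometry": "Point", "properties": {"name": "str"}}
    with fiona.open("example.shp", "w", driver="ESRI Shapefile",
                    crs="EPSG:4326", schema=schema) as dst:
        dst.write(feat)

During the 1.9 series, dict arguments are still converted internally (with a FionaDeprecationWarning), as the _guard_model_object helper introduced below shows.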
+ New features: - Fiona Collections now carry a context exit stack into which we can push fiona diff --git a/fiona/_geometry.pyx b/fiona/_geometry.pyx index 2eef13ebf..1b0fb8ca8 100644 --- a/fiona/_geometry.pyx +++ b/fiona/_geometry.pyx @@ -5,7 +5,7 @@ from __future__ import absolute_import import logging from fiona.errors import UnsupportedGeometryTypeError -from fiona.model import GEOMETRY_TYPES, Geometry +from fiona.model import _guard_model_object, GEOMETRY_TYPES, Geometry from fiona._err cimport exc_wrap_int @@ -100,16 +100,16 @@ cdef class GeomBuilder: values.append(OGR_G_GetZ(geom, i)) coords.append(tuple(values)) return coords - + cpdef _buildPoint(self): return {'type': 'Point', 'coordinates': self._buildCoords(self.geom)[0]} - + cpdef _buildLineString(self): return {'type': 'LineString', 'coordinates': self._buildCoords(self.geom)} - + cpdef _buildLinearRing(self): return {'type': 'LinearRing', 'coordinates': self._buildCoords(self.geom)} - + cdef _buildParts(self, void *geom): cdef int j cdef void *part @@ -120,19 +120,19 @@ cdef class GeomBuilder: part = OGR_G_GetGeometryRef(geom, j) parts.append(GeomBuilder().build(part)) return parts - + cpdef _buildPolygon(self): coordinates = [p['coordinates'] for p in self._buildParts(self.geom)] return {'type': 'Polygon', 'coordinates': coordinates} - + cpdef _buildMultiPoint(self): coordinates = [p['coordinates'] for p in self._buildParts(self.geom)] return {'type': 'MultiPoint', 'coordinates': coordinates} - + cpdef _buildMultiLineString(self): coordinates = [p['coordinates'] for p in self._buildParts(self.geom)] return {'type': 'MultiLineString', 'coordinates': coordinates} - + cpdef _buildMultiPolygon(self): coordinates = [p['coordinates'] for p in self._buildParts(self.geom)] return {'type': 'MultiPolygon', 'coordinates': coordinates} @@ -140,7 +140,7 @@ cdef class GeomBuilder: cpdef _buildGeometryCollection(self): parts = self._buildParts(self.geom) return {'type': 'GeometryCollection', 'geometries': parts} - + cdef build(self, void *geom): # The only method anyone needs to call if geom == NULL: @@ -157,7 +157,7 @@ cdef class GeomBuilder: self.ndims = OGR_G_GetCoordinateDimension(geom) self.geom = geom built = getattr(self, '_build' + self.geomtypename)() - return Geometry(**built) + return Geometry.from_dict(**built) cpdef build_wkb(self, object wkb): # The only other method anyone needs to call @@ -189,20 +189,20 @@ cdef class OGRGeomBuilder: cdef void *cogr_geometry = self._createOgrGeometry(GEOJSON2OGR_GEOMETRY_TYPES['Point']) self._addPointToGeometry(cogr_geometry, coordinates) return cogr_geometry - + cdef void * _buildLineString(self, object coordinates) except NULL: cdef void *cogr_geometry = self._createOgrGeometry(GEOJSON2OGR_GEOMETRY_TYPES['LineString']) for coordinate in coordinates: self._addPointToGeometry(cogr_geometry, coordinate) return cogr_geometry - + cdef void * _buildLinearRing(self, object coordinates) except NULL: cdef void *cogr_geometry = self._createOgrGeometry(GEOJSON2OGR_GEOMETRY_TYPES['LinearRing']) for coordinate in coordinates: self._addPointToGeometry(cogr_geometry, coordinate) OGR_G_CloseRings(cogr_geometry) return cogr_geometry - + cdef void * _buildPolygon(self, object coordinates) except NULL: cdef void *cogr_ring cdef void *cogr_geometry = self._createOgrGeometry(GEOJSON2OGR_GEOMETRY_TYPES['Polygon']) @@ -235,17 +235,18 @@ cdef class OGRGeomBuilder: exc_wrap_int(OGR_G_AddGeometryDirectly(cogr_geometry, cogr_part)) return cogr_geometry - cdef void * _buildGeometryCollection(self, object 
coordinates) except NULL: + cdef void * _buildGeometryCollection(self, object geometries) except NULL: cdef void *cogr_part cdef void *cogr_geometry = self._createOgrGeometry(GEOJSON2OGR_GEOMETRY_TYPES['GeometryCollection']) - for part in coordinates: + for part in geometries: cogr_part = OGRGeomBuilder().build(part) exc_wrap_int(OGR_G_AddGeometryDirectly(cogr_geometry, cogr_part)) return cogr_geometry cdef void * build(self, object geometry) except NULL: - cdef object typename = geometry['type'] - cdef object coordinates = geometry.get('coordinates') + cdef object typename = geometry.type + cdef object coordinates = geometry.coordinates + cdef object geometries = geometry.geometries if typename == 'Point': return self._buildPoint(coordinates) elif typename == 'LineString': @@ -261,14 +262,14 @@ cdef class OGRGeomBuilder: elif typename == 'MultiPolygon': return self._buildMultiPolygon(coordinates) elif typename == 'GeometryCollection': - coordinates = geometry.get('geometries') - return self._buildGeometryCollection(coordinates) + return self._buildGeometryCollection(geometries) else: raise UnsupportedGeometryTypeError("Unsupported geometry type %s" % typename) -def geometryRT(geometry): +def geometryRT(geom): # For testing purposes only, leaks the JSON data + geometry = _guard_model_object(geom) cdef void *cogr_geometry = OGRGeomBuilder().build(geometry) result = GeomBuilder().build(cogr_geometry) _deleteOgrGeom(cogr_geometry) diff --git a/fiona/_transform.pyx b/fiona/_transform.pyx index 23a70b164..03a3e5e35 100644 --- a/fiona/_transform.pyx +++ b/fiona/_transform.pyx @@ -103,14 +103,10 @@ cdef object _transform_single_geom( OGRGeometryFactory *factory, void *transform, char **options, - object precision ): + """Transform a single geometry.""" cdef void *src_ogr_geom = NULL cdef void *dst_ogr_geom = NULL - cdef int i - - if not isinstance(single_geom, Geometry): - single_geom = Geometry.from_dict(**single_geom) src_ogr_geom = _geometry.OGRGeomBuilder().build(single_geom) dst_ogr_geom = factory.transformWithOptions( @@ -120,10 +116,7 @@ cdef object _transform_single_geom( if dst_ogr_geom == NULL: warnings.warn( - "Full reprojection failed, but partial is possible. To enable partial " - "reprojection wrap the transform_geom call like so:\n" - "with fiona.Env(OGR_ENABLE_PARTIAL_REPROJECTION=True):\n" - " transform_geom(...)" + "Full reprojection failed. 
To enable partial reprojection, set the GDAL config option OGR_ENABLE_PARTIAL_REPROJECTION, e.g. with fiona.Env(OGR_ENABLE_PARTIAL_REPROJECTION=True)." ) return None else: @@ -133,89 +126,11 @@ if src_ogr_geom != NULL: _geometry.OGR_G_DestroyGeometry(src_ogr_geom) - if precision >= 0: - - def round_point(g): - coords = list(g['coordinates']) - x, y = coords[:2] - x = round(x, precision) - y = round(y, precision) - new_coords = [x, y] - if len(coords) == 3: - z = coords[2] - new_coords.append(round(z, precision)) - return new_coords - - def round_linestring(g): - coords = list(zip(*g['coordinates'])) - xp, yp = coords[:2] - xp = [round(v, precision) for v in xp] - yp = [round(v, precision) for v in yp] - if len(coords) == 3: - zp = coords[2] - zp = [round(v, precision) for v in zp] - new_coords = list(zip(xp, yp, zp)) - else: - new_coords = list(zip(xp, yp)) - return new_coords - - def round_polygon(g): - new_coords = [] - for piece in out_geom['coordinates']: - coords = list(zip(*piece)) - xp, yp = coords[:2] - xp = [round(v, precision) for v in xp] - yp = [round(v, precision) for v in yp] - if len(coords) == 3: - zp = coords[2] - zp = [round(v, precision) for v in zp] - new_coords.append(list(zip(xp, yp, zp))) - else: - new_coords.append(list(zip(xp, yp))) - return new_coords - - def round_multipolygon(g): - parts = g['coordinates'] - new_coords = [] - for part in parts: - inner_coords = [] - for ring in part: - coords = list(zip(*ring)) - xp, yp = coords[:2] - xp = [round(v, precision) for v in xp] - yp = [round(v, precision) for v in yp] - if len(coords) == 3: - zp = coords[2] - zp = [round(v, precision) for v in zp] - inner_coords.append(list(zip(xp, yp, zp))) - else: - inner_coords.append(list(zip(xp, yp))) - new_coords.append(inner_coords) - return new_coords - - def round_geometry(g): - if g['type'] == 'Point': - g['coordinates'] = round_point(g) - elif g['type'] in ['LineString', 'MultiPoint']: - g['coordinates'] = round_linestring(g) - elif g['type'] in ['Polygon', 'MultiLineString']: - g['coordinates'] = round_polygon(g) - elif g['type'] == 'MultiPolygon': - g['coordinates'] = round_multipolygon(g) - else: - raise RuntimeError("Unsupported geometry type: {}".format(g['type'])) - - if out_geom['type'] == 'GeometryCollection': - for _g in out_geom['geometries']: - round_geometry(_g) - else: - round_geometry(out_geom) - return out_geom def _transform_geom(src_crs, dst_crs, geom, antimeridian_cutting, antimeridian_offset, precision): - """Return a transformed geometry. + """Return a transformed geometry or list of geometries. 
""" cdef char *proj_c = NULL @@ -246,11 +161,15 @@ def _transform_geom(src_crs, dst_crs, geom, antimeridian_cutting, antimeridian_o factory = new OGRGeometryFactory() - if isinstance(geom, DICT_TYPES): - out_geom = _transform_single_geom(geom, factory, transform, options, precision) + if isinstance(geom, Geometry): + out_geom = recursive_round( + _transform_single_geom(geom, factory, transform, options), precision) else: out_geom = [ - _transform_single_geom(single_geom, factory, transform, options, precision) + recursive_round( + _transform_single_geom(single_geom, factory, transform, options), + precision, + ) for single_geom in geom ] @@ -263,3 +182,17 @@ def _transform_geom(src_crs, dst_crs, geom, antimeridian_cutting, antimeridian_o OSRRelease(dst) return out_geom + + +def recursive_round(obj, precision): + """Recursively round coordinates.""" + if precision < 0: + return obj + if getattr(obj, 'geometries', None): + return Geometry(geometries=[recursive_round(part, precision) for part in obj.geometries]) + elif getattr(obj, 'coordinates', None): + return Geometry(coordinates=[recursive_round(part, precision) for part in obj.coordinates]) + if isinstance(obj, (int, float)): + return round(obj, precision) + else: + return [recursive_round(part, precision) for part in obj] diff --git a/fiona/collection.py b/fiona/collection.py index 4b8604894..d39da2099 100644 --- a/fiona/collection.py +++ b/fiona/collection.py @@ -53,12 +53,25 @@ class Collection(object): represented as GeoJSON-like mappings. """ - def __init__(self, path, mode='r', driver=None, schema=None, crs=None, - encoding=None, layer=None, vsi=None, archive=None, - enabled_drivers=None, crs_wkt=None, ignore_fields=None, - ignore_geometry=False, include_fields=None, - wkt_version=None, - **kwargs): + def __init__( + self, + path, + mode="r", + driver=None, + schema=None, + crs=None, + encoding=None, + layer=None, + vsi=None, + archive=None, + enabled_drivers=None, + crs_wkt=None, + ignore_fields=None, + ignore_geometry=False, + include_fields=None, + wkt_version=None, + **kwargs + ): """The required ``path`` is the absolute or relative path to a file, such as '/data/test_uk.shp'. In ``mode`` 'r', data can @@ -73,15 +86,17 @@ def __init__(self, path, mode='r', driver=None, schema=None, crs=None, In 'w' mode, kwargs will be mapped to OGR layer creation options. 
+ """ + self._closed = True if not isinstance(path, (str, Path)): raise TypeError("invalid path: %r" % path) - if not isinstance(mode, str) or mode not in ('r', 'w', 'a'): + if not isinstance(mode, str) or mode not in ("r", "w", "a"): raise TypeError("invalid mode: %r" % mode) if driver and not isinstance(driver, str): raise TypeError("invalid driver: %r" % driver) - if schema and not hasattr(schema, 'get'): + if schema and not hasattr(schema, "get"): raise TypeError("invalid schema: %r" % schema) if crs and not isinstance(crs, compat.DICT_TYPES + (str, CRS)): raise TypeError("invalid crs: %r" % crs) @@ -137,15 +152,23 @@ def __init__(self, path, mode='r', driver=None, schema=None, crs=None, self._closed = True # Check GDAL version against drivers - if driver in driver_mode_mingdal[mode] and get_gdal_version_tuple() < driver_mode_mingdal[mode][driver]: - min_gdal_version = ".".join(list(map(str, driver_mode_mingdal[mode][driver]))) + if ( + driver in driver_mode_mingdal[mode] + and get_gdal_version_tuple() < driver_mode_mingdal[mode][driver] + ): + min_gdal_version = ".".join( + list(map(str, driver_mode_mingdal[mode][driver])) + ) raise DriverError( "{driver} driver requires at least GDAL {min_gdal_version} for mode '{mode}', " - "Fiona was compiled against: {gdal}".format(driver=driver, - mode=mode, - min_gdal_version=min_gdal_version, - gdal=get_gdal_release_name())) + "Fiona was compiled against: {gdal}".format( + driver=driver, + mode=mode, + min_gdal_version=min_gdal_version, + gdal=get_gdal_release_name(), + ) + ) if vsi: self.path = vfs.vsi_path(path, vsi, archive) @@ -154,13 +177,13 @@ def __init__(self, path, mode='r', driver=None, schema=None, crs=None, path = parse_path(path) self.path = vsi_path(path) - if mode == 'w': + if mode == "w": if layer and not isinstance(layer, str): raise ValueError("in 'w' mode, layer names must be strings") - if driver == 'GeoJSON': + if driver == "GeoJSON": if layer is not None: raise ValueError("the GeoJSON format does not have layers") - self.name = 'OgrGeoJSON' + self.name = "OgrGeoJSON" # TODO: raise ValueError as above for other single-layer formats. 
else: self.name = layer or os.path.basename(os.path.splitext(path.path)[0]) @@ -172,17 +195,15 @@ def __init__(self, path, mode='r', driver=None, schema=None, crs=None, self.mode = mode - if self.mode == 'w': - if driver == 'Shapefile': - driver = 'ESRI Shapefile' + if self.mode == "w": + if driver == "Shapefile": + driver = "ESRI Shapefile" if not driver: raise DriverError("no driver") elif driver not in supported_drivers: - raise DriverError( - "unsupported driver: %r" % driver) + raise DriverError("unsupported driver: %r" % driver) elif self.mode not in supported_drivers[driver]: - raise DriverError( - "unsupported mode: %r" % self.mode) + raise DriverError("unsupported mode: %r" % self.mode) self._driver = driver if not schema: @@ -210,10 +231,10 @@ def __init__(self, path, mode='r', driver=None, schema=None, crs=None, self.encoding = encoding try: - if self.mode == 'r': + if self.mode == "r": self.session = Session() self.session.start(self, **kwargs) - elif self.mode in ('a', 'w'): + elif self.mode in ("a", "w"): self.session = WritingSession() self.session.start(self, **kwargs) except OSError: @@ -235,7 +256,8 @@ def __repr__(self): self.closed and "closed" or "open", self.path + ":" + str(self.name), self.mode, - hex(id(self))) + hex(id(self)), + ) def guard_driver_mode(self): driver = self.session.get_driver() @@ -382,8 +404,11 @@ def meta(self): """Returns a mapping with the driver, schema, crs, and additional properties.""" return { - 'driver': self.driver, 'schema': self.schema, 'crs': self.crs, - 'crs_wkt': self.crs_wkt} + "driver": self.driver, + "schema": self.schema, + "crs": self.crs, + "crs_wkt": self.crs_wkt, + } profile = meta @@ -403,7 +428,7 @@ def filter(self, *args, **kwds): """ if self.closed: raise ValueError("I/O operation on closed collection") - elif self.mode != 'r': + elif self.mode != "r": raise OSError("collection not open for reading") if args: s = slice(*args) @@ -412,13 +437,12 @@ def filter(self, *args, **kwds): step = s.step else: start = stop = step = None - bbox = kwds.get('bbox') - mask = kwds.get('mask') + bbox = kwds.get("bbox") + mask = kwds.get("mask") if bbox and mask: raise ValueError("mask and bbox can not be set together") - where = kwds.get('where') - self.iterator = Iterator( - self, start, stop, step, bbox, mask, where) + where = kwds.get("where") + self.iterator = Iterator(self, start, stop, step, bbox, mask, where) return self.iterator def items(self, *args, **kwds): @@ -438,7 +462,7 @@ def items(self, *args, **kwds): """ if self.closed: raise ValueError("I/O operation on closed collection") - elif self.mode != 'r': + elif self.mode != "r": raise OSError("collection not open for reading") if args: s = slice(*args) @@ -447,13 +471,12 @@ def items(self, *args, **kwds): step = s.step else: start = stop = step = None - bbox = kwds.get('bbox') - mask = kwds.get('mask') + bbox = kwds.get("bbox") + mask = kwds.get("mask") if bbox and mask: raise ValueError("mask and bbox can not be set together") - where = kwds.get('where') - self.iterator = ItemsIterator( - self, start, stop, step, bbox, mask, where) + where = kwds.get("where") + self.iterator = ItemsIterator(self, start, stop, step, bbox, mask, where) return self.iterator def keys(self, *args, **kwds): @@ -472,7 +495,7 @@ def keys(self, *args, **kwds): """ if self.closed: raise ValueError("I/O operation on closed collection") - elif self.mode != 'r': + elif self.mode != "r": raise OSError("collection not open for reading") if args: s = slice(*args) @@ -481,13 +504,12 @@ def keys(self, *args, 
**kwds): step = s.step else: start = stop = step = None - bbox = kwds.get('bbox') - mask = kwds.get('mask') + bbox = kwds.get("bbox") + mask = kwds.get("mask") if bbox and mask: raise ValueError("mask and bbox can not be set together") - where = kwds.get('where') - self.iterator = KeysIterator( - self, start, stop, step, bbox, mask, where) + where = kwds.get("where") + self.iterator = KeysIterator(self, start, stop, step, bbox, mask, where) return self.iterator def __contains__(self, fid): @@ -501,9 +523,12 @@ def __iter__(self): def __next__(self): """Returns next record from iterator.""" - warnings.warn("Collection.__next__() is buggy and will be removed in " - "Fiona 2.0. Switch to `next(iter(collection))`.", - FionaDeprecationWarning, stacklevel=2) + warnings.warn( + "Collection.__next__() is buggy and will be removed in " + "Fiona 2.0. Switch to `next(iter(collection))`.", + FionaDeprecationWarning, + stacklevel=2, + ) if not self.iterator: iter(self) return next(self.iterator) @@ -520,7 +545,7 @@ def writerecords(self, records): """Stages multiple records for writing to disk.""" if self.closed: raise ValueError("I/O operation on closed collection") - if self.mode not in ('a', 'w'): + if self.mode not in ("a", "w"): raise OSError("collection not open for writing") self.session.writerecs(records, self) self._len = self.session.get_length() @@ -537,10 +562,9 @@ def validate_record(self, record): """ # Currently we only compare keys of properties, not the types of # values. - return ( - set(record['properties'].keys()) == - set(self.schema['properties'].keys()) and - self.validate_record_geometry(record)) + return set(record["properties"].keys()) == set( + self.schema["properties"].keys() + ) and self.validate_record_geometry(record) def validate_record_geometry(self, record): """Compares the record's geometry to the collection's schema. @@ -551,15 +575,15 @@ def validate_record_geometry(self, record): # OGR reports these mixed files as type "Polygon" or "LineString" # but will return either these or their multi counterparts when # reading features. 
- if (self.driver == "ESRI Shapefile" and - "Point" not in record['geometry']['type']): - return record['geometry']['type'].lstrip( - "Multi") == self.schema['geometry'].lstrip("3D ").lstrip( - "Multi") + if ( + self.driver == "ESRI Shapefile" + and "Point" not in record["geometry"]["type"] + ): + return record["geometry"]["type"].lstrip("Multi") == self.schema[ + "geometry" + ].lstrip("3D ").lstrip("Multi") else: - return ( - record['geometry']['type'] == - self.schema['geometry'].lstrip("3D ")) + return record["geometry"]["type"] == self.schema["geometry"].lstrip("3D ") def __len__(self): if self._len <= 0 and self.session is not None: @@ -656,11 +680,11 @@ def closed(self): def __enter__(self): self._env.enter_context(env_ctx_if_needed()) - logging.getLogger('fiona.ogrext').addFilter(self.field_skip_log_filter) + logging.getLogger("fiona.ogrext").addFilter(self.field_skip_log_filter) return self def __exit__(self, type, value, traceback): - logging.getLogger('fiona.ogrext').removeFilter(self.field_skip_log_filter) + logging.getLogger("fiona.ogrext").removeFilter(self.field_skip_log_filter) self.close() def __del__(self): @@ -670,9 +694,13 @@ def __del__(self): self.close() -ALL_GEOMETRY_TYPES = set([ - geom_type for geom_type in GEOMETRY_TYPES.values() - if "3D " not in geom_type and geom_type != "None"]) +ALL_GEOMETRY_TYPES = set( + [ + geom_type + for geom_type in GEOMETRY_TYPES.values() + if "3D " not in geom_type and geom_type != "None" + ] +) ALL_GEOMETRY_TYPES.add("None") @@ -704,20 +732,24 @@ def get_filetype(bytesbuf): """Detect compression type of bytesbuf. ZIP only. TODO: add others relevant to GDAL/OGR.""" - if bytesbuf[:4].startswith(b'PK\x03\x04'): - return 'zip' + if bytesbuf[:4].startswith(b"PK\x03\x04"): + return "zip" else: - return '' + return "" class BytesCollection(Collection): """BytesCollection takes a buffer of bytes and maps that to a virtual file that can then be opened by fiona. """ + def __init__(self, bytesbuf, **kwds): """Takes buffer of bytes whose contents is something we'd like to open with Fiona and maps it to a virtual file. + """ + self._closed = True + if not isinstance(bytesbuf, bytes): raise ValueError("input buffer must be bytes") @@ -730,16 +762,18 @@ def __init__(self, bytesbuf, **kwds): # it. If the requested driver is for GeoJSON, we append an an # appropriate extension to ensure the driver reads it. filetype = get_filetype(self.bytesbuf) - ext = '' - if filetype == 'zip': - ext = '.zip' - elif kwds.get('driver') == "GeoJSON": - ext = '.json' + ext = "" + if filetype == "zip": + ext = ".zip" + elif kwds.get("driver") == "GeoJSON": + ext = ".json" self.virtual_file = buffer_to_virtual_file(self.bytesbuf, ext=ext) # Instantiate the parent class. 
super().__init__(self.virtual_file, vsi=filetype, **kwds) + self._closed = False + def close(self): """Removes the virtual file associated with the class.""" super().close() @@ -753,4 +787,5 @@ def __repr__(self): self.closed and "closed" or "open", self.path + ":" + str(self.name), self.mode, - hex(id(self))) + hex(id(self)), + ) diff --git a/fiona/env.py b/fiona/env.py index 54f49dcd8..fe9b97903 100644 --- a/fiona/env.py +++ b/fiona/env.py @@ -111,9 +111,9 @@ def default_options(cls): """ return { - 'CHECK_WITH_INVERT_PROJ': True, - 'GTIFF_IMPLICIT_JPEG_OVR': False, - "FIONA_ENV": True + "CHECK_WITH_INVERT_PROJ": True, + "GTIFF_IMPLICIT_JPEG_OVR": False, + "FIONA_ENV": True, } def __init__( @@ -170,8 +170,7 @@ def __init__( """ aws_access_key_id = options.pop("aws_access_key_id", None) - # Before 1.0, Fiona only supported AWS. We will special - # case AWS in 1.0.x. TODO: warn deprecation in 1.1. + # Warn deprecation in 1.9, remove in 2.0. if aws_access_key_id: warnings.warn( "Passing abstract session keyword arguments is deprecated. " @@ -186,11 +185,13 @@ def __init__( if not {"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"}.isdisjoint(options): raise EnvError( "GDAL's AWS config options can not be directly set. " - "AWS credentials are handled exclusively by boto3.") + "AWS credentials are handled exclusively by boto3." + ) if session: # Passing a session via keyword argument is the canonical # way to configure access to secured cloud resources. + # Warn deprecation in 1.9, remove in 2.0. if not isinstance(session, Session): warnings.warn( "Passing a boto3 session is deprecated. Pass a Fiona AWSSession object instead.", @@ -411,6 +412,7 @@ def ensure_env(f): nothing and immediately calls f with the given arguments. """ + @wraps(f) def wrapper(*args, **kwargs): if local._env: @@ -444,6 +446,7 @@ def ensure_env_with_credentials(f): nothing and immediately calls f with the given arguments. """ + @wraps(f) def wrapper(*args, **kwds): if local._env: @@ -479,6 +482,7 @@ class GDALVersion(object): and ignores everything except the major and minor components. 
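The fiona/env.py hunk above tightens the credential deprecations. A sketch of the preferred calling pattern, assuming boto3 is installed and valid AWS credentials exist; the bucket name is hypothetical:

    import boto3
    import fiona
    from fiona.session import AWSSession

    # Deprecated: fiona.Env(session=boto3.Session()) or raw
    # aws_access_key_id=.../aws_secret_access_key=... keyword arguments.
    with fiona.Env(session=AWSSession(boto3.Session())):
        with fiona.open("zip+s3://example-bucket/coutwildrnp.zip") as src:
            print(len(src))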
""" + major = attr.ib(default=0, validator=attr.validators.instance_of(int)) minor = attr.ib(default=0, validator=attr.validators.instance_of(int)) diff --git a/fiona/errors.py b/fiona/errors.py index a90bd8d9c..55fdbcaa4 100644 --- a/fiona/errors.py +++ b/fiona/errors.py @@ -71,7 +71,7 @@ class GDALVersionError(FionaError): """ -class FionaDeprecationWarning(UserWarning): +class FionaDeprecationWarning(DeprecationWarning): """A warning about deprecation of Fiona features""" diff --git a/fiona/fio/cat.py b/fiona/fio/cat.py index 4a61bd530..fcb43603d 100644 --- a/fiona/fio/cat.py +++ b/fiona/fio/cat.py @@ -9,26 +9,34 @@ import fiona from fiona.transform import transform_geom -from fiona.model import ObjectEncoder +from fiona.model import Feature, Geometry, ObjectEncoder from fiona.fio import options, with_context_env +from fiona.fio.helpers import recursive_round from fiona.errors import AttributeFilterError -warnings.simplefilter('default') +warnings.simplefilter("default") # Cat command @click.command(short_help="Concatenate and print the features of datasets") -@click.argument('files', nargs=-1, required=True, metavar="INPUTS...") -@click.option('--layer', default=None, multiple=True, - callback=options.cb_multilayer, - help="Input layer(s), specified as 'fileindex:layer` " - "For example, '1:foo,2:bar' will concatenate layer foo " - "from file 1 and layer bar from file 2") +@click.argument("files", nargs=-1, required=True, metavar="INPUTS...") +@click.option( + "--layer", + default=None, + multiple=True, + callback=options.cb_multilayer, + help="Input layer(s), specified as 'fileindex:layer` " + "For example, '1:foo,2:bar' will concatenate layer foo " + "from file 1 and layer bar from file 2", +) @cligj.precision_opt @cligj.indent_opt @cligj.compact_opt -@click.option('--ignore-errors/--no-ignore-errors', default=False, - help="log errors but do not stop serialization.") +@click.option( + "--ignore-errors/--no-ignore-errors", + default=False, + help="log errors but do not stop serialization.", +) @options.dst_crs_opt @cligj.use_rs_opt @click.option( @@ -74,11 +82,11 @@ def cat( """ log = logging.getLogger(__name__) - dump_kwds = {'sort_keys': True} + dump_kwds = {"sort_keys": True} if indent: - dump_kwds['indent'] = indent + dump_kwds["indent"] = indent if compact: - dump_kwds['separators'] = (',', ':') + dump_kwds["separators"] = (",", ":") # Validate file idexes provided in --layer option # (can't pass the files to option callback) @@ -93,22 +101,33 @@ def cat( try: if bbox: try: - bbox = tuple(map(float, bbox.split(','))) + bbox = tuple(map(float, bbox.split(","))) except ValueError: bbox = json.loads(bbox) for i, path in enumerate(files, 1): for lyr in layer[str(i)]: with fiona.open(path, layer=lyr) as src: for i, feat in src.items(bbox=bbox, where=where): - if dst_crs or precision >= 0: - g = transform_geom( - src.crs, dst_crs, feat['geometry'], + geom = feat.geometry + if dst_crs: + geom = transform_geom( + src.crs, + dst_crs, + geom, antimeridian_cutting=cut_at_antimeridian, - precision=precision) - feat['geometry'] = g - feat['bbox'] = fiona.bounds(g) + ) + + if precision >= 0: + geom = recursive_round(geom, precision) + + feat = Feature( + id=feat.id, + properties=feat.properties, + geometry=geom, + bbox=fiona.bounds(geom), + ) if use_rs: - click.echo('\x1e', nl=False) + click.echo("\x1e", nl=False) click.echo(json.dumps(feat, cls=ObjectEncoder, **dump_kwds)) except AttributeFilterError as e: diff --git a/fiona/fio/collect.py b/fiona/fio/collect.py index 08c2297df..85b9e9f52 
100644 --- a/fiona/fio/collect.py +++ b/fiona/fio/collect.py @@ -8,7 +8,7 @@ import cligj from fiona.fio import helpers, options, with_context_env -from fiona.model import Feature, ObjectEncoder +from fiona.model import Geometry, ObjectEncoder from fiona.transform import transform_geom @@ -16,42 +16,72 @@ @cligj.precision_opt @cligj.indent_opt @cligj.compact_opt -@click.option('--record-buffered/--no-record-buffered', default=False, - help="Economical buffering of writes at record, not collection " - "(default), level.") -@click.option('--ignore-errors/--no-ignore-errors', default=False, - help="log errors but do not stop serialization.") +@click.option( + "--record-buffered/--no-record-buffered", + default=False, + help="Economical buffering of writes at record, not collection " + "(default), level.", +) +@click.option( + "--ignore-errors/--no-ignore-errors", + default=False, + help="log errors but do not stop serialization.", +) @options.src_crs_opt -@click.option('--with-ld-context/--without-ld-context', default=False, - help="add a JSON-LD context to JSON output.") -@click.option('--add-ld-context-item', multiple=True, - help="map a term to a URI and add it to the output's JSON LD " - "context.") -@click.option('--parse/--no-parse', default=True, - help="load and dump the geojson feature (default is True)") +@click.option( + "--with-ld-context/--without-ld-context", + default=False, + help="add a JSON-LD context to JSON output.", +) +@click.option( + "--add-ld-context-item", + multiple=True, + help="map a term to a URI and add it to the output's JSON LD " "context.", +) +@click.option( + "--parse/--no-parse", + default=True, + help="load and dump the geojson feature (default is True)", +) @click.pass_context @with_context_env -def collect(ctx, precision, indent, compact, record_buffered, ignore_errors, - src_crs, with_ld_context, add_ld_context_item, parse): +def collect( + ctx, + precision, + indent, + compact, + record_buffered, + ignore_errors, + src_crs, + with_ld_context, + add_ld_context_item, + parse, +): """Make a GeoJSON feature collection from a sequence of GeoJSON features and print it.""" logger = logging.getLogger(__name__) - stdin = click.get_text_stream('stdin') - sink = click.get_text_stream('stdout') + stdin = click.get_text_stream("stdin") + sink = click.get_text_stream("stdout") - dump_kwds = {'sort_keys': True} + dump_kwds = {"sort_keys": True} if indent: - dump_kwds['indent'] = indent + dump_kwds["indent"] = indent if compact: - dump_kwds['separators'] = (',', ':') - item_sep = compact and ',' or ', ' + dump_kwds["separators"] = (",", ":") + item_sep = compact and "," or ", " if src_crs: if not parse: raise click.UsageError("Can't specify --src-crs with --no-parse") - transformer = partial(transform_geom, src_crs, 'EPSG:4326', - antimeridian_cutting=True, precision=precision) + transformer = partial( + transform_geom, + src_crs, + "EPSG:4326", + antimeridian_cutting=True, + precision=precision, + ) else: + def transformer(x): return x @@ -60,53 +90,61 @@ def transformer(x): # If parsing geojson if parse: # If input is RS-delimited JSON sequence. 
- if first_line.startswith('\x1e'): + if first_line.startswith("\x1e"): + def feature_text_gen(): - buffer = first_line.strip('\x1e') + buffer = first_line.strip("\x1e") for line in stdin: - if line.startswith('\x1e'): + if line.startswith("\x1e"): if buffer: feat = json.loads(buffer) - feat['geometry'] = transformer(feat['geometry']) - feat = Feature.from_dict(**feat) + feat["geometry"] = transformer( + Geometry.from_dict(**feat["geometry"]) + ) yield json.dumps(feat, cls=ObjectEncoder, **dump_kwds) - buffer = line.strip('\x1e') + buffer = line.strip("\x1e") else: buffer += line else: feat = json.loads(buffer) - feat['geometry'] = transformer(feat['geometry']) - feat = Feature.from_dict(**feat) + feat["geometry"] = transformer( + Geometry.from_dict(**feat["geometry"]) + ) yield json.dumps(feat, cls=ObjectEncoder, **dump_kwds) + else: + def feature_text_gen(): feat = json.loads(first_line) - feat['geometry'] = transformer(feat['geometry']) - feat = Feature.from_dict(**feat) + feat["geometry"] = transformer(Geometry.from_dict(**feat["geometry"])) yield json.dumps(feat, cls=ObjectEncoder, **dump_kwds) for line in stdin: feat = json.loads(line) - feat['geometry'] = transformer(feat['geometry']) - feat = Feature.from_dict(**feat) + feat["geometry"] = transformer( + Geometry.from_dict(**feat["geometry"]) + ) yield json.dumps(feat, cls=ObjectEncoder, **dump_kwds) # If *not* parsing geojson else: # If input is RS-delimited JSON sequence. - if first_line.startswith('\x1e'): + if first_line.startswith("\x1e"): + def feature_text_gen(): - buffer = first_line.strip('\x1e') + buffer = first_line.strip("\x1e") for line in stdin: - if line.startswith('\x1e'): + if line.startswith("\x1e"): if buffer: yield buffer - buffer = line.strip('\x1e') + buffer = line.strip("\x1e") else: buffer += line else: yield buffer + else: + def feature_text_gen(): yield first_line for line in stdin: @@ -121,14 +159,13 @@ def feature_text_gen(): indented = bool(indent) rec_indent = "\n" + " " * (2 * (indent or 0)) - collection = { - 'type': 'FeatureCollection', - 'features': []} + collection = {"type": "FeatureCollection", "features": []} if with_ld_context: - collection['@context'] = helpers.make_ld_context( - add_ld_context_item) + collection["@context"] = helpers.make_ld_context(add_ld_context_item) - head, tail = json.dumps(collection, cls=ObjectEncoder, **dump_kwds).split('[]') + head, tail = json.dumps(collection, cls=ObjectEncoder, **dump_kwds).split( + "[]" + ) sink.write(head) sink.write("[") @@ -147,16 +184,14 @@ def feature_text_gen(): # Ignoring errors is *not* the default. if ignore_errors: logger.error( - "failed to serialize file record %d (%s), " - "continuing", - i, exc) + "failed to serialize file record %d (%s), " "continuing", i, exc + ) else: # Log error and close up the GeoJSON, leaving it # more or less valid no matter what happens above. 
logger.critical( - "failed to serialize file record %d (%s), " - "quiting", - i, exc) + "failed to serialize file record %d (%s), " "quiting", i, exc + ) sink.write("]") sink.write(tail) if indented: @@ -177,14 +212,16 @@ def feature_text_gen(): except Exception as exc: if ignore_errors: logger.error( - "failed to serialize file record %d (%s), " - "continuing", - i, exc) + "failed to serialize file record %d (%s), " "continuing", + i, + exc, + ) else: logger.critical( - "failed to serialize file record %d (%s), " - "quiting", - i, exc) + "failed to serialize file record %d (%s), " "quiting", + i, + exc, + ) sink.write("]") sink.write(tail) if indented: @@ -199,14 +236,13 @@ def feature_text_gen(): else: # Buffer GeoJSON data at the collection level. The default. - collection = { - 'type': 'FeatureCollection', - 'features': []} + collection = {"type": "FeatureCollection", "features": []} if with_ld_context: - collection['@context'] = helpers.make_ld_context( - add_ld_context_item) + collection["@context"] = helpers.make_ld_context(add_ld_context_item) - head, tail = json.dumps(collection, cls=ObjectEncoder, **dump_kwds).split('[]') + head, tail = json.dumps(collection, cls=ObjectEncoder, **dump_kwds).split( + "[]" + ) sink.write(head) sink.write("[") sink.write(",".join(source)) diff --git a/fiona/fio/helpers.py b/fiona/fio/helpers.py index 396dcc1ef..5dbfafe32 100644 --- a/fiona/fio/helpers.py +++ b/fiona/fio/helpers.py @@ -1,6 +1,7 @@ +"""Helper objects needed by multiple CLI commands. + """ -Helper objects needed by multiple CLI commands. -""" + from functools import partial import json import math @@ -8,32 +9,36 @@ from munch import munchify -from fiona.model import to_dict +from fiona.model import Geometry, to_dict -warnings.simplefilter('default') +warnings.simplefilter("default") def obj_gen(lines, object_hook=None): """Return a generator of JSON objects loaded from ``lines``.""" first_line = next(lines) - if first_line.startswith('\x1e'): + if first_line.startswith("\x1e"): + def gen(): - buffer = first_line.strip('\x1e') + buffer = first_line.strip("\x1e") for line in lines: - if line.startswith('\x1e'): + if line.startswith("\x1e"): if buffer: yield json.loads(buffer, object_hook=object_hook) - buffer = line.strip('\x1e') + buffer = line.strip("\x1e") else: buffer += line else: yield json.loads(buffer, object_hook=object_hook) + else: + def gen(): yield json.loads(first_line, object_hook=object_hook) for line in lines: yield json.loads(line, object_hook=object_hook) + return gen() @@ -45,22 +50,25 @@ def nullable(val, cast): def eval_feature_expression(feature, expression): - safe_dict = {'f': munchify(to_dict(feature))} - safe_dict.update({ - 'sum': sum, - 'pow': pow, - 'min': min, - 'max': max, - 'math': math, - 'bool': bool, - 'int': partial(nullable, int), - 'str': partial(nullable, str), - 'float': partial(nullable, float), - 'len': partial(nullable, len), - }) + safe_dict = {"f": munchify(to_dict(feature))} + safe_dict.update( + { + "sum": sum, + "pow": pow, + "min": min, + "max": max, + "math": math, + "bool": bool, + "int": partial(nullable, int), + "str": partial(nullable, str), + "float": partial(nullable, float), + "len": partial(nullable, len), + } + ) try: from shapely.geometry import shape - safe_dict['shape'] = shape + + safe_dict["shape"] = shape except ImportError: pass return eval(expression, {"__builtins__": None}, safe_dict) @@ -71,37 +79,31 @@ def make_ld_context(context_items): See https://json-ld.org/spec/latest/json-ld/.""" ctx = { - "@context": { - 
"geojson": "http://ld.geojson.org/vocab#", - "Feature": "geojson:Feature", - "FeatureCollection": "geojson:FeatureCollection", - "GeometryCollection": "geojson:GeometryCollection", - "LineString": "geojson:LineString", - "MultiLineString": "geojson:MultiLineString", - "MultiPoint": "geojson:MultiPoint", - "MultiPolygon": "geojson:MultiPolygon", - "Point": "geojson:Point", - "Polygon": "geojson:Polygon", - "bbox": { - "@container": "@list", - "@id": "geojson:bbox" - }, - "coordinates": "geojson:coordinates", - "datetime": "http://www.w3.org/2006/time#inXSDDateTime", - "description": "http://purl.org/dc/terms/description", - "features": { - "@container": "@set", - "@id": "geojson:features" - }, - "geometry": "geojson:geometry", - "id": "@id", - "properties": "geojson:properties", - "start": "http://www.w3.org/2006/time#hasBeginning", - "stop": "http://www.w3.org/2006/time#hasEnding", - "title": "http://purl.org/dc/terms/title", - "type": "@type", - "when": "geojson:when" - } + "@context": { + "geojson": "http://ld.geojson.org/vocab#", + "Feature": "geojson:Feature", + "FeatureCollection": "geojson:FeatureCollection", + "GeometryCollection": "geojson:GeometryCollection", + "LineString": "geojson:LineString", + "MultiLineString": "geojson:MultiLineString", + "MultiPoint": "geojson:MultiPoint", + "MultiPolygon": "geojson:MultiPolygon", + "Point": "geojson:Point", + "Polygon": "geojson:Polygon", + "bbox": {"@container": "@list", "@id": "geojson:bbox"}, + "coordinates": "geojson:coordinates", + "datetime": "http://www.w3.org/2006/time#inXSDDateTime", + "description": "http://purl.org/dc/terms/description", + "features": {"@container": "@set", "@id": "geojson:features"}, + "geometry": "geojson:geometry", + "id": "@id", + "properties": "geojson:properties", + "start": "http://www.w3.org/2006/time#hasBeginning", + "stop": "http://www.w3.org/2006/time#hasEnding", + "title": "http://purl.org/dc/terms/title", + "type": "@type", + "when": "geojson:when", + } } for item in context_items or []: t, uri = item.split("=") @@ -111,5 +113,23 @@ def make_ld_context(context_items): def id_record(rec): """Converts a record's id to a blank node id and returns the record.""" - rec['id'] = '_:f%s' % rec['id'] + rec["id"] = "_:f%s" % rec["id"] return rec + + +def recursive_round(obj, precision): + """Recursively round coordinates.""" + if precision < 0: + return obj + if getattr(obj, "geometries", None): + return Geometry( + geometries=[recursive_round(part, precision) for part in obj.geometries] + ) + elif getattr(obj, "coordinates", None): + return Geometry( + coordinates=[recursive_round(part, precision) for part in obj.coordinates] + ) + if isinstance(obj, (int, float)): + return round(obj, precision) + else: + return [recursive_round(part, precision) for part in obj] diff --git a/fiona/fio/load.py b/fiona/fio/load.py index 2d34e123d..79c94f4d0 100644 --- a/fiona/fio/load.py +++ b/fiona/fio/load.py @@ -9,6 +9,7 @@ import fiona from fiona.fio import options, with_context_env +from fiona.model import Feature, Geometry from fiona.schema import FIELD_TYPES_MAP_REV from fiona.transform import transform_geom @@ -45,12 +46,14 @@ def _cb_key_val(ctx, param, value): @click.command(short_help="Load GeoJSON to a dataset in another format.") -@click.argument('output', required=True) -@click.option('-f', '--format', '--driver', 'driver', - help="Output format driver name.") +@click.argument("output", required=True) +@click.option("-f", "--format", "--driver", "driver", help="Output format driver name.") @options.src_crs_opt 
-@click.option('--dst-crs', '--dst_crs', - help="Destination CRS. Defaults to --src-crs when not given.") +@click.option( + "--dst-crs", + "--dst_crs", + help="Destination CRS. Defaults to --src-crs when not given.", +) @cligj.features_in_arg @click.option( "--layer", @@ -82,16 +85,25 @@ def load(ctx, output, driver, src_crs, dst_crs, features, layer, creation_option dst_crs = dst_crs or src_crs if src_crs and dst_crs and src_crs != dst_crs: - transformer = partial(transform_geom, src_crs, dst_crs, - antimeridian_cutting=True, precision=-1) + transformer = partial( + transform_geom, src_crs, dst_crs, antimeridian_cutting=True + ) else: + def transformer(x): - return x + return Geometry.from_dict(**x) def feature_gen(): + """Convert stream of JSON to features. + + Yields + ------ + Feature + + """ for feat in features: - feat['geometry'] = transformer(feat['geometry']) - yield feat + feat["geometry"] = transformer(Geometry.from_dict(**feat["geometry"])) + yield Feature.from_dict(**feat) try: source = feature_gen() @@ -99,10 +111,13 @@ def feature_gen(): # Use schema of first feature as a template. # TODO: schema specified on command line? first = next(source) + schema = {"geometry": first.geometry.type} + schema["properties"] = dict( + [ + (k, FIELD_TYPES_MAP_REV.get(type(v)) or "str") + for k, v in first.properties.items() + ] + ) - schema = {'geometry': first['geometry']['type']} - schema['properties'] = dict([ - (k, FIELD_TYPES_MAP_REV.get(type(v)) or 'str') - for k, v in first['properties'].items()]) with fiona.open( output, diff --git a/fiona/model.py b/fiona/model.py index f5519132a..1c68ffb54 100644 --- a/fiona/model.py +++ b/fiona/model.py @@ -96,7 +96,7 @@ def _props(self): return { k: getattr(self._delegate, k) for k in self._delegated_properties - if k is not None + if k is not None # getattr(self._delegate, k) is not None } def __getitem__(self, item): @@ -139,9 +139,10 @@ def __eq__(self, other): class _Geometry(object): - def __init__(self, coordinates=None, type=None): + def __init__(self, coordinates=None, type=None, geometries=None): self.coordinates = coordinates self.type = type + self.geometries = geometries class Geometry(Object): @@ -154,10 +155,12 @@ class Geometry(Object): """ - _delegated_properties = ["coordinates", "type"] + _delegated_properties = ["coordinates", "type", "geometries"] - def __init__(self, coordinates=None, type=None, **data): - self._delegate = _Geometry(coordinates=coordinates, type=type) + def __init__(self, coordinates=None, type=None, geometries=None, **data): + self._delegate = _Geometry( + coordinates=coordinates, type=type, geometries=geometries + ) super(Geometry, self).__init__(**data) @classmethod @@ -166,6 +169,10 @@ def from_dict(cls, mapping=None, **kwargs): return Geometry( coordinates=data.pop("coordinates", None), type=data.pop("type", None), + geometries=[ + Geometry.from_dict(**part) + for part in data.pop("geometries", None) or [] + ], **data ) @@ -191,6 +198,17 @@ def type(self): """ return self._delegate.type + @property + def geometries(self): + """A collection's geometries. 
+ + Returns + ------- + list + + """ + return self._delegate.geometries + class _Feature(object): def __init__(self, geometry=None, id=None, properties=None): @@ -212,6 +230,8 @@ class Feature(Object): _delegated_properties = ["geometry", "id", "properties"] def __init__(self, geometry=None, id=None, properties=None, **data): + if properties is None: + properties = Properties() self._delegate = _Feature(geometry=geometry, id=id, properties=properties) super(Feature, self).__init__(**data) @@ -223,15 +243,7 @@ def from_dict(cls, mapping=None, **kwargs): if isinstance(geom_data, Geometry): geom = geom_data else: - geom = ( - Geometry( - coordinates=geom_data.pop("coordinates", None), - type=geom_data.pop("type", None), - **geom_data - ) - if geom_data is not None - else None - ) + geom = Geometry.from_dict(**geom_data) if geom_data is not None else None props_data = data.pop("properties", None) @@ -296,9 +308,7 @@ def type(self): class Properties(Object): - """A GeoJSON-like feature's properties - - """ + """A GeoJSON-like feature's properties""" def __init__(self, **kwds): super(Properties, self).__init__(**kwds) @@ -314,7 +324,7 @@ class ObjectEncoder(JSONEncoder): def default(self, o): if isinstance(o, (Geometry, Properties)): - return dict(**o) + return {k: v for k, v in o.items() if v is not None} elif isinstance(o, Feature): o_dict = dict(**o) o_dict["type"] = "Feature" @@ -327,12 +337,12 @@ def default(self, o): return JSONEncoder().default(o) -def decode_object(o): +def decode_object(obj): """A json.loads object_hook Parameters ---------- - o : dict + obj : dict A decoded dict. Returns @@ -340,26 +350,34 @@ def decode_object(o): Feature, Geometry, or dict """ - if "type" in o: - if o["type"] == "Feature": - val = Feature.from_dict(**o) - elif o["type"] in list(GEOMETRY_TYPES.values())[:8]: - val = Geometry.from_dict(**o) - else: - val = o + if (obj.get("type", None) == "Feature") or "geometry" in obj: + return Feature.from_dict(**obj) + elif obj.get("type", None) in list(GEOMETRY_TYPES.values())[:8]: + return Geometry.from_dict(**obj) else: - val = o - - return val + return obj def to_dict(val): """Converts an object to a dict""" try: - o = ObjectEncoder().default(val) + obj = ObjectEncoder().default(val) except TypeError: - pass + return val else: - return o + return obj + + +def _guard_model_object(obj): + """Convert dict to Geometry or Feature. + + For use during the 1.9-2.0 transition. Will be removed in 2.0. - return val + """ + if not isinstance(obj, Object): + warn( + "Support for feature and geometry dicts is deprecated. Instances of Feature and Geometry will be required in 2.0.", + FionaDeprecationWarning, + stacklevel=2, + ) + return decode_object(obj) diff --git a/fiona/ogrext.pyx b/fiona/ogrext.pyx index b95eb4d9f..e6fb4cbb0 100644 --- a/fiona/ogrext.pyx +++ b/fiona/ogrext.pyx @@ -32,7 +32,7 @@ from fiona.errors import ( DriverError, DriverIOError, SchemaError, CRSError, FionaValueError, TransactionError, GeometryTypeValidationError, DatasetDeleteError, AttributeFilterError, FeatureWarning, FionaDeprecationWarning) -from fiona.model import Feature, Properties +from fiona.model import _guard_model_object, Feature, Geometry, Properties from fiona.path import vsi_path from fiona.rfc3339 import parse_date, parse_datetime, parse_time from fiona.rfc3339 import FionaDateType, FionaDateTimeType, FionaTimeType @@ -392,32 +392,33 @@ cdef class OGRFeatureBuilder: Allocates one OGR Feature which should be destroyed by the caller. Borrows a layer definition from the collection. 
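To ground the fiona/model.py changes above: GeometryCollection members now round-trip through JSON via ObjectEncoder and decode_object. A sketch with invented coordinates:

    import json
    from fiona.model import Geometry, ObjectEncoder, decode_object

    gc = Geometry.from_dict(
        type="GeometryCollection",
        geometries=[{"type": "Point", "coordinates": (1.0, 2.0)}],
    )
    text = json.dumps(gc, cls=ObjectEncoder)  # None-valued keys are dropped
    same = json.loads(text, object_hook=decode_object)
    assert same.geometries[0].type == "Point"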
- """ + """ cdef void * build(self, feature, collection) except NULL: cdef void *cogr_geometry = NULL cdef const char *string_c = NULL - cdef WritingSession session - session = collection.session + cdef WritingSession session = collection.session cdef void *cogr_layer = session.cogr_layer + cdef void *cogr_featuredefn = OGR_L_GetLayerDefn(cogr_layer) + cdef void *cogr_feature = OGR_F_Create(cogr_featuredefn) + if cogr_layer == NULL: raise ValueError("Null layer") - cdef void *cogr_featuredefn = OGR_L_GetLayerDefn(cogr_layer) + if cogr_featuredefn == NULL: raise ValueError("Null feature definition") - cdef void *cogr_feature = OGR_F_Create(cogr_featuredefn) + if cogr_feature == NULL: raise ValueError("Null feature") - if feature['geometry'] is not None: - cogr_geometry = OGRGeomBuilder().build( - feature['geometry']) + if feature.geometry is not None: + cogr_geometry = OGRGeomBuilder().build(feature.geometry) exc_wrap_int(OGR_F_SetGeometryDirectly(cogr_feature, cogr_geometry)) # OGR_F_SetFieldString takes encoded strings ('bytes' in Python 3). encoding = session._get_internal_encoding() - for key, value in feature['properties'].items(): + for key, value in feature.properties.items(): ogr_key = session._schema_mapping[key] schema_type = normalize_field_type(collection.schema['properties'][key]) @@ -519,8 +520,9 @@ cdef _deleteOgrFeature(void *cogr_feature): cogr_feature = NULL -def featureRT(feature, collection): +def featureRT(feat, collection): # For testing purposes only, leaks the JSON data + feature = _guard_model_object(feat) cdef void *cogr_feature = OGRFeatureBuilder().build(feature, collection) cdef void *cogr_geometry = OGR_F_GetGeometryRef(cogr_feature) if cogr_geometry == NULL: @@ -1283,10 +1285,12 @@ cdef class WritingSession(Session): driver_name = OGR_Dr_GetName(cogr_driver).decode("utf-8") valid_geom_types = collection._valid_geom_types + def validate_geometry_type(record): if record["geometry"] is None: return True return record["geometry"]["type"].lstrip("3D ") in valid_geom_types + transactions_supported = GDALDatasetTestCapability(self.cogr_ds, ODsCTransactions) log.debug("Transaction supported: {}".format(transactions_supported)) if transactions_supported: @@ -1297,37 +1301,43 @@ cdef class WritingSession(Session): schema_props_keys = set(collection.schema['properties'].keys()) - for record in records: + for _rec in records: + record = _guard_model_object(_rec) + # Check for optional elements - if 'properties' not in record: - record['properties'] = {} - if 'geometry' not in record: - record['geometry'] = None + # if 'properties' not in _rec: + # _rec['properties'] = {} + # if 'geometry' not in _rec: + # _rec['geometry'] = None # Validate against collection's schema. 
- if set(record['properties'].keys()) != schema_props_keys: + if set(record.properties.keys()) != schema_props_keys: raise ValueError( "Record does not match collection schema: %r != %r" % ( - record['properties'].keys(), + record.properties.keys(), list(schema_props_keys) )) + if not validate_geometry_type(record): raise GeometryTypeValidationError( "Record's geometry type does not match " "collection schema's geometry type: %r != %r" % ( - record['geometry']['type'], + record.geometry.type, collection.schema['geometry'] )) + # Validate against collection's schema to give useful message - if set(record['properties'].keys()) != schema_props_keys: + if set(record.properties.keys()) != schema_props_keys: raise SchemaError( "Record does not match collection schema: %r != %r" % ( - record['properties'].keys(), + record.properties.keys(), list(schema_props_keys) )) + cogr_feature = OGRFeatureBuilder().build(record, collection) result = OGR_L_CreateFeature(cogr_layer, cogr_feature) if result != OGRERR_NONE: msg = get_last_error_msg() raise RuntimeError("GDAL Error: {msg} \n \n Failed to write record: " "{record}".format(msg=msg, record=record)) + _deleteOgrFeature(cogr_feature) if transactions_supported: @@ -1470,7 +1480,8 @@ cdef class Iterator: OGR_L_SetSpatialFilterRect( cogr_layer, bbox[0], bbox[1], bbox[2], bbox[3]) elif mask: - cogr_geometry = OGRGeomBuilder().build(mask) + mask_geom = _guard_model_object(mask) + cogr_geometry = OGRGeomBuilder().build(mask_geom) OGR_L_SetSpatialFilter(cogr_layer, cogr_geometry) OGR_G_DestroyGeometry(cogr_geometry) diff --git a/fiona/transform.py b/fiona/transform.py index 96eb904f9..b82743a39 100644 --- a/fiona/transform.py +++ b/fiona/transform.py @@ -1,8 +1,14 @@ """Coordinate and geometry warping and reprojection""" +from warnings import warn + import fiona._loading + with fiona._loading.add_gdal_dll_directories(): from fiona._transform import _transform, _transform_geom + from fiona.compat import DICT_TYPES + from fiona.errors import FionaDeprecationWarning + from fiona.model import _guard_model_object, Geometry def transform(src_crs, dst_crs, xs, ys): @@ -44,8 +50,13 @@ def transform(src_crs, dst_crs, xs, ys): def transform_geom( - src_crs, dst_crs, geom, - antimeridian_cutting=False, antimeridian_offset=10.0, precision=-1): + src_crs, + dst_crs, + geom, + antimeridian_cutting=False, + antimeridian_offset=10.0, + precision=-1, +): """Transform a geometry obj from one reference system to another. Parameters @@ -68,15 +79,15 @@ def transform_geom( A distance in decimal degrees from the antimeridian, outside of which geometries will not be cut. precision: int, optional - Optional rounding precision of output coordinates, in number - of decimal places. + Round geometry coordinates to this number of decimal places. + This parameter is deprecated and will be removed in 2.0. Returns ------- obj A new GeoJSON-like geometry (or a list of GeoJSON-like geometries if an iterable was given as input) with transformed coordinates. Note - that if the output is at the antimeridian, it may be cut and + that if the output is at the antimeridian, it may be cut and of a different geometry ``type`` than the input, e.g., a polygon input may result in multi-polygon output. 
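A sketch of what a caller of the deprecated keyword sees after this change; the CRS strings and coordinates are arbitrary:

    import warnings
    from fiona.errors import FionaDeprecationWarning
    from fiona.model import Geometry
    from fiona.transform import transform_geom

    pt = Geometry(type="Point", coordinates=(-105.0, 40.0))

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        out = transform_geom("EPSG:4326", "EPSG:3857", pt, precision=3)

    assert any(issubclass(w.category, FionaDeprecationWarning) for w in caught)

Callers who want rounded output without the warning can transform first and round afterwards with the fio helpers' recursive_round, as fio-cat now does internally.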
@@ -89,7 +100,28 @@ def transform_geom( {'type': 'Point', 'coordinates': (957097.0952383667, 378940.8419189212)} """ + if precision >= 0: + warn( + "The precision keyword argument is deprecated and will be removed in 2.0", + FionaDeprecationWarning, + ) + # Function is implemented in the _transform C extension module. - return _transform_geom( - src_crs, dst_crs, geom, - antimeridian_cutting, antimeridian_offset, precision) + if isinstance(geom, (Geometry,) + DICT_TYPES): + return _transform_geom( + src_crs, + dst_crs, + _guard_model_object(geom), + antimeridian_cutting, + antimeridian_offset, + precision, + ) + else: + return _transform_geom( + src_crs, + dst_crs, + (_guard_model_object(g) for g in geom), + antimeridian_cutting, + antimeridian_offset, + precision, + ) diff --git a/tests/conftest.py b/tests/conftest.py index 239b39942..101edd781 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,18 +13,22 @@ from fiona.crs import CRS from fiona.env import GDALVersion from fiona.meta import extensions -from fiona.model import ObjectEncoder, to_dict +from fiona.model import Feature, ObjectEncoder, to_dict def pytest_report_header(config): headers = [] # gdal version number gdal_release_name = fiona.get_gdal_release_name() - headers.append('GDAL: {} ({})'.format(gdal_release_name, fiona.get_gdal_version_num())) - supported_drivers = ", ".join(sorted(list(fiona.drvsupport.supported_drivers.keys()))) + headers.append( + "GDAL: {} ({})".format(gdal_release_name, fiona.get_gdal_version_num()) + ) + supported_drivers = ", ".join( + sorted(list(fiona.drvsupport.supported_drivers.keys())) + ) # supported drivers headers.append("Supported drivers: {}".format(supported_drivers)) - return '\n'.join(headers) + return "\n".join(headers) def get_temp_filename(driver): @@ -39,7 +43,11 @@ def get_temp_filename(driver): _COUTWILDRNP_FILES = [ - 'coutwildrnp.shp', 'coutwildrnp.shx', 'coutwildrnp.dbf', 'coutwildrnp.prj'] + "coutwildrnp.shp", + "coutwildrnp.shx", + "coutwildrnp.dbf", + "coutwildrnp.prj", +] def _read_file(name): @@ -52,7 +60,7 @@ def _read_file(name): requires_gpkg = pytest.mark.skipif(not has_gpkg, reason=has_gpkg_reason) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def gdalenv(request): import fiona.env @@ -64,13 +72,13 @@ def fin(): request.addfinalizer(fin) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def data_dir(): """Absolute file path to the directory containing test datasets.""" - return os.path.abspath(os.path.join(os.path.dirname(__file__), 'data')) + return os.path.abspath(os.path.join(os.path.dirname(__file__), "data")) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def data(tmpdir, data_dir): """A temporary directory containing a copy of the files in data.""" for filename in _COUTWILDRNP_FILES: @@ -78,100 +86,99 @@ def data(tmpdir, data_dir): return tmpdir -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_curves_line_csv(data_dir): """Path to ```curves_line.csv``""" - return os.path.join(data_dir, 'curves_line.csv') + return os.path.join(data_dir, "curves_line.csv") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_test_tin_shp(data_dir): """Path to ```test_tin.shp``""" - return os.path.join(data_dir, 'test_tin.shp') + return os.path.join(data_dir, "test_tin.shp") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_test_tin_csv(data_dir): """Path to ```test_tin.csv``""" - return os.path.join(data_dir, 'test_tin.csv') + 
return os.path.join(data_dir, "test_tin.csv") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_coutwildrnp_shp(data_dir): """Path to ```coutwildrnp.shp``""" - return os.path.join(data_dir, 'coutwildrnp.shp') + return os.path.join(data_dir, "coutwildrnp.shp") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_coutwildrnp_zip(data_dir): """Creates ``coutwildrnp.zip`` if it does not exist and returns the absolute file path.""" - path = os.path.join(data_dir, 'coutwildrnp.zip') + path = os.path.join(data_dir, "coutwildrnp.zip") if not os.path.exists(path): - with zipfile.ZipFile(path, 'w') as zip: + with zipfile.ZipFile(path, "w") as zip: for filename in _COUTWILDRNP_FILES: zip.write(os.path.join(data_dir, filename), filename) return path -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_grenada_geojson(data_dir): """Path to ```grenada.geojson```""" - return os.path.join(data_dir, 'grenada.geojson') + return os.path.join(data_dir, "grenada.geojson") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def bytes_coutwildrnp_zip(path_coutwildrnp_zip): """The zip file's bytes""" - with open(path_coutwildrnp_zip, 'rb') as src: + with open(path_coutwildrnp_zip, "rb") as src: return src.read() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_coutwildrnp_tar(data_dir): """Creates ``coutwildrnp.tar`` if it does not exist and returns the absolute file path.""" - path = os.path.join(data_dir, 'coutwildrnp.tar') + path = os.path.join(data_dir, "coutwildrnp.tar") if not os.path.exists(path): - with tarfile.open(path, 'w') as tar: + with tarfile.open(path, "w") as tar: for filename in _COUTWILDRNP_FILES: tar.add( os.path.join(data_dir, filename), - arcname=os.path.join('testing', filename)) + arcname=os.path.join("testing", filename), + ) return path -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_coutwildrnp_json(data_dir): """Creates ``coutwildrnp.json`` if it does not exist and returns the absolute file path.""" - path = os.path.join(data_dir, 'coutwildrnp.json') + path = os.path.join(data_dir, "coutwildrnp.json") if not os.path.exists(path): name = _COUTWILDRNP_FILES[0] - with fiona.open(os.path.join(data_dir, name), 'r') as source: + with fiona.open(os.path.join(data_dir, name), "r") as source: features = [feat for feat in source] - my_layer = { - 'type': 'FeatureCollection', - 'features': features} - with open(path, 'w') as f: + my_layer = {"type": "FeatureCollection", "features": features} + with open(path, "w") as f: f.write(json.dumps(my_layer, cls=ObjectEncoder)) return path -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def bytes_grenada_geojson(path_grenada_geojson): """The geojson as bytes.""" - with open(path_grenada_geojson, 'rb') as src: + with open(path_grenada_geojson, "rb") as src: return src.read() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_coutwildrnp_gpkg(data_dir): """Creates ``coutwildrnp.gpkg`` if it does not exist and returns the absolute file path.""" if not has_gpkg: raise RuntimeError("GDAL has not been compiled with GPKG support") - path = os.path.join(data_dir, 'coutwildrnp.gpkg') + path = os.path.join(data_dir, "coutwildrnp.gpkg") if not os.path.exists(path): filename_shp = _COUTWILDRNP_FILES[0] path_shp = os.path.join(data_dir, filename_shp) @@ -183,70 +190,70 @@ def path_coutwildrnp_gpkg(data_dir): return path -@pytest.fixture(scope='session') 
+@pytest.fixture(scope="session") def path_gpx(data_dir): - return os.path.join(data_dir, 'test_gpx.gpx') + return os.path.join(data_dir, "test_gpx.gpx") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def feature_collection(): """GeoJSON feature collection on a single line.""" - return _read_file(os.path.join('data', 'collection.txt')) + return _read_file(os.path.join("data", "collection.txt")) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def feature_collection_pp(): """Same as above but with pretty-print styling applied.""" - return _read_file(os.path.join('data', 'collection-pp.txt')) + return _read_file(os.path.join("data", "collection-pp.txt")) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def feature_seq(): """One feature per line.""" - return _read_file(os.path.join('data', 'sequence.txt')) + return _read_file(os.path.join("data", "sequence.txt")) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def feature_seq_pp_rs(): """Same as above but each feature has pretty-print styling""" - return _read_file(os.path.join('data', 'sequence-pp.txt')) + return _read_file(os.path.join("data", "sequence-pp.txt")) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def runner(): """Returns a ```click.testing.CliRunner()`` instance.""" return CliRunner() -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def uttc_path_coutwildrnp_zip(path_coutwildrnp_zip, request): """Make the ``path_coutwildrnp_zip`` fixture work with a ``unittest.TestCase()``. ``uttc`` stands for unittest test case.""" request.cls.path_coutwildrnp_zip = path_coutwildrnp_zip -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def uttc_path_coutwildrnp_tar(path_coutwildrnp_tar, request): """Make the ``path_coutwildrnp_tar`` fixture work with a ``unittest.TestCase()``. ``uttc`` stands for unittest test case.""" request.cls.path_coutwildrnp_tar = path_coutwildrnp_tar -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def uttc_path_coutwildrnp_json(path_coutwildrnp_json, request): """Make the ``path_coutwildrnp_json`` fixture work with a ``unittest.TestCase()``. ``uttc`` stands for unittest test case.""" request.cls.path_coutwildrnp_json = path_coutwildrnp_json -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def uttc_data_dir(data_dir, request): """Make the ``data_dir`` fixture work with a ``unittest.TestCase()``. ``uttc`` stands for unittest test case.""" request.cls.data_dir = data_dir -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def uttc_path_gpx(path_gpx, request): """Make the ``path_gpx`` fixture work with a ``unittest.TestCase()``. 
``uttc`` stands for unittest test case.""" @@ -255,38 +262,40 @@ def uttc_path_gpx(path_gpx, request): # GDAL 2.3.x silently converts ESRI WKT to OGC WKT # The regular expression below will match against either -WGS84PATTERN = r'GEOGCS\["(?:GCS_WGS_1984|WGS 84)",DATUM\["WGS_1984",SPHEROID\["WGS[_ ]84"' +WGS84PATTERN = ( + r'GEOGCS\["(?:GCS_WGS_1984|WGS 84)",DATUM\["WGS_1984",SPHEROID\["WGS[_ ]84"' +) # Define helpers to skip tests based on GDAL version gdal_version = GDALVersion.runtime() requires_only_gdal1 = pytest.mark.skipif( - gdal_version.major != 1, - reason="Only relevant for GDAL 1.x") + gdal_version.major != 1, reason="Only relevant for GDAL 1.x" +) requires_gdal2 = pytest.mark.skipif( - not gdal_version.major >= 2, - reason="Requires at least GDAL 2.x") + not gdal_version.major >= 2, reason="Requires at least GDAL 2.x" +) requires_gdal21 = pytest.mark.skipif( - not gdal_version.at_least('2.1'), - reason="Requires at least GDAL 2.1.x") + not gdal_version.at_least("2.1"), reason="Requires at least GDAL 2.1.x" +) requires_gdal22 = pytest.mark.skipif( - not gdal_version.at_least('2.2'), - reason="Requires at least GDAL 2.2.x") + not gdal_version.at_least("2.2"), reason="Requires at least GDAL 2.2.x" +) requires_gdal23 = pytest.mark.skipif( - not gdal_version.at_least('2.3'), - reason="Requires at least GDAL 2.3.x") + not gdal_version.at_least("2.3"), reason="Requires at least GDAL 2.3.x" +) requires_gdal24 = pytest.mark.skipif( - not gdal_version.at_least('2.4'), - reason="Requires at least GDAL 2.4.x") + not gdal_version.at_least("2.4"), reason="Requires at least GDAL 2.4.x" +) requires_gdal_lt_3 = pytest.mark.skipif( - not gdal_version.major < 3, - reason="Requires at least GDAL < 3") + not gdal_version.major < 3, reason="Requires at least GDAL < 3" +) requires_gdal3 = pytest.mark.skipif( not gdal_version.major >= 3, reason="Requires at least GDAL 3.x" @@ -323,95 +332,178 @@ def unittest_path_coutwildrnp_shp(path_coutwildrnp_shp, request): @pytest.fixture() def testdata_generator(): - """ Helper function to create test data sets for ideally all supported drivers - """ + """Helper function to create test data sets for, ideally, all supported drivers.""" def get_schema(driver): - special_schemas = {'CSV': {'geometry': None, 'properties': OrderedDict([('position', 'int')])}, - 'BNA': {'geometry': 'Point', 'properties': {}}, - 'DXF': {'properties': OrderedDict( - [('Layer', 'str'), - ('SubClasses', 'str'), - ('Linetype', 'str'), - ('EntityHandle', 'str'), - ('Text', 'str')]), - 'geometry': 'Point'}, - 'GPX': {'geometry': 'Point', - 'properties': OrderedDict([('ele', 'float'), ('time', 'datetime')])}, - 'GPSTrackMaker': {'properties': OrderedDict([]), 'geometry': 'Point'}, - 'DGN': {'properties': OrderedDict([]), 'geometry': 'LineString'}, - 'MapInfo File': {'geometry': 'Point', 'properties': OrderedDict([('position', 'str')])} - } - - return special_schemas.get(driver, {'geometry': 'Point', 'properties': OrderedDict([('position', 'int')])}) + special_schemas = { + "CSV": {"geometry": None, "properties": OrderedDict([("position", "int")])}, + "BNA": {"geometry": "Point", "properties": {}}, + "DXF": { + "properties": OrderedDict( + [ + ("Layer", "str"), + ("SubClasses", "str"), + ("Linetype", "str"), + ("EntityHandle", "str"), + ("Text", "str"), + ] + ), + "geometry": "Point", + }, + "GPX": { + "geometry": "Point", + "properties": OrderedDict([("ele", "float"), ("time", "datetime")]), + }, + "GPSTrackMaker": {"properties": OrderedDict([]), "geometry": "Point"}, + "DGN": {"properties": 
OrderedDict([]), "geometry": "LineString"}, + "MapInfo File": { + "geometry": "Point", + "properties": OrderedDict([("position", "str")]), + }, + } + + return special_schemas.get( + driver, + {"geometry": "Point", "properties": OrderedDict([("position", "int")])}, + ) def get_crs(driver): special_crs = {"MapInfo File": CRS.from_epsg(4326)} return special_crs.get(driver, None) def get_records(driver, range): - special_records1 = {'CSV': [{'geometry': None, 'properties': {'position': i}} for i in range], - 'BNA': [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, 'properties': {}} - for i - in range], - 'DXF': [ - {'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, - 'properties': OrderedDict( - [('Layer', '0'), - ('SubClasses', 'AcDbEntity:AcDbPoint'), - ('Linetype', None), - ('EntityHandle', str(i + 20000)), - ('Text', None)])} for i in range], - 'GPX': [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, - 'properties': {'ele': 0.0, 'time': '2020-03-24T16:08:40+00:00'}} for i - in range], - 'GPSTrackMaker': [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, - 'properties': {}} for i in range], - 'DGN': [ - {'geometry': {'type': 'LineString', 'coordinates': [(float(i), 0.0), (0.0, 0.0)]}, - 'properties': {}} for i in range], - 'MapInfo File': [ - {'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, - 'properties': {'position': str(i)}} for i in range], - 'PCIDSK': [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i), 0.0)}, - 'properties': {'position': i}} for i in range] - } - return special_records1.get(driver, [ - {'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, 'properties': {'position': i}} for i in - range]) + special_records1 = { + "CSV": [ + Feature.from_dict(**{"geometry": None, "properties": {"position": i}}) + for i in range + ], + "BNA": [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {}, + } + ) + for i in range + ], + "DXF": [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": OrderedDict( + [ + ("Layer", "0"), + ("SubClasses", "AcDbEntity:AcDbPoint"), + ("Linetype", None), + ("EntityHandle", str(i + 20000)), + ("Text", None), + ] + ), + } + ) + for i in range + ], + "GPX": [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {"ele": 0.0, "time": "2020-03-24T16:08:40+00:00"}, + } + ) + for i in range + ], + "GPSTrackMaker": [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {}, + } + ) + for i in range + ], + "DGN": [ + Feature.from_dict( + **{ + "geometry": { + "type": "LineString", + "coordinates": [(float(i), 0.0), (0.0, 0.0)], + }, + "properties": {}, + } + ) + for i in range + ], + "MapInfo File": [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {"position": str(i)}, + } + ) + for i in range + ], + "PCIDSK": [ + Feature.from_dict( + **{ + "geometry": { + "type": "Point", + "coordinates": (0.0, float(i), 0.0), + }, + "properties": {"position": i}, + } + ) + for i in range + ], + } + return special_records1.get( + driver, + [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {"position": i}, + } + ) + for i in range + ], + ) def get_records2(driver, range): special_records2 = { "DGN": [ - { - "geometry": { - "type": "LineString", - 
"coordinates": [(float(i), 0.0), (0.0, 0.0)], - }, - "properties": OrderedDict( - [ - ("Type", 4), - ("Level", 0), - ("GraphicGroup", 0), - ("ColorIndex", 0), - ("Weight", 0), - ("Style", 0), - ("EntityNum", None), - ("MSLink", None), - ("Text", None), - ] - + ([("ULink", None)] if gdal_version.at_least("3.3") else []) - ), - } + Feature.from_dict( + **{ + "geometry": { + "type": "LineString", + "coordinates": [(float(i), 0.0), (0.0, 0.0)], + }, + "properties": OrderedDict( + [ + ("Type", 4), + ("Level", 0), + ("GraphicGroup", 0), + ("ColorIndex", 0), + ("Weight", 0), + ("Style", 0), + ("EntityNum", None), + ("MSLink", None), + ("Text", None), + ] + + ( + [("ULink", None)] + if gdal_version.at_least("3.3") + else [] + ) + ), + } + ) for i in range ], } return special_records2.get(driver, get_records(driver, range)) def get_create_kwargs(driver): - kwargs = { - 'FlatGeobuf': {'SPATIAL_INDEX': False} - } + kwargs = {"FlatGeobuf": {"SPATIAL_INDEX": False}} return kwargs.get(driver, {}) def test_equal(driver, val_in, val_out): @@ -472,7 +564,7 @@ def _testdata_generator(driver, range1, range2): return _testdata_generator -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def path_test_tz_geojson(data_dir): """Path to ```test_tz.geojson``""" - return os.path.join(data_dir, 'test_tz.geojson') + return os.path.join(data_dir, "test_tz.geojson") diff --git a/tests/test_bigint.py b/tests/test_bigint.py index 269c11660..799266358 100644 --- a/tests/test_bigint.py +++ b/tests/test_bigint.py @@ -19,51 +19,58 @@ import fiona from fiona.env import calc_gdal_version_num, get_gdal_version_num +from fiona.model import Feature -@pytest.mark.xfail(fiona.gdal_version.major < 2, - reason="64-bit integer fields require GDAL 2+") def testCreateBigIntSchema(tmpdir): - name = str(tmpdir.join('output1.shp')) + name = str(tmpdir.join("output1.shp")) a_bigint = 10 ** 18 - 1 - fieldname = 'abigint' + fieldname = "abigint" kwargs = { - 'driver': 'ESRI Shapefile', - 'crs': 'EPSG:4326', - 'schema': { - 'geometry': 'Point', - 'properties': [(fieldname, 'int:10')]}} + "driver": "ESRI Shapefile", + "crs": "EPSG:4326", + "schema": {"geometry": "Point", "properties": [(fieldname, "int:10")]}, + } - with fiona.open(name, 'w', **kwargs) as dst: + with fiona.open(name, "w", **kwargs) as dst: rec = {} - rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)} - rec['properties'] = {fieldname: a_bigint} - dst.write(rec) + rec["geometry"] = {"type": "Point", "coordinates": (0, 0)} + rec["properties"] = {fieldname: a_bigint} + dst.write(Feature.from_dict(**rec)) with fiona.open(name) as src: if fiona.gdal_version >= (2, 0, 0): first = next(iter(src)) - assert first['properties'][fieldname] == a_bigint + assert first["properties"][fieldname] == a_bigint -@pytest.mark.skipif(get_gdal_version_num() < calc_gdal_version_num(2, 0, 0), - reason="Test requires GDAL 2+") -@pytest.mark.parametrize('dtype', ['int', 'int64']) +@pytest.mark.parametrize("dtype", ["int", "int64"]) def test_issue691(tmpdir, dtype): """Type 'int' maps to 'int64'""" - schema = {'geometry': 'Any', 'properties': {'foo': dtype}} + schema = {"geometry": "Any", "properties": {"foo": dtype}} with fiona.open( - str(tmpdir.join('test.shp')), 'w', driver='Shapefile', - schema=schema, crs='epsg:4326') as dst: - dst.write({ - 'type': 'Feature', - 'geometry': {'type': 'Point', - 'coordinates': (-122.278015, 37.868995)}, - 'properties': {'foo': 3694063472}}) + str(tmpdir.join("test.shp")), + "w", + driver="Shapefile", + schema=schema, + crs="epsg:4326", + ) as 
dst: + dst.write( + Feature.from_dict( + **{ + "type": "Feature", + "geometry": { + "type": "Point", + "coordinates": (-122.278015, 37.868995), + }, + "properties": {"foo": 3694063472}, + } + ) + ) - with fiona.open(str(tmpdir.join('test.shp'))) as src: - assert src.schema['properties']['foo'] == 'int:18' + with fiona.open(str(tmpdir.join("test.shp"))) as src: + assert src.schema["properties"]["foo"] == "int:18" first = next(iter(src)) - assert first['properties']['foo'] == 3694063472 + assert first["properties"]["foo"] == 3694063472 diff --git a/tests/test_binary_field.py b/tests/test_binary_field.py index 19e69a975..b8e6bbacc 100644 --- a/tests/test_binary_field.py +++ b/tests/test_binary_field.py @@ -1,8 +1,12 @@ -import fiona +"""Binary BLOB field testing.""" import pytest import struct from collections import OrderedDict + +import fiona +from fiona.model import Feature + from .conftest import requires_gpkg @@ -12,11 +16,13 @@ def test_binary_field(tmpdir): "driver": "GPKG", "schema": { "geometry": "Point", - "properties": OrderedDict([ - ("name", "str"), - ("data", "bytes"), - ]) - } + "properties": OrderedDict( + [ + ("name", "str"), + ("data", "bytes"), + ] + ), + }, } # create some binary data @@ -25,18 +31,20 @@ def test_binary_field(tmpdir): # write the binary data to a BLOB field filename = str(tmpdir.join("binary_test.gpkg")) with fiona.open(filename, "w", **meta) as dst: - feature = { - "geometry": {"type": "Point", "coordinates": ((0, 0))}, - "properties": { - "name": "test", - "data": input_data, + feature = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": ((0, 0))}, + "properties": { + "name": "test", + "data": input_data, + }, } - } + ) dst.write(feature) # read the data back and check consistency with fiona.open(filename, "r") as src: feature = next(iter(src)) - assert feature["properties"]["name"] == "test" - output_data = feature["properties"]["data"] + assert feature.properties["name"] == "test" + output_data = feature.properties["data"] assert output_data == input_data diff --git a/tests/test_bounds.py b/tests/test_bounds.py index 4080ebad0..73f94b876 100644 --- a/tests/test_bounds.py +++ b/tests/test_bounds.py @@ -9,24 +9,25 @@ def test_bounds_point(): - g = {'type': 'Point', 'coordinates': [10, 10]} + g = {"type": "Point", "coordinates": [10, 10]} assert fiona.bounds(g) == (10, 10, 10, 10) def test_bounds_line(): - g = {'type': 'LineString', 'coordinates': [[0, 0], [10, 10]]} + g = {"type": "LineString", "coordinates": [[0, 0], [10, 10]]} assert fiona.bounds(g) == (0, 0, 10, 10) def test_bounds_polygon(): - g = {'type': 'Polygon', 'coordinates': [[[0, 0], [10, 10], [10, 0]]]} + g = {"type": "Polygon", "coordinates": [[[0, 0], [10, 10], [10, 0]]]} assert fiona.bounds(g) == (0, 0, 10, 10) def test_bounds_z(): - g = {'type': 'Point', 'coordinates': [10, 10, 10]} + g = {"type": "Point", "coordinates": [10, 10, 10]} assert fiona.bounds(g) == (10, 10, 10, 10) + # MapInfo File driver requires that the bounds (geographical extents) of a new file # be set before writing the first feature (https://gdal.org/drivers/vector/mitab.html) @@ -64,8 +65,8 @@ def calc_bounds(records): xs = [] ys = [] for r in records: - xs.append(r["geometry"]["coordinates"][0]) - ys.append(r["geometry"]["coordinates"][1]) + xs.append(r.geometry["coordinates"][0]) + ys.append(r.geometry["coordinates"][1]) return min(xs), max(xs), min(ys), max(ys) with fiona.open(path, "w", driver=driver, schema=schema) as c: diff --git a/tests/test_bytescollection.py 
b/tests/test_bytescollection.py index 5f7cddd84..ad4c7112a 100644 --- a/tests/test_bytescollection.py +++ b/tests/test_bytescollection.py @@ -4,13 +4,14 @@ import pytest import fiona +from fiona.model import Geometry class TestReading(object): @pytest.fixture(autouse=True) def bytes_collection_object(self, path_coutwildrnp_json): with open(path_coutwildrnp_json) as src: - bytesbuf = src.read().encode('utf-8') + bytesbuf = src.read().encode("utf-8") self.c = fiona.BytesCollection(bytesbuf, encoding="utf-8") yield self.c.close() @@ -47,10 +48,10 @@ def test_name(self): assert len(self.c.name) > 0 def test_mode(self): - assert self.c.mode == 'r' + assert self.c.mode == "r" def test_collection(self): - assert self.c.encoding == 'utf-8' + assert self.c.encoding == "utf-8" def test_iter(self): assert iter(self.c) @@ -87,12 +88,12 @@ def test_driver_closed_driver(self): assert self.c.driver == "GeoJSON" def test_schema(self): - s = self.c.schema['properties'] - assert s['PERIMETER'] == "float" - assert s['NAME'] == "str" - assert s['URL'] == "str" - assert s['STATE_FIPS'] == "str" - assert s['WILDRNP020'] == "int" + s = self.c.schema["properties"] + assert s["PERIMETER"] == "float" + assert s["NAME"] == "str" + assert s["URL"] == "str" + assert s["STATE_FIPS"] == "str" + assert s["WILDRNP020"] == "int" def test_closed_schema(self): # Schema is lazy too, never computed in this case. TODO? @@ -102,10 +103,10 @@ def test_closed_schema(self): def test_schema_closed_schema(self): self.c.schema self.c.close() - assert sorted(self.c.schema.keys()) == ['geometry', 'properties'] + assert sorted(self.c.schema.keys()) == ["geometry", "properties"] def test_crs(self): - assert self.c.crs['init'] == 'epsg:4326' + assert self.c.crs["init"] == "epsg:4326" def test_crs_wkt(self): assert self.c.crs_wkt.startswith('GEOGCS["WGS 84"') @@ -118,11 +119,10 @@ def test_closed_crs(self): def test_crs_closed_crs(self): self.c.crs self.c.close() - assert sorted(self.c.crs.keys()) == ['init'] + assert sorted(self.c.crs.keys()) == ["init"] def test_meta(self): - assert (sorted(self.c.meta.keys()) == - ['crs', 'crs_wkt', 'driver', 'schema']) + assert sorted(self.c.meta.keys()) == ["crs", "crs_wkt", "driver", "schema"] def test_bounds(self): assert self.c.bounds[0] == pytest.approx(-113.564247) @@ -133,24 +133,24 @@ def test_bounds(self): def test_iter_one(self): itr = iter(self.c) f = next(itr) - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_iter_list(self): f = list(self.c)[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_re_iter_list(self): f = list(self.c)[0] # Run through iterator f = list(self.c)[0] # Run through a new, reset iterator - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_getitem_one(self): f = self.c[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_no_write(self): with pytest.raises(OSError): @@ -159,8 +159,8 @@ def test_no_write(self): def test_iter_items_list(self): i, f = list(self.c.items())[0] assert i == 0 - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_iter_keys_list(self): i = list(self.c.keys())[0] @@ -175,7 +175,7 @@ class 
TestFilterReading(object): @pytest.fixture(autouse=True) def bytes_collection_object(self, path_coutwildrnp_json): with open(path_coutwildrnp_json) as src: - bytesbuf = src.read().encode('utf-8') + bytesbuf = src.read().encode("utf-8") self.c = fiona.BytesCollection(bytesbuf) yield self.c.close() @@ -184,8 +184,8 @@ def test_filter_1(self): results = list(self.c.filter(bbox=(-120.0, 30.0, -100.0, 50.0))) assert len(results) == 67 f = results[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" def test_filter_reset(self): results = list(self.c.filter(bbox=(-112.0, 38.0, -106.0, 40.0))) @@ -194,10 +194,14 @@ def test_filter_reset(self): assert len(results) == 67 def test_filter_mask(self): - mask = { - 'type': 'Polygon', - 'coordinates': ( - ((-112, 38), (-112, 40), (-106, 40), (-106, 38), (-112, 38)),)} + mask = Geometry.from_dict( + **{ + "type": "Polygon", + "coordinates": ( + ((-112, 38), (-112, 40), (-106, 40), (-106, 38), (-112, 38)), + ), + } + ) results = list(self.c.filter(mask=mask)) assert len(results) == 26 @@ -205,12 +209,15 @@ def test_filter_mask(self): def test_zipped_bytes_collection(bytes_coutwildrnp_zip): """Open a zipped stream of bytes as a collection""" with fiona.BytesCollection(bytes_coutwildrnp_zip) as col: - assert col.name == 'coutwildrnp' + assert col.name == "coutwildrnp" assert len(col) == 67 -@pytest.mark.skipif(fiona.gdal_version >= (2, 3, 0), + +@pytest.mark.skipif( + fiona.gdal_version >= (2, 3, 0), reason="Changed behavior with gdal 2.3, possibly related to RFC 70:" - "Guessing output format from output file name extension for utilities") + "Guessing output format from output file name extension for utilities", +) def test_grenada_bytes_geojson(bytes_grenada_geojson): """Read grenada.geojson as BytesCollection. @@ -224,5 +231,5 @@ def test_grenada_bytes_geojson(bytes_grenada_geojson): pass # If told what driver to use, we should be good. 
- with fiona.BytesCollection(bytes_grenada_geojson, driver='GeoJSON') as col: + with fiona.BytesCollection(bytes_grenada_geojson, driver="GeoJSON") as col: assert len(col) == 1 diff --git a/tests/test_collection.py b/tests/test_collection.py index 8ac1a384e..90aba9f4b 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -14,7 +14,10 @@ from fiona.drvsupport import supported_drivers, driver_mode_mingdal from fiona.env import getenv, GDALVersion from fiona.errors import ( - AttributeFilterError, FionaValueError, DriverError, FionaDeprecationWarning + AttributeFilterError, + FionaValueError, + DriverError, + FionaDeprecationWarning, ) from fiona.model import Feature, Geometry @@ -22,7 +25,6 @@ class TestSupportedDrivers(object): - def test_shapefile(self): assert "ESRI Shapefile" in supported_drivers assert set(supported_drivers["ESRI Shapefile"]) == set("raw") @@ -33,7 +35,6 @@ def test_map(self): class TestCollectionArgs(object): - def test_path(self): with pytest.raises(TypeError): Collection(0) @@ -44,51 +45,49 @@ def test_mode(self): def test_driver(self): with pytest.raises(TypeError): - Collection("foo", mode='w', driver=1) + Collection("foo", mode="w", driver=1) def test_schema(self): with pytest.raises(TypeError): - Collection("foo", mode='w', driver="ESRI Shapefile", schema=1) + Collection("foo", mode="w", driver="ESRI Shapefile", schema=1) def test_crs(self): with pytest.raises(TypeError): - Collection("foo", mode='w', driver="ESRI Shapefile", schema=0, - crs=1) + Collection("foo", mode="w", driver="ESRI Shapefile", schema=0, crs=1) def test_encoding(self): with pytest.raises(TypeError): - Collection("foo", mode='r', encoding=1) + Collection("foo", mode="r", encoding=1) def test_layer(self): with pytest.raises(TypeError): - Collection("foo", mode='r', layer=0.5) + Collection("foo", mode="r", layer=0.5) def test_vsi(self): with pytest.raises(TypeError): - Collection("foo", mode='r', vsi='git') + Collection("foo", mode="r", vsi="git") def test_archive(self): with pytest.raises(TypeError): - Collection("foo", mode='r', archive=1) + Collection("foo", mode="r", archive=1) def test_write_numeric_layer(self): with pytest.raises(ValueError): - Collection("foo", mode='w', layer=1) + Collection("foo", mode="w", layer=1) def test_write_geojson_layer(self): with pytest.raises(ValueError): - Collection("foo", mode='w', driver='GeoJSON', layer='foo') + Collection("foo", mode="w", driver="GeoJSON", layer="foo") def test_append_geojson(self): with pytest.raises(ValueError): - Collection("foo", mode='w', driver='ARCGEN') + Collection("foo", mode="w", driver="ARCGEN") class TestOpenException(object): - def test_no_archive(self): with pytest.warns(FionaDeprecationWarning), pytest.raises(DriverError): - fiona.open("/", mode='r', vfs="zip:///foo.zip") + fiona.open("/", mode="r", vfs="zip:///foo.zip") class TestReading(object): @@ -99,28 +98,26 @@ def shapefile(self, path_coutwildrnp_shp): self.c.close() def test_open_repr(self, path_coutwildrnp_shp): - assert ( - repr(self.c) == - ("<open Collection '{path}:coutwildrnp', mode 'r' at {hexid}>".format(hexid=hex(id(self.c)), - path=path_coutwildrnp_shp))) + assert repr(self.c) == ( + "<open Collection '{path}:coutwildrnp', mode 'r' at {hexid}>".format(hexid=hex(id(self.c)), path=path_coutwildrnp_shp) + ) def test_closed_repr(self, path_coutwildrnp_shp): self.c.close() - assert ( - repr(self.c) == - ("<closed Collection '{path}:coutwildrnp', mode 'r' at {hexid}>".format(hexid=hex(id(self.c)), - path=path_coutwildrnp_shp))) + assert repr(self.c) == ( + "<closed Collection '{path}:coutwildrnp', mode 'r' at {hexid}>".format(hexid=hex(id(self.c)), path=path_coutwildrnp_shp) + ) def test_path(self, path_coutwildrnp_shp): assert self.c.path == path_coutwildrnp_shp def 
test_name(self): - assert self.c.name == 'coutwildrnp' + assert self.c.name == "coutwildrnp" def test_mode(self): - assert self.c.mode == 'r' + assert self.c.mode == "r" def test_encoding(self): assert self.c.encoding is None @@ -160,12 +157,12 @@ def test_driver_closed_driver(self): assert self.c.driver == "ESRI Shapefile" def test_schema(self): - s = self.c.schema['properties'] - assert s['PERIMETER'] == "float:24.15" - assert s['NAME'] == "str:80" - assert s['URL'] == "str:101" - assert s['STATE_FIPS'] == "str:80" - assert s['WILDRNP020'] == "int:10" + s = self.c.schema["properties"] + assert s["PERIMETER"] == "float:24.15" + assert s["NAME"] == "str:80" + assert s["URL"] == "str:101" + assert s["STATE_FIPS"] == "str:80" + assert s["WILDRNP020"] == "int:10" def test_closed_schema(self): # Schema is lazy too, never computed in this case. TODO? @@ -175,11 +172,11 @@ def test_closed_schema(self): def test_schema_closed_schema(self): self.c.schema self.c.close() - assert sorted(self.c.schema.keys()) == ['geometry', 'properties'] + assert sorted(self.c.schema.keys()) == ["geometry", "properties"] def test_crs(self): crs = self.c.crs - assert crs['init'] == 'epsg:4326' + assert crs["init"] == "epsg:4326" def test_crs_wkt(self): crs = self.c.crs_wkt @@ -193,15 +190,13 @@ def test_closed_crs(self): def test_crs_closed_crs(self): self.c.crs self.c.close() - assert sorted(self.c.crs.keys()) == ['init'] + assert sorted(self.c.crs.keys()) == ["init"] def test_meta(self): - assert (sorted(self.c.meta.keys()) == - ['crs', 'crs_wkt', 'driver', 'schema']) + assert sorted(self.c.meta.keys()) == ["crs", "crs_wkt", "driver", "schema"] def test_profile(self): - assert (sorted(self.c.profile.keys()) == - ['crs', 'crs_wkt', 'driver', 'schema']) + assert sorted(self.c.profile.keys()) == ["crs", "crs_wkt", "driver", "schema"] def test_bounds(self): assert self.c.bounds[0] == pytest.approx(-113.564247) @@ -211,7 +206,7 @@ def test_bounds(self): def test_context(self, path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp, "r") as c: - assert c.name == 'coutwildrnp' + assert c.name == "coutwildrnp" assert len(c) == 67 assert c.crs assert c.closed @@ -219,34 +214,34 @@ def test_context(self, path_coutwildrnp_shp): def test_iter_one(self): itr = iter(self.c) f = next(itr) - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" def test_iter_list(self): f = list(self.c)[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" def test_re_iter_list(self): f = list(self.c)[0] # Run through iterator f = list(self.c)[0] # Run through a new, reset iterator - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" def test_getitem_one(self): f = self.c[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" def test_getitem_iter_combo(self): i = iter(self.c) f = next(i) f = next(i) - assert f['id'] == "1" + assert f.id == "1" f = self.c[0] - assert f['id'] == "0" + assert f.id == "0" f = next(i) - assert f['id'] == "2" + assert f.id == "2" def test_no_write(self): with pytest.raises(OSError): @@ -255,8 +250,8 @@ def test_no_write(self): def test_iter_items_list(self): i, f = list(self.c.items())[0] assert i == 0 - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" 
def test_iter_keys_list(self): i = list(self.c.keys())[0] @@ -271,40 +266,43 @@ class TestReadingPathTest(object): def test_open_path(self, path_coutwildrnp_shp): pathlib = pytest.importorskip("pathlib") with fiona.open(pathlib.Path(path_coutwildrnp_shp)) as collection: - assert collection.name == 'coutwildrnp' + assert collection.name == "coutwildrnp" @pytest.mark.usefixtures("unittest_path_coutwildrnp_shp") class TestIgnoreFieldsAndGeometry(object): - def test_without_ignore(self): with fiona.open(self.path_coutwildrnp_shp, "r") as collection: - assert("AREA" in collection.schema["properties"].keys()) - assert("STATE" in collection.schema["properties"].keys()) - assert("NAME" in collection.schema["properties"].keys()) - assert("geometry" in collection.schema.keys()) + assert "AREA" in collection.schema["properties"].keys() + assert "STATE" in collection.schema["properties"].keys() + assert "NAME" in collection.schema["properties"].keys() + assert "geometry" in collection.schema.keys() feature = next(iter(collection)) - assert(feature["properties"]["AREA"] is not None) - assert(feature["properties"]["STATE"] is not None) - assert(feature["properties"]["NAME"] is not None) - assert(feature["geometry"] is not None) + assert feature["properties"]["AREA"] is not None + assert feature["properties"]["STATE"] is not None + assert feature["properties"]["NAME"] is not None + assert feature["geometry"] is not None def test_ignore_fields(self): - with fiona.open(self.path_coutwildrnp_shp, "r", ignore_fields=["AREA", "STATE"]) as collection: - assert("AREA" not in collection.schema["properties"].keys()) - assert("STATE" not in collection.schema["properties"].keys()) - assert("NAME" in collection.schema["properties"].keys()) - assert("geometry" in collection.schema.keys()) + with fiona.open( + self.path_coutwildrnp_shp, "r", ignore_fields=["AREA", "STATE"] + ) as collection: + assert "AREA" not in collection.schema["properties"].keys() + assert "STATE" not in collection.schema["properties"].keys() + assert "NAME" in collection.schema["properties"].keys() + assert "geometry" in collection.schema.keys() feature = next(iter(collection)) - assert("AREA" not in feature["properties"].keys()) - assert("STATE" not in feature["properties"].keys()) - assert(feature["properties"]["NAME"] is not None) - assert(feature["geometry"] is not None) + assert "AREA" not in feature["properties"].keys() + assert "STATE" not in feature["properties"].keys() + assert feature["properties"]["NAME"] is not None + assert feature["geometry"] is not None def test_ignore_invalid_field_missing(self): - with fiona.open(self.path_coutwildrnp_shp, "r", ignore_fields=["DOES_NOT_EXIST"]): + with fiona.open( + self.path_coutwildrnp_shp, "r", ignore_fields=["DOES_NOT_EXIST"] + ): pass def test_ignore_invalid_field_not_string(self): @@ -313,42 +311,53 @@ def test_ignore_invalid_field_not_string(self): pass def test_include_fields(self): - with fiona.open(self.path_coutwildrnp_shp, "r", include_fields=["AREA", "STATE"]) as collection: + with fiona.open( + self.path_coutwildrnp_shp, "r", include_fields=["AREA", "STATE"] + ) as collection: assert sorted(collection.schema["properties"]) == ["AREA", "STATE"] - assert("geometry" in collection.schema.keys()) + assert "geometry" in collection.schema.keys() feature = next(iter(collection)) assert sorted(feature["properties"]) == ["AREA", "STATE"] - assert(feature["properties"]["AREA"] is not None) - assert(feature["properties"]["STATE"] is not None) - assert(feature["geometry"] is not None) + assert 
feature["properties"]["AREA"] is not None + assert feature["properties"]["STATE"] is not None + assert feature["geometry"] is not None def test_include_fields__geom_only(self): - with fiona.open(self.path_coutwildrnp_shp, "r", include_fields=()) as collection: + with fiona.open( + self.path_coutwildrnp_shp, "r", include_fields=() + ) as collection: assert sorted(collection.schema["properties"]) == [] - assert("geometry" in collection.schema.keys()) + assert "geometry" in collection.schema.keys() feature = next(iter(collection)) assert sorted(feature["properties"]) == [] - assert(feature["geometry"] is not None) + assert feature["geometry"] is not None def test_include_fields__ignore_fields_error(self): with pytest.raises(ValueError): - with fiona.open(self.path_coutwildrnp_shp, "r", include_fields=["AREA"], ignore_fields=["STATE"]) as collection: + with fiona.open( + self.path_coutwildrnp_shp, + "r", + include_fields=["AREA"], + ignore_fields=["STATE"], + ) as collection: pass def test_ignore_geometry(self): - with fiona.open(self.path_coutwildrnp_shp, "r", ignore_geometry=True) as collection: - assert("AREA" in collection.schema["properties"].keys()) - assert("STATE" in collection.schema["properties"].keys()) - assert("NAME" in collection.schema["properties"].keys()) - assert("geometry" not in collection.schema.keys()) + with fiona.open( + self.path_coutwildrnp_shp, "r", ignore_geometry=True + ) as collection: + assert "AREA" in collection.schema["properties"].keys() + assert "STATE" in collection.schema["properties"].keys() + assert "NAME" in collection.schema["properties"].keys() + assert "geometry" not in collection.schema.keys() feature = next(iter(collection)) - assert(feature.properties["AREA"] is not None) - assert(feature.properties["STATE"] is not None) - assert(feature.properties["NAME"] is not None) - assert(feature.geometry is None) + assert feature.properties["AREA"] is not None + assert feature.properties["STATE"] is not None + assert feature.properties["NAME"] is not None + assert feature.geometry is None class TestFilterReading(object): @@ -362,8 +371,8 @@ def test_filter_1(self): results = list(self.c.filter(bbox=(-120.0, 30.0, -100.0, 50.0))) assert len(results) == 67 f = results[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f.id == "0" + assert f.properties["STATE"] == "UT" def test_filter_reset(self): results = list(self.c.filter(bbox=(-112.0, 38.0, -106.0, 40.0))) @@ -372,18 +381,21 @@ def test_filter_reset(self): assert len(results) == 67 def test_filter_mask(self): - mask = { - 'type': 'Polygon', - 'coordinates': ( - ((-112, 38), (-112, 40), (-106, 40), (-106, 38), (-112, 38)),)} + mask = Geometry.from_dict( + **{ + "type": "Polygon", + "coordinates": ( + ((-112, 38), (-112, 40), (-106, 40), (-106, 38), (-112, 38)), + ), + } + ) results = list(self.c.filter(mask=mask)) assert len(results) == 26 def test_filter_where(self): results = list(self.c.filter(where="NAME LIKE 'Mount%'")) assert len(results) == 9 - assert all([x['properties']['NAME'].startswith('Mount') - for x in results]) + assert all([x.properties["NAME"].startswith("Mount") for x in results]) results = list(self.c.filter(where="NAME LIKE '%foo%'")) assert len(results) == 0 results = list(self.c.filter()) @@ -391,8 +403,9 @@ def test_filter_where(self): def test_filter_bbox_where(self): # combined filter criteria - results = set(self.c.keys( - bbox=(-120.0, 40.0, -100.0, 50.0), where="NAME LIKE 'Mount%'")) + results = set( + self.c.keys(bbox=(-120.0, 40.0, -100.0, 50.0), 
where="NAME LIKE 'Mount%'") + ) assert results == set([0, 2, 5, 13]) results = set(self.c.keys()) assert len(results) == 67 @@ -404,11 +417,11 @@ def test_filter_where_error(self): class TestUnsupportedDriver(object): - def test_immediate_fail_driver(self, tmpdir): schema = { - 'geometry': 'Point', - 'properties': {'label': 'str', 'verit\xe9': 'int'}} + "geometry": "Point", + "properties": {"label": "str", "verit\xe9": "int"}, + } with pytest.raises(DriverError): fiona.open(str(tmpdir.join("foo")), "w", "Bogus", schema=schema) @@ -418,16 +431,21 @@ class TestGenericWritingTest(object): @pytest.fixture(autouse=True) def no_iter_shp(self, tmpdir): schema = { - 'geometry': 'Point', - 'properties': [('label', 'str'), ('verit\xe9', 'int')]} - self.c = fiona.open(str(tmpdir.join("test-no-iter.shp")), - 'w', driver="ESRI Shapefile", schema=schema, - encoding='Windows-1252') + "geometry": "Point", + "properties": [("label", "str"), ("verit\xe9", "int")], + } + self.c = fiona.open( + str(tmpdir.join("test-no-iter.shp")), + "w", + driver="ESRI Shapefile", + schema=schema, + encoding="Windows-1252", + ) yield self.c.close() def test_encoding(self): - assert self.c.encoding == 'Windows-1252' + assert self.c.encoding == "Windows-1252" def test_no_iter(self): with pytest.raises(OSError): @@ -445,207 +463,216 @@ def shapefile(self, tmpdir): _records_with_float_property1 = [ { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)}, - 'properties': {'property1': 12.22} + "geometry": {"type": "Point", "coordinates": (0.0, 0.1)}, + "properties": {"property1": 12.22}, }, { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.2)}, - 'properties': {'property1': 12.88} - } + "geometry": {"type": "Point", "coordinates": (0.0, 0.2)}, + "properties": {"property1": 12.88}, + }, ] _records_with_float_property1_as_string = [ { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)}, - 'properties': {'property1': '12.22'} + "geometry": {"type": "Point", "coordinates": (0.0, 0.1)}, + "properties": {"property1": "12.22"}, }, { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.2)}, - 'properties': {'property1': '12.88'} - } + "geometry": {"type": "Point", "coordinates": (0.0, 0.2)}, + "properties": {"property1": "12.88"}, + }, ] _records_with_invalid_number_property1 = [ { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.3)}, - 'properties': {'property1': 'invalid number'} + "geometry": {"type": "Point", "coordinates": (0.0, 0.3)}, + "properties": {"property1": "invalid number"}, } ] def _write_collection(self, records, schema, driver): with fiona.open( - self.filename, - "w", - driver=driver, - schema=schema, - crs='epsg:4326', - encoding='utf-8' + self.filename, + "w", + driver=driver, + schema=schema, + crs="epsg:4326", + encoding="utf-8", ) as c: - c.writerecords(records) + c.writerecords([Feature.from_dict(**rec) for rec in records]) def test_shape_driver_truncates_float_property_to_requested_int_format(self): driver = "ESRI Shapefile" self._write_collection( self._records_with_float_property1, - {'geometry': 'Point', 'properties': [('property1', 'int')]}, - driver + {"geometry": "Point", "properties": [("property1", "int")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12 == rf1['properties']['property1'] - assert 12 == rf2['properties']['property1'] + assert 12 == rf1.properties["property1"] + assert 12 == rf2.properties["property1"] def 
test_shape_driver_rounds_float_property_to_requested_digits_number(self): driver = "ESRI Shapefile" self._write_collection( self._records_with_float_property1, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12.2 == rf1['properties']['property1'] - assert 12.9 == rf2['properties']['property1'] + assert 12.2 == rf1.properties["property1"] + assert 12.9 == rf2.properties["property1"] - def test_string_is_converted_to_number_and_truncated_to_requested_int_by_shape_driver(self): + def test_string_is_converted_to_number_and_truncated_to_requested_int_by_shape_driver( + self, + ): driver = "ESRI Shapefile" self._write_collection( self._records_with_float_property1_as_string, - {'geometry': 'Point', 'properties': [('property1', 'int')]}, - driver + {"geometry": "Point", "properties": [("property1", "int")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12 == rf1['properties']['property1'] - assert 12 == rf2['properties']['property1'] + assert 12 == rf1.properties["property1"] + assert 12 == rf2.properties["property1"] - def test_string_is_converted_to_number_and_rounded_to_requested_digits_number_by_shape_driver(self): + def test_string_is_converted_to_number_and_rounded_to_requested_digits_number_by_shape_driver( + self, + ): driver = "ESRI Shapefile" self._write_collection( self._records_with_float_property1_as_string, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12.2 == rf1['properties']['property1'] - assert 12.9 == rf2['properties']['property1'] + assert 12.2 == rf1.properties["property1"] + assert 12.9 == rf2.properties["property1"] def test_invalid_number_is_converted_to_0_and_written_by_shape_driver(self): driver = "ESRI Shapefile" self._write_collection( self._records_with_invalid_number_property1, # {'geometry': 'Point', 'properties': [('property1', 'int')]}, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 1 == len(c) rf1 = c[0] - assert 0 == rf1['properties']['property1'] + assert 0 == rf1.properties["property1"] def test_geojson_driver_truncates_float_property_to_requested_int_format(self): driver = "GeoJSON" self._write_collection( self._records_with_float_property1, - {'geometry': 'Point', 'properties': [('property1', 'int')]}, - driver + {"geometry": "Point", "properties": [("property1", "int")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12 == rf1['properties']['property1'] - assert 12 == 
rf2['properties']['property1'] + assert 12 == rf1.properties["property1"] + assert 12 == rf2.properties["property1"] - def test_geojson_driver_does_not_round_float_property_to_requested_digits_number(self): + def test_geojson_driver_does_not_round_float_property_to_requested_digits_number( + self, + ): driver = "GeoJSON" self._write_collection( self._records_with_float_property1, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) # **************************************** # FLOAT FORMATTING IS NOT RESPECTED... - assert 12.22 == rf1['properties']['property1'] - assert 12.88 == rf2['properties']['property1'] + assert 12.22 == rf1.properties["property1"] + assert 12.88 == rf2.properties["property1"] - def test_string_is_converted_to_number_and_truncated_to_requested_int_by_geojson_driver(self): + def test_string_is_converted_to_number_and_truncated_to_requested_int_by_geojson_driver( + self, + ): driver = "GeoJSON" self._write_collection( self._records_with_float_property1_as_string, - {'geometry': 'Point', 'properties': [('property1', 'int')]}, - driver + {"geometry": "Point", "properties": [("property1", "int")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) - assert 12 == rf1['properties']['property1'] - assert 12 == rf2['properties']['property1'] + assert 12 == rf1.properties["property1"] + assert 12 == rf2.properties["property1"] - def test_string_is_converted_to_number_but_not_rounded_to_requested_digits_number_by_geojson_driver(self): + def test_string_is_converted_to_number_but_not_rounded_to_requested_digits_number_by_geojson_driver( + self, + ): driver = "GeoJSON" self._write_collection( self._records_with_float_property1_as_string, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 2 == len(c) rf1, rf2 = list(c) # **************************************** # FLOAT FORMATTING IS NOT RESPECTED... 
- assert 12.22 == rf1['properties']['property1'] - assert 12.88 == rf2['properties']['property1'] + assert 12.22 == rf1.properties["property1"] + assert 12.88 == rf2.properties["property1"] def test_invalid_number_is_converted_to_0_and_written_by_geojson_driver(self): driver = "GeoJSON" self._write_collection( self._records_with_invalid_number_property1, - # {'geometry': 'Point', 'properties': [('property1', 'int')]}, - {'geometry': 'Point', 'properties': [('property1', 'float:15.1')]}, - driver + {"geometry": "Point", "properties": [("property1", "float:15.1")]}, + driver, ) - with fiona.open(self.filename, driver=driver, encoding='utf-8') as c: + with fiona.open(self.filename, driver=driver, encoding="utf-8") as c: assert 1 == len(c) rf1 = c[0] - assert 0 == rf1['properties']['property1'] + assert 0 == rf1.properties["property1"] class TestPointWriting(object): @@ -657,10 +684,12 @@ def shapefile(self, tmpdir): "w", driver="ESRI Shapefile", schema={ - 'geometry': 'Point', - 'properties': [('title', 'str'), ('date', 'date')]}, - crs='epsg:4326', - encoding='utf-8') + "geometry": "Point", + "properties": [("title", "str"), ("date", "date")], + }, + crs="epsg:4326", + encoding="utf-8", + ) yield self.sink.close() @@ -673,9 +702,12 @@ def test_cpg(self, tmpdir): def test_write_one(self): assert len(self.sink) == 0 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) - f = { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)}, - 'properties': {'title': 'point one', 'date': "2012-01-29"}} + f = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, 0.1)}, + "properties": {"title": "point one", "date": "2012-01-29"}, + } + ) self.sink.writerecords([f]) assert len(self.sink) == 1 assert self.sink.bounds == (0.0, 0.1, 0.0, 0.1) @@ -684,12 +716,18 @@ def test_write_one(self): def test_write_two(self): assert len(self.sink) == 0 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) - f1 = { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)}, - 'properties': {'title': 'point one', 'date': "2012-01-29"}} - f2 = { - 'geometry': {'type': 'Point', 'coordinates': (0.0, -0.1)}, - 'properties': {'title': 'point two', 'date': "2012-01-29"}} + f1 = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, 0.1)}, + "properties": {"title": "point one", "date": "2012-01-29"}, + } + ) + f2 = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, -0.1)}, + "properties": {"title": "point two", "date": "2012-01-29"}, + } + ) self.sink.writerecords([f1, f2]) assert len(self.sink) == 2 assert self.sink.bounds == (0.0, -0.1, 0.0, 0.1) @@ -697,20 +735,25 @@ def test_write_two(self): def test_write_one_null_geom(self): assert len(self.sink) == 0 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) - f = { - 'geometry': None, - 'properties': {'title': 'point one', 'date': "2012-01-29"}} + f = Feature.from_dict( + **{ + "geometry": None, + "properties": {"title": "point one", "date": "2012-01-29"}, + } + ) self.sink.writerecords([f]) assert len(self.sink) == 1 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) def test_validate_record(self): fvalid = { - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)}, - 'properties': {'title': 'point one', 'date': "2012-01-29"}} + "geometry": {"type": "Point", "coordinates": (0.0, 0.1)}, + "properties": {"title": "point one", "date": "2012-01-29"}, + } finvalid = { - 'geometry': {'type': 'Point', 'coordinates': (0.0, -0.1)}, - 'properties': {'not-a-title': 'point two', 'date': "2012-01-29"}} + "geometry": {"type": 
"Point", "coordinates": (0.0, -0.1)}, + "properties": {"not-a-title": "point two", "date": "2012-01-29"}, + } assert self.sink.validate_record(fvalid) assert not self.sink.validate_record(finvalid) @@ -723,19 +766,26 @@ def shapefile(self, tmpdir): "w", driver="ESRI Shapefile", schema={ - 'geometry': 'LineString', - 'properties': [('title', 'str'), ('date', 'date')]}, - crs={'init': "epsg:4326", 'no_defs': True}) + "geometry": "LineString", + "properties": [("title", "str"), ("date", "date")], + }, + crs={"init": "epsg:4326", "no_defs": True}, + ) yield self.sink.close() def test_write_one(self): assert len(self.sink) == 0 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) - f = { - 'geometry': {'type': 'LineString', - 'coordinates': [(0.0, 0.1), (0.0, 0.2)]}, - 'properties': {'title': 'line one', 'date': "2012-01-29"}} + f = Feature.from_dict( + **{ + "geometry": { + "type": "LineString", + "coordinates": [(0.0, 0.1), (0.0, 0.2)], + }, + "properties": {"title": "line one", "date": "2012-01-29"}, + } + ) self.sink.writerecords([f]) assert len(self.sink) == 1 assert self.sink.bounds == (0.0, 0.1, 0.0, 0.2) @@ -743,15 +793,27 @@ def test_write_one(self): def test_write_two(self): assert len(self.sink) == 0 assert self.sink.bounds == (0.0, 0.0, 0.0, 0.0) - f1 = { - 'geometry': {'type': 'LineString', - 'coordinates': [(0.0, 0.1), (0.0, 0.2)]}, - 'properties': {'title': 'line one', 'date': "2012-01-29"}} - f2 = { - 'geometry': {'type': 'MultiLineString', - 'coordinates': [[(0.0, 0.0), (0.0, -0.1)], - [(0.0, -0.1), (0.0, -0.2)]]}, - 'properties': {'title': 'line two', 'date': "2012-01-29"}} + f1 = Feature.from_dict( + **{ + "geometry": { + "type": "LineString", + "coordinates": [(0.0, 0.1), (0.0, 0.2)], + }, + "properties": {"title": "line one", "date": "2012-01-29"}, + } + ) + f2 = Feature.from_dict( + **{ + "geometry": { + "type": "MultiLineString", + "coordinates": [ + [(0.0, 0.0), (0.0, -0.1)], + [(0.0, -0.1), (0.0, -0.2)], + ], + }, + "properties": {"title": "line two", "date": "2012-01-29"}, + } + ) self.sink.writerecords([f1, f2]) assert len(self.sink) == 2 assert self.sink.bounds == (0.0, -0.2, 0.0, 0.2) @@ -762,29 +824,46 @@ class TestPointAppend(object): def shapefile(self, tmpdir, path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp, "r") as input: output_schema = input.schema - output_schema['geometry'] = '3D Point' + output_schema["geometry"] = "3D Point" with fiona.open( - str(tmpdir.join("test_append_point.shp")), - 'w', crs=None, driver="ESRI Shapefile", - schema=output_schema) as output: + str(tmpdir.join("test_append_point.shp")), + "w", + crs=None, + driver="ESRI Shapefile", + schema=output_schema, + ) as output: for f in input: - fnew = Feature(id=f.id, properties=f.properties, geometry=Geometry(type="Point", coordinates=f.geometry.coordinates[0][0])) + fnew = Feature( + id=f.id, + properties=f.properties, + geometry=Geometry( + type="Point", coordinates=f.geometry.coordinates[0][0] + ), + ) output.write(fnew) def test_append_point(self, tmpdir): with fiona.open(str(tmpdir.join("test_append_point.shp")), "a") as c: - assert c.schema['geometry'] == '3D Point' - c.write({'geometry': {'type': 'Point', 'coordinates': (0.0, 45.0)}, - 'properties': {'PERIMETER': 1.0, - 'FEATURE2': None, - 'NAME': 'Foo', - 'FEATURE1': None, - 'URL': 'http://example.com', - 'AGBUR': 'BAR', - 'AREA': 0.0, - 'STATE_FIPS': 1, - 'WILDRNP020': 1, - 'STATE': 'XL'}}) + assert c.schema["geometry"] == "3D Point" + c.write( + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, 
45.0)}, + "properties": { + "PERIMETER": 1.0, + "FEATURE2": None, + "NAME": "Foo", + "FEATURE1": None, + "URL": "http://example.com", + "AGBUR": "BAR", + "AREA": 0.0, + "STATE_FIPS": 1, + "WILDRNP020": 1, + "STATE": "XL", + }, + } + ) + ) assert len(c) == 68 @@ -792,53 +871,79 @@ class TestLineAppend(object): @pytest.fixture(autouse=True) def shapefile(self, tmpdir): with fiona.open( - str(tmpdir.join("test_append_line.shp")), - "w", - driver="ESRI Shapefile", - schema={ - 'geometry': 'MultiLineString', - 'properties': {'title': 'str', 'date': 'date'}}, - crs={'init': "epsg:4326", 'no_defs': True}) as output: - f = {'geometry': {'type': 'MultiLineString', - 'coordinates': [[(0.0, 0.1), (0.0, 0.2)]]}, - 'properties': {'title': 'line one', 'date': "2012-01-29"}} + str(tmpdir.join("test_append_line.shp")), + "w", + driver="ESRI Shapefile", + schema={ + "geometry": "MultiLineString", + "properties": {"title": "str", "date": "date"}, + }, + crs={"init": "epsg:4326", "no_defs": True}, + ) as output: + f = Feature.from_dict( + **{ + "geometry": { + "type": "MultiLineString", + "coordinates": [[(0.0, 0.1), (0.0, 0.2)]], + }, + "properties": {"title": "line one", "date": "2012-01-29"}, + } + ) output.writerecords([f]) def test_append_line(self, tmpdir): with fiona.open(str(tmpdir.join("test_append_line.shp")), "a") as c: - assert c.schema['geometry'] == 'LineString' - f1 = { - 'geometry': {'type': 'LineString', - 'coordinates': [(0.0, 0.1), (0.0, 0.2)]}, - 'properties': {'title': 'line one', 'date': "2012-01-29"}} - f2 = { - 'geometry': {'type': 'MultiLineString', - 'coordinates': [[(0.0, 0.0), (0.0, -0.1)], - [(0.0, -0.1), (0.0, -0.2)]]}, - 'properties': {'title': 'line two', 'date': "2012-01-29"}} + assert c.schema["geometry"] == "LineString" + f1 = Feature.from_dict( + **{ + "geometry": { + "type": "LineString", + "coordinates": [(0.0, 0.1), (0.0, 0.2)], + }, + "properties": {"title": "line one", "date": "2012-01-29"}, + } + ) + f2 = Feature.from_dict( + **{ + "geometry": { + "type": "MultiLineString", + "coordinates": [ + [(0.0, 0.0), (0.0, -0.1)], + [(0.0, -0.1), (0.0, -0.2)], + ], + }, + "properties": {"title": "line two", "date": "2012-01-29"}, + } + ) c.writerecords([f1, f2]) assert len(c) == 3 assert c.bounds == (0.0, -0.2, 0.0, 0.2) def test_shapefile_field_width(tmpdir): - name = str(tmpdir.join('textfield.shp')) + name = str(tmpdir.join("textfield.shp")) with fiona.open( - name, 'w', - schema={'geometry': 'Point', 'properties': {'text': 'str:254'}}, - driver="ESRI Shapefile") as c: + name, + "w", + schema={"geometry": "Point", "properties": {"text": "str:254"}}, + driver="ESRI Shapefile", + ) as c: c.write( - {'geometry': {'type': 'Point', 'coordinates': (0.0, 45.0)}, - 'properties': {'text': 'a' * 254}}) + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, 45.0)}, + "properties": {"text": "a" * 254}, + } + ) + ) c = fiona.open(name, "r") - assert c.schema['properties']['text'] == 'str:254' + assert c.schema["properties"]["text"] == "str:254" f = next(iter(c)) - assert f['properties']['text'] == 'a' * 254 + assert f.properties["text"] == "a" * 254 c.close() class TestCollection(object): - def test_invalid_mode(self, tmpdir): with pytest.raises(ValueError): fiona.open(str(tmpdir.join("bogus.shp")), "r+") @@ -857,7 +962,9 @@ def test_no_read_conn_str(self): with pytest.raises(DriverError): fiona.open("PG:dbname=databasename", "r") - @pytest.mark.skipif(sys.platform.startswith("win"), reason="test only for *nix based system") + @pytest.mark.skipif( + 
sys.platform.startswith("win"), reason="test only for *nix based system" + ) def test_no_read_directory(self): with pytest.raises(DriverError): fiona.open("/dev/null", "r") @@ -866,22 +973,27 @@ def test_no_read_directory(self): def test_date(tmpdir): name = str(tmpdir.join("date_test.shp")) sink = fiona.open( - name, "w", + name, + "w", driver="ESRI Shapefile", - schema={ - 'geometry': 'Point', - 'properties': [('id', 'int'), ('date', 'date')]}, - crs={'init': "epsg:4326", 'no_defs': True}) - - recs = [{ - 'geometry': {'type': 'Point', - 'coordinates': (7.0, 50.0)}, - 'properties': {'id': 1, 'date': '2013-02-25'} - }, { - 'geometry': {'type': 'Point', - 'coordinates': (7.0, 50.2)}, - 'properties': {'id': 1, 'date': datetime.date(2014, 2, 3)} - }] + schema={"geometry": "Point", "properties": [("id", "int"), ("date", "date")]}, + crs={"init": "epsg:4326", "no_defs": True}, + ) + + recs = [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (7.0, 50.0)}, + "properties": {"id": 1, "date": "2013-02-25"}, + } + ), + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (7.0, 50.2)}, + "properties": {"id": 1, "date": datetime.date(2014, 2, 3)}, + } + ), + ] sink.writerecords(recs) sink.close() assert len(sink) == 2 @@ -890,22 +1002,21 @@ def test_date(tmpdir): assert len(c) == 2 rf1, rf2 = list(c) - assert rf1['properties']['date'] == '2013-02-25' - assert rf2['properties']['date'] == '2014-02-03' + assert rf1.properties["date"] == "2013-02-25" + assert rf2.properties["date"] == "2014-02-03" def test_open_kwargs(tmpdir, path_coutwildrnp_shp): - dstfile = str(tmpdir.join('test.json')) + dstfile = str(tmpdir.join("test.json")) with fiona.open(path_coutwildrnp_shp) as src: kwds = src.profile - kwds['driver'] = 'GeoJSON' - kwds['coordinate_precision'] = 2 - with fiona.open(dstfile, 'w', **kwds) as dst: + kwds["driver"] = "GeoJSON" + kwds["coordinate_precision"] = 2 + with fiona.open(dstfile, "w", **kwds) as dst: dst.writerecords(ftr for ftr in src) with open(dstfile) as f: - assert '"coordinates": [ [ [ -111.74, 42.0 ], [ -111.66, 42.0 ]' in \ - f.read(2000) + assert '"coordinates": [ [ [ -111.74, 42.0 ], [ -111.66, 42.0 ]' in f.read(2000) @pytest.mark.network @@ -936,7 +1047,13 @@ def test_collection_zip_http(): def test_encoding_option_warning(tmpdir, caplog): """There is no ENCODING creation option log warning for GeoJSON""" - fiona.Collection(str(tmpdir.join("test.geojson")), "w", driver="GeoJSON", crs="epsg:4326", schema={"geometry": "Point", "properties": {"foo": "int"}}) + fiona.Collection( + str(tmpdir.join("test.geojson")), + "w", + driver="GeoJSON", + crs="epsg:4326", + schema={"geometry": "Point", "properties": {"foo": "int"}}, + ) assert not caplog.text @@ -961,31 +1078,65 @@ def test_collection_no_env(path_coutwildrnp_shp): def test_collection_env(path_coutwildrnp_shp): """We have a GDAL env within collection context""" with fiona.open(path_coutwildrnp_shp): - assert 'FIONA_ENV' in getenv() + assert "FIONA_ENV" in getenv() -@pytest.mark.parametrize('driver,filename', [('ESRI Shapefile', 'test.shp'), - ('GeoJSON', 'test.json'), - ('GPKG', 'test.gpkg')]) +@pytest.mark.parametrize( + "driver,filename", + [("ESRI Shapefile", "test.shp"), ("GeoJSON", "test.json"), ("GPKG", "test.gpkg")], +) def test_mask_polygon_triangle(tmpdir, driver, filename): - """ Test if mask works for non trivial geometries""" - schema = {'geometry': 'Polygon', 'properties': OrderedDict([('position_i', 'int'), ('position_j', 'int')])} - records = [{'geometry': {'type': 
'Polygon', 'coordinates': (((float(i), float(j)), (float(i + 1), float(j)), - (float(i + 1), float(j + 1)), (float(i), float(j + 1)), - (float(i), float(j))),)}, - 'properties': {'position_i': i, 'position_j': j}} for i in range(10) for j in range(10)] + """Test if mask works for non trivial geometries""" + schema = { + "geometry": "Polygon", + "properties": OrderedDict([("position_i", "int"), ("position_j", "int")]), + } + records = [ + Feature.from_dict( + **{ + "geometry": { + "type": "Polygon", + "coordinates": ( + ( + (float(i), float(j)), + (float(i + 1), float(j)), + (float(i + 1), float(j + 1)), + (float(i), float(j + 1)), + (float(i), float(j)), + ), + ), + }, + "properties": {"position_i": i, "position_j": j}, + } + ) + for i in range(10) + for j in range(10) + ] random.shuffle(records) path = str(tmpdir.join(filename)) - with fiona.open(path, 'w', - driver=driver, - schema=schema,) as c: + with fiona.open( + path, + "w", + driver=driver, + schema=schema, + ) as c: c.writerecords(records) with fiona.open(path) as c: items = list( - c.items(mask={'type': 'Polygon', 'coordinates': (((2.0, 2.0), (4.0, 4.0), (4.0, 6.0), (2.0, 2.0)),)})) + c.items( + mask=Geometry.from_dict( + **{ + "type": "Polygon", + "coordinates": ( + ((2.0, 2.0), (4.0, 4.0), (4.0, 6.0), (2.0, 2.0)), + ), + } + ) + ) + ) assert len(items) == 15 @@ -993,39 +1144,48 @@ def test_collection__empty_column_name(tmpdir): """Based on pull #955""" tmpfile = str(tmpdir.join("test_empty.geojson")) with pytest.warns(UserWarning, match="Empty field name at index 0"): - with fiona.open(tmpfile, "w", driver="GeoJSON", schema={ - "geometry": "Point", - "properties": {"": "str", "name": "str"} - }) as tmp: - tmp.writerecords([{ - "geometry": {"type": "Point", "coordinates": [ 8, 49 ] }, - "properties": { "": "", "name": "test" } - }]) + with fiona.open( + tmpfile, + "w", + driver="GeoJSON", + schema={"geometry": "Point", "properties": {"": "str", "name": "str"}}, + ) as tmp: + tmp.writerecords( + [ + { + "geometry": {"type": "Point", "coordinates": [8, 49]}, + "properties": {"": "", "name": "test"}, + } + ] + ) with fiona.open(tmpfile) as tmp: with pytest.warns(UserWarning, match="Empty field name at index 0"): assert tmp.schema == { "geometry": "Point", - "properties": {"": "str", "name": "str"} + "properties": {"": "str", "name": "str"}, } with pytest.warns(UserWarning, match="Empty field name at index 0"): next(tmp) -@pytest.mark.parametrize("extension, driver", [ - ("shp", "ESRI Shapefile"), - ("geojson", "GeoJSON"), - ("json", "GeoJSON"), - ("gpkg", "GPKG"), - ("SHP", "ESRI Shapefile"), -]) +@pytest.mark.parametrize( + "extension, driver", + [ + ("shp", "ESRI Shapefile"), + ("geojson", "GeoJSON"), + ("json", "GeoJSON"), + ("gpkg", "GPKG"), + ("SHP", "ESRI Shapefile"), + ], +) def test_driver_detection(tmpdir, extension, driver): with fiona.open( str(tmpdir.join("test.{}".format(extension))), "w", schema={ - 'geometry': 'MultiLineString', - 'properties': {'title': 'str', 'date': 'date'} + "geometry": "MultiLineString", + "properties": {"title": "str", "date": "date"}, }, crs="EPSG:4326", ) as output: diff --git a/tests/test_datetime.py b/tests/test_datetime.py index 4de805307..a1ffb9d45 100644 --- a/tests/test_datetime.py +++ b/tests/test_datetime.py @@ -10,10 +10,18 @@ from fiona.rfc3339 import parse_time, parse_datetime from .conftest import get_temp_filename from fiona.env import GDALVersion +from fiona.model import Feature import datetime -from fiona.drvsupport import (supported_drivers, driver_mode_mingdal, 
_driver_converts_field_type_silently_to_str, - _driver_supports_field, _driver_converts_to_str, _driver_supports_timezones, - _driver_supports_milliseconds, _driver_supports_mode) +from fiona.drvsupport import ( + supported_drivers, + driver_mode_mingdal, + _driver_converts_field_type_silently_to_str, + _driver_supports_field, + _driver_converts_to_str, + _driver_supports_timezones, + _driver_supports_milliseconds, + _driver_supports_mode, +) import pytz from pytz import timezone @@ -21,49 +29,80 @@ def get_schema(driver, field_type): - if driver == 'GPX': - return {'properties': OrderedDict([('ele', 'float'), - ('time', field_type)]), - 'geometry': 'Point'} - if driver == 'GPSTrackMaker': + if driver == "GPX": return { - 'properties': OrderedDict([('name', 'str'), ('comment', 'str'), ('icon', 'int'), ('time', field_type)]), - 'geometry': 'Point'} - if driver == 'CSV': + "properties": OrderedDict([("ele", "float"), ("time", field_type)]), + "geometry": "Point", + } + if driver == "GPSTrackMaker": + return { + "properties": OrderedDict( + [ + ("name", "str"), + ("comment", "str"), + ("icon", "int"), + ("time", field_type), + ] + ), + "geometry": "Point", + } + if driver == "CSV": return {"properties": {"datefield": field_type}} - return {"geometry": "Point", - "properties": {"datefield": field_type}} + return {"geometry": "Point", "properties": {"datefield": field_type}} def get_records(driver, values): - if driver == 'GPX': - return [{"geometry": {"type": "Point", "coordinates": [1, 2]}, - "properties": {'ele': 0, "time": val}} for val in values] - if driver == 'GPSTrackMaker': - return [{"geometry": {"type": "Point", "coordinates": [1, 2]}, - "properties": OrderedDict([('name', ''), ('comment', ''), ('icon', 48), ('time', val)])} for - val in values] - if driver == 'CSV': - return [{"properties": {"datefield": val}} for val in values] - - return [{"geometry": {"type": "Point", "coordinates": [1, 2]}, - "properties": {"datefield": val}} for val in values] + if driver == "GPX": + return [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": [1, 2]}, + "properties": {"ele": 0, "time": val}, + } + ) + for val in values + ] + if driver == "GPSTrackMaker": + return [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": [1, 2]}, + "properties": OrderedDict( + [("name", ""), ("comment", ""), ("icon", 48), ("time", val)] + ), + } + ) + for val in values + ] + if driver == "CSV": + return [ + Feature.from_dict(**{"properties": {"datefield": val}}) for val in values + ] + + return [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": [1, 2]}, + "properties": {"datefield": val}, + } + ) + for val in values + ] def get_schema_field(driver, schema): - if driver in {'GPX', 'GPSTrackMaker'}: + if driver in {"GPX", "GPSTrackMaker"}: return schema["properties"]["time"] return schema["properties"]["datefield"] def get_field(driver, f): - if driver in {'GPX', 'GPSTrackMaker'}: + if driver in {"GPX", "GPSTrackMaker"}: return f["properties"]["time"] - return f['properties']['datefield'] + return f.properties["datefield"] class TZ(datetime.tzinfo): - def __init__(self, minutes): self.minutes = minutes @@ -72,64 +111,127 @@ def utcoffset(self, dt): def generate_testdata(field_type, driver): - """ Generate test cases for test_datefield + """Generate test cases for test_datefield Each test case has the format [(in_value1, true_value as datetime.*object), (in_value2, true_value as datetime.*object), ...] 
""" # Test data for 'date' data type - if field_type == 'date': - return [("2018-03-25", datetime.date(2018, 3, 25)), - (datetime.date(2018, 3, 25), datetime.date(2018, 3, 25))] + if field_type == "date": + return [ + ("2018-03-25", datetime.date(2018, 3, 25)), + (datetime.date(2018, 3, 25), datetime.date(2018, 3, 25)), + ] # Test data for 'datetime' data type - if field_type == 'datetime': - return [("2018-03-25T22:49:05", datetime.datetime(2018, 3, 25, 22, 49, 5)), - (datetime.datetime(2018, 3, 25, 22, 49, 5), datetime.datetime(2018, 3, 25, 22, 49, 5)), - ("2018-03-25T22:49:05.23", datetime.datetime(2018, 3, 25, 22, 49, 5, 230000)), - (datetime.datetime(2018, 3, 25, 22, 49, 5, 230000), datetime.datetime(2018, 3, 25, 22, 49, 5, 230000)), - ("2018-03-25T22:49:05.123456", datetime.datetime(2018, 3, 25, 22, 49, 5, 123000)), - (datetime.datetime(2018, 3, 25, 22, 49, 5, 123456), datetime.datetime(2018, 3, 25, 22, 49, 5, 123000)), - ("2018-03-25T22:49:05+01:30", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90))), - ("2018-03-25T22:49:05-01:30", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90))), - (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)), - datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90))), - (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)), - datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90))), - (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich')), - datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich'))), - (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain')), - datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain'))), - (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)), - datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15))), - (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), - datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15))), - ("2018-03-25T22:49:05-23:45", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15))), - ("2018-03-25T22:49:05+23:45", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)))] + if field_type == "datetime": + return [ + ("2018-03-25T22:49:05", datetime.datetime(2018, 3, 25, 22, 49, 5)), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5), + datetime.datetime(2018, 3, 25, 22, 49, 5), + ), + ( + "2018-03-25T22:49:05.23", + datetime.datetime(2018, 3, 25, 22, 49, 5, 230000), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, 230000), + datetime.datetime(2018, 3, 25, 22, 49, 5, 230000), + ), + ( + "2018-03-25T22:49:05.123456", + datetime.datetime(2018, 3, 25, 22, 49, 5, 123000), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, 123456), + datetime.datetime(2018, 3, 25, 22, 49, 5, 123000), + ), + ( + "2018-03-25T22:49:05+01:30", + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)), + ), + ( + "2018-03-25T22:49:05-01:30", + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)), + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)), + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)), + ), + ( + datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone( + timezone("Europe/Zurich") + ), + datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone( + timezone("Europe/Zurich") + ), 
+ ), + ( + datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone( + timezone("US/Mountain") + ), + datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone( + timezone("US/Mountain") + ), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)), + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)), + ), + ( + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), + ), + ( + "2018-03-25T22:49:05-23:45", + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), + ), + ( + "2018-03-25T22:49:05+23:45", + datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)), + ), + ] # Test data for 'time' data type - elif field_type == 'time': - return [("22:49:05", datetime.time(22, 49, 5)), - (datetime.time(22, 49, 5), datetime.time(22, 49, 5)), - ("22:49:05.23", datetime.time(22, 49, 5, 230000)), - (datetime.time(22, 49, 5, 230000), datetime.time(22, 49, 5, 230000)), - ("22:49:05.123456", datetime.time(22, 49, 5, 123000)), - (datetime.time(22, 49, 5, 123456), datetime.time(22, 49, 5, 123000)), - ("22:49:05+01:30", datetime.time(22, 49, 5, tzinfo=TZ(90))), - ("22:49:05-01:30", datetime.time(22, 49, 5, tzinfo=TZ(-90))), - (datetime.time(22, 49, 5, tzinfo=TZ(90)), datetime.time(22, 49, 5, tzinfo=TZ(90))), - (datetime.time(22, 49, 5, tzinfo=TZ(-90)), datetime.time(22, 49, 5, tzinfo=TZ(-90))), - (datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)), - datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15))), - (datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), - datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))), - ("22:49:05-23:45", datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))), - ("22:49:05+23:45", datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)))] + elif field_type == "time": + return [ + ("22:49:05", datetime.time(22, 49, 5)), + (datetime.time(22, 49, 5), datetime.time(22, 49, 5)), + ("22:49:05.23", datetime.time(22, 49, 5, 230000)), + (datetime.time(22, 49, 5, 230000), datetime.time(22, 49, 5, 230000)), + ("22:49:05.123456", datetime.time(22, 49, 5, 123000)), + (datetime.time(22, 49, 5, 123456), datetime.time(22, 49, 5, 123000)), + ("22:49:05+01:30", datetime.time(22, 49, 5, tzinfo=TZ(90))), + ("22:49:05-01:30", datetime.time(22, 49, 5, tzinfo=TZ(-90))), + ( + datetime.time(22, 49, 5, tzinfo=TZ(90)), + datetime.time(22, 49, 5, tzinfo=TZ(90)), + ), + ( + datetime.time(22, 49, 5, tzinfo=TZ(-90)), + datetime.time(22, 49, 5, tzinfo=TZ(-90)), + ), + ( + datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)), + datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)), + ), + ( + datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), + datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)), + ), + ("22:49:05-23:45", datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))), + ("22:49:05+23:45", datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15))), + ] def compare_datetimes_utc(d1, d2): - """ Test if two time objects are the same. Native times are assumed to be UTC""" + """Test if two time objects are the same. 
Naive times are assumed to be UTC"""
 
     if d1.tzinfo is None:
         d1 = d1.replace(tzinfo=TZ(0))
@@ -141,7 +243,7 @@ def compare_datetimes_utc(d1, d2):
 
 
 def test_compare_datetimes_utc():
-    """ Test compare_datetimes_utc """
+    """Test compare_datetimes_utc"""
     d1 = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(60))
     d2 = datetime.datetime(2020, 1, 21, 11, 30, 0, tzinfo=TZ(0))
     assert d1 == d2
@@ -157,31 +259,43 @@ def test_compare_datetimes_utc():
     assert d1 == d2
     assert compare_datetimes_utc(d1, d2)
 
-    d1 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich'))
+    d1 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(
+        timezone("Europe/Zurich")
+    )
     d2 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc)
     assert d1 == d2
     assert compare_datetimes_utc(d1, d2)
 
-    d1 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich'))
-    d2 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain'))
+    d1 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(
+        timezone("Europe/Zurich")
+    )
+    d2 = datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(
+        timezone("US/Mountain")
+    )
     assert d1 == d2
     assert compare_datetimes_utc(d1, d2)
 
-    d1 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich'))
-    d2 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain'))
+    d1 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(
+        timezone("Europe/Zurich")
+    )
+    d2 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(
+        timezone("US/Mountain")
+    )
    assert d1 == d2
     assert compare_datetimes_utc(d1, d2)
 
 
 def convert_time_to_utc(d):
-    """ Convert datetime.time object to UTC"""
-    d = datetime.datetime(1900, 1, 1, d.hour, d.minute, d.second, d.microsecond, d.tzinfo)
+    """Convert datetime.time object to UTC"""
+    d = datetime.datetime(
+        1900, 1, 1, d.hour, d.minute, d.second, d.microsecond, d.tzinfo
+    )
     d -= d.utcoffset()
     return d.time()
 
 
 def compare_times_utc(d1, d2):
-    """ Test if two datetime.time objects with fixed timezones have the same UTC time"""
+    """Test if two datetime.time objects with fixed timezones have the same UTC time"""
 
     if d1.tzinfo is not None:
         d1 = convert_time_to_utc(d1)
@@ -207,15 +321,23 @@ def test_compare_times_utc():
     d2 = datetime.time(5, 0, 0, tzinfo=TZ(-60 * 7))
     assert compare_times_utc(d1, d2)
 
-    d1 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('MET')).timetz()
-    d2 = datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('EST')).timetz()
+    d1 = (
+        datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc)
+        .astimezone(timezone("MET"))
+        .timetz()
+    )
+    d2 = (
+        datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc)
+        .astimezone(timezone("EST"))
+        .timetz()
+    )
     assert compare_times_utc(d1, d2)
 
 
 def get_tz_offset(d):
-    """ Returns a Timezone (sign, hours, minutes) tuples
+    """Returns a Timezone (sign, hours, minutes) tuple
 
-    E.g.: for '2020-01-21T12:30:00+01:30' ('+', 1, 30) is returned
+    E.g.: for '2020-01-21T12:30:00+01:30' ('+', 1, 30) is returned
     """
 
     offset_minutes = d.utcoffset().total_seconds() / 60
@@ -229,31 +351,31 @@ def get_tz_offset(d):
 
 
 def test_get_tz_offset():
-    """ Test get_tz_offset"""
+    """Test get_tz_offset"""
     d = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(90))
-    assert get_tz_offset(d) == ('+', 1, 30)
+    assert get_tz_offset(d) == ("+", 1, 
30)
 
     d = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(-90))
-    assert get_tz_offset(d) == ('-', 1, 30)
+    assert get_tz_offset(d) == ("-", 1, 30)
 
     d = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(60 * 24 - 15))
-    assert get_tz_offset(d) == ('+', 23, 45)
+    assert get_tz_offset(d) == ("+", 23, 45)
 
     d = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(-60 * 24 + 15))
-    assert get_tz_offset(d) == ('-', 23, 45)
+    assert get_tz_offset(d) == ("-", 23, 45)
 
 
 def generate_testcases():
-    """ Generate test cases for drivers that support datefields, convert datefields to string or do not support
+    """Generate test cases for drivers that support datefields, convert datefields to string or do not support
     datefields"""
     _test_cases_datefield = []
     _test_cases_datefield_to_str = []
     _test_cases_datefield_not_supported = []
 
-    for field_type in ['time', 'datetime', 'date']:
+    for field_type in ["time", "datetime", "date"]:
 
         # Select only drivers that are capable of writing fields
         for driver, raw in supported_drivers.items():
-            if _driver_supports_mode(driver, 'w'):
+            if _driver_supports_mode(driver, "w"):
                 if _driver_supports_field(driver, field_type):
                     if _driver_converts_field_type_silently_to_str(driver, field_type):
                         _test_cases_datefield_to_str.append((driver, field_type))
@@ -262,10 +384,18 @@ def generate_testcases():
                 else:
                     _test_cases_datefield_not_supported.append((driver, field_type))
 
-    return _test_cases_datefield, _test_cases_datefield_to_str, _test_cases_datefield_not_supported
+    return (
+        _test_cases_datefield,
+        _test_cases_datefield_to_str,
+        _test_cases_datefield_not_supported,
+    )
 
 
-test_cases_datefield, test_cases_datefield_to_str, test_cases_datefield_not_supported = generate_testcases()
+(
+    test_cases_datefield,
+    test_cases_datefield_to_str,
+    test_cases_datefield_not_supported,
+) = generate_testcases()
 
 
 @pytest.mark.parametrize("driver, field_type", test_cases_datefield)
@@ -276,10 +406,10 @@ def test_datefield(tmpdir, driver, field_type):
 
 
     def _validate(val, val_exp, field_type, driver):
-        if field_type == 'date':
+        if field_type == "date":
             return val == val_exp.isoformat()
 
-        elif field_type == 'datetime':
+        elif field_type == "datetime":
 
             # some drivers do not support timezones. In this case, Fiona converts datetime fields with a timezone other
             # than UTC to UTC. Thus, both the datetime read by Fiona, as well as expected value are first converted to
@@ -300,7 +430,7 @@ def _validate(val, val_exp, field_type, driver):
                 val_d = datetime.datetime(y, m, d, hh, mm, ss, ms, tz)
                 return compare_datetimes_utc(val_d, val_exp.replace(microsecond=0))
 
-        elif field_type == 'time':
+        elif field_type == "time":
 
             # some drivers do not support timezones. In this case, Fiona converts datetime fields with a timezone other
             # than UTC to UTC. 
Thus, both the time read by Fiona, as well as expected value are first converted to UTC @@ -327,18 +457,17 @@ def _validate(val, val_exp, field_type, driver): values_in, values_exp = zip(*generate_testdata(field_type, driver)) records = get_records(driver, values_in) - with fiona.open(path, 'w', - driver=driver, - schema=schema) as c: + with fiona.open(path, "w", driver=driver, schema=schema) as c: c.writerecords(records) - with fiona.open(path, 'r') as c: + with fiona.open(path, "r") as c: assert get_schema_field(driver, c.schema) == field_type items = [get_field(driver, f) for f in c] assert len(items) == len(values_in) for val, val_exp in zip(items, values_exp): - assert _validate(val, val_exp, field_type, driver), \ - "{} does not match {}".format(val, val_exp.isoformat()) + assert _validate( + val, val_exp, field_type, driver + ), "{} does not match {}".format(val, val_exp.isoformat()) @pytest.mark.parametrize("driver, field_type", test_cases_datefield_to_str) @@ -354,14 +483,19 @@ def test_datefield_driver_converts_to_string(tmpdir, driver, field_type): def _validate(val, val_exp, field_type, driver): - if field_type == 'date': - if (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val): + if field_type == "date": + if ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + ): return True - elif field_type == 'datetime': + elif field_type == "datetime": - if not _driver_supports_timezones(driver, field_type) and val_exp.utcoffset() is not None: + if ( + not _driver_supports_timezones(driver, field_type) + and val_exp.utcoffset() is not None + ): val_exp = convert_time_to_utc(val_exp) # datetime fields can, depending on the driver, support: @@ -372,135 +506,162 @@ def _validate(val, val_exp, field_type, driver): if val_exp.utcoffset() is None: # No Milliseconds if not _driver_supports_milliseconds(driver): - if (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val): + if ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + ): return True else: # Microseconds - if (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(val_exp.microsecond) in val): + if ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(val_exp.microsecond) in val + ): return True # Milliseconds - elif (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(int(val_exp.microsecond / 1000)) in val): + elif ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(int(val_exp.microsecond / 1000)) in val + ): return True # With timezone else: sign, hours, minutes = get_tz_offset(val_exp) if minutes > 0: - tz = "{sign}{hours:02d}{minutes:02d}".format(sign=sign, - hours=int(hours), - minutes=int(minutes)) + tz = 
"{sign}{hours:02d}{minutes:02d}".format( + sign=sign, hours=int(hours), minutes=int(minutes) + ) else: tz = "{sign}{hours:02d}".format(sign=sign, hours=int(hours)) print("tz", tz) # No Milliseconds if not _driver_supports_milliseconds(driver): - if (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - tz in val): + if ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and tz in val + ): return True else: # Microseconds - if (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(val_exp.microsecond) in val and - tz in val): + if ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(val_exp.microsecond) in val + and tz in val + ): return True # Milliseconds - elif (str(val_exp.year) in val and - str(val_exp.month) in val and - str(val_exp.day) in val and - str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(int(val_exp.microsecond / 1000)) in val and - tz in val): + elif ( + str(val_exp.year) in val + and str(val_exp.month) in val + and str(val_exp.day) in val + and str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(int(val_exp.microsecond / 1000)) in val + and tz in val + ): return True - elif field_type == 'time': + elif field_type == "time": # time fields can, depending on the driver, support: # - Timezones # - Milliseconds, respectively Microseconds - if not _driver_supports_timezones(driver, field_type) and val_exp.utcoffset() is not None: + if ( + not _driver_supports_timezones(driver, field_type) + and val_exp.utcoffset() is not None + ): val_exp = convert_time_to_utc(val_exp) # No timezone if val_exp.utcoffset() is None: # No Milliseconds if not _driver_supports_milliseconds(driver): - if (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val): + if ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + ): return True else: # Microseconds - if (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(val_exp.microsecond) in val): + if ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(val_exp.microsecond) in val + ): return True # Milliseconds - elif (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(int(val_exp.microsecond / 1000)) in val): + elif ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(int(val_exp.microsecond / 1000)) in val + ): return True # With timezone else: sign, hours, minutes = get_tz_offset(val_exp) if minutes > 0: - tz = "{sign}{hours:02d}{minutes:02d}".format(sign=sign, - hours=int(hours), - minutes=int(minutes)) + tz = "{sign}{hours:02d}{minutes:02d}".format( + sign=sign, hours=int(hours), minutes=int(minutes) + ) else: tz = "{sign}{hours:02d}".format(sign=sign, hours=int(hours)) # No Milliseconds if not 
_driver_supports_milliseconds(driver): - if (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - tz in val): + if ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and tz in val + ): return True else: # Microseconds - if (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(val_exp.microsecond) in val and - tz in val): + if ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(val_exp.microsecond) in val + and tz in val + ): return True # Milliseconds - elif (str(val_exp.hour) in val and - str(val_exp.minute) in val and - str(val_exp.second) in val and - str(int(val_exp.microsecond / 1000)) in val - and tz in val): + elif ( + str(val_exp.hour) in val + and str(val_exp.minute) in val + and str(val_exp.second) in val + and str(int(val_exp.microsecond / 1000)) in val + and tz in val + ): return True return False @@ -510,34 +671,40 @@ def _validate(val, val_exp, field_type, driver): records = get_records(driver, values_exp) with pytest.warns(UserWarning) as record: - with fiona.open(path, 'w', - driver=driver, - schema=schema) as c: + with fiona.open(path, "w", driver=driver, schema=schema) as c: c.writerecords(records) assert len(record) == 1 assert "silently converts" in record[0].message.args[0] - with fiona.open(path, 'r') as c: - assert get_schema_field(driver, c.schema) == 'str' + with fiona.open(path, "r") as c: + assert get_schema_field(driver, c.schema) == "str" items = [get_field(driver, f) for f in c] assert len(items) == len(values_in) for val, val_exp in zip(items, values_exp): - assert _validate(val, val_exp, field_type, driver), \ - "{} does not match {}".format(val, val_exp.isoformat()) + assert _validate( + val, val_exp, field_type, driver + ), "{} does not match {}".format(val, val_exp.isoformat()) -@pytest.mark.filterwarnings('ignore:.*driver silently converts *:UserWarning') -@pytest.mark.parametrize("driver,field_type", test_cases_datefield + test_cases_datefield_to_str) +@pytest.mark.filterwarnings("ignore:.*driver silently converts *:UserWarning") +@pytest.mark.parametrize( + "driver,field_type", test_cases_datefield + test_cases_datefield_to_str +) def test_datefield_null(tmpdir, driver, field_type): """ Test handling of null values for date, time, datetime types for write capable drivers """ def _validate(val, val_exp, field_type, driver): - if (driver == 'MapInfo File' and field_type == 'time' and - calc_gdal_version_num(2, 0, 0) <= get_gdal_version_num() < calc_gdal_version_num(3, 1, 1)): - return val == '00:00:00' - if val is None or val == '': + if ( + driver == "MapInfo File" + and field_type == "time" + and calc_gdal_version_num(2, 0, 0) + <= get_gdal_version_num() + < calc_gdal_version_num(3, 1, 1) + ): + return val == "00:00:00" + if val is None or val == "": return True return False @@ -546,37 +713,36 @@ def _validate(val, val_exp, field_type, driver): values_in = [None] records = get_records(driver, values_in) - with fiona.open(path, 'w', - driver=driver, - schema=schema) as c: + with fiona.open(path, "w", driver=driver, schema=schema) as c: c.writerecords(records) - with fiona.open(path, 'r') as c: + with fiona.open(path, "r") as c: items = [get_field(driver, f) for f in c] assert len(items) == 1 - assert _validate(items[0], None, field_type, driver), \ - "{} does not match {}".format(items[0], None) + assert _validate( + items[0], None, field_type, driver + ), "{} does 
not match {}".format(items[0], None)
 
 
 @pytest.mark.parametrize("driver, field_type", test_cases_datefield_not_supported)
 def test_datetime_field_unsupported(tmpdir, driver, field_type):
-    """ Test if DriverSupportError is raised for unsupported field_types"""
+    """Test if DriverSupportError is raised for unsupported field_types"""
     schema = get_schema(driver, field_type)
     path = str(tmpdir.join(get_temp_filename(driver)))
     values_in, values_out = zip(*generate_testdata(field_type, driver))
     records = get_records(driver, values_in)
 
     with pytest.raises(DriverSupportError):
-        with fiona.open(path, 'w',
-                        driver=driver,
-                        schema=schema) as c:
+        with fiona.open(path, "w", driver=driver, schema=schema) as c:
             c.writerecords(records)
 
 
 @pytest.mark.parametrize("driver, field_type", test_cases_datefield_not_supported)
-def test_datetime_field_type_marked_not_supported_is_not_supported(tmpdir, driver, field_type, monkeypatch):
-    """ Test if a date/datetime/time field type marked as not not supported is really not supported
+def test_datetime_field_type_marked_not_supported_is_not_supported(
+    tmpdir, driver, field_type, monkeypatch
+):
+    """Test if a date/datetime/time field type marked as not supported is really not supported
 
     Warning: Success of this test does not necessarily mean that a field is not supported. E.g. errors can occur due
     to special schema requirements of drivers. This test only covers the standard case.
@@ -586,7 +752,9 @@ def test_datetime_field_type_marked_not_supported_is_not_supported(tmpdir, drive
     if driver == "BNA" and GDALVersion.runtime() < GDALVersion(2, 0):
         pytest.skip("BNA driver segfaults with gdal 1.11")
 
-    monkeypatch.delitem(fiona.drvsupport._driver_field_type_unsupported[field_type], driver)
+    monkeypatch.delitem(
+        fiona.drvsupport._driver_field_type_unsupported[field_type], driver
+    )
 
     schema = get_schema(driver, field_type)
     path = str(tmpdir.join(get_temp_filename(driver)))
@@ -595,12 +763,10 @@ def test_datetime_field_type_marked_not_supported_is_not_supported(tmpdir, drive
 
     is_good = True
     try:
-        with fiona.open(path, 'w',
-                        driver=driver,
-                        schema=schema) as c:
+        with fiona.open(path, "w", driver=driver, schema=schema) as c:
             c.writerecords(records)
 
-        with fiona.open(path, 'r') as c:
+        with fiona.open(path, "r") as c:
             if not get_schema_field(driver, c.schema) == field_type:
                 is_good = False
             items = [get_field(driver, f) for f in c]
@@ -613,28 +779,37 @@ def test_datetime_field_type_marked_not_supported_is_not_supported(tmpdir, drive
 
 
 def generate_tostr_testcases():
-    """ Flatten driver_converts_to_str to a list of (field_type, driver) tuples"""
+    """Flatten driver_converts_to_str to a list of (field_type, driver) tuples"""
     cases = []
     for field_type in _driver_converts_to_str:
         for driver in _driver_converts_to_str[field_type]:
             driver_supported = driver in supported_drivers
-            driver_can_write = _driver_supports_mode(driver, 'w')
+            driver_can_write = _driver_supports_mode(driver, "w")
             field_supported = _driver_supports_field(driver, field_type)
-            converts_to_str = _driver_converts_field_type_silently_to_str(driver, field_type)
-            if driver_supported and driver_can_write and converts_to_str and field_supported:
+            converts_to_str = _driver_converts_field_type_silently_to_str(
+                driver, field_type
+            )
+            if (
+                driver_supported
+                and driver_can_write
+                and converts_to_str
+                and field_supported
+            ):
                 cases.append((field_type, driver))
     return cases
 
 
-@pytest.mark.filterwarnings('ignore:.*driver silently converts *:UserWarning')
+@pytest.mark.filterwarnings("ignore:.*driver silently 
converts *:UserWarning")
 @pytest.mark.parametrize("driver,field_type", test_cases_datefield_to_str)
-def test_driver_marked_as_silently_converts_to_str_converts_silently_to_str(tmpdir, driver, field_type, monkeypatch):
-    """ Test if a driver and field_type is marked in fiona.drvsupport.driver_converts_to_str to convert to str really
-    silently converts to str
-
-    If this test fails, it should be considered to replace the respective None value in
-    fiona.drvsupport.driver_converts_to_str with a GDALVersion(major, minor) value.
-    """
+def test_driver_marked_as_silently_converts_to_str_converts_silently_to_str(
+    tmpdir, driver, field_type, monkeypatch
+):
+    """Test if a driver and field_type marked in fiona.drvsupport.driver_converts_to_str as converting to str
+    really does convert silently to str
+
+    If this test fails, consider replacing the respective None value in
+    fiona.drvsupport.driver_converts_to_str with a GDALVersion(major, minor) value.
+    """
 
     monkeypatch.delitem(fiona.drvsupport._driver_converts_to_str[field_type], driver)
 
@@ -643,17 +818,15 @@ def test_driver_marked_as_silently_converts_to_str_converts_silently_to_str(tmpd
     values_in, values_out = zip(*generate_testdata(field_type, driver))
     records = get_records(driver, values_in)
 
-    with fiona.open(path, 'w',
-                    driver=driver,
-                    schema=schema) as c:
+    with fiona.open(path, "w", driver=driver, schema=schema) as c:
         c.writerecords(records)
 
-    with fiona.open(path, 'r') as c:
-        assert get_schema_field(driver, c.schema) == 'str'
+    with fiona.open(path, "r") as c:
+        assert get_schema_field(driver, c.schema) == "str"
 
 
 def test_read_timezone_geojson(path_test_tz_geojson):
     """Test if timezones are read correctly"""
     with fiona.open(path_test_tz_geojson) as c:
         items = list(c)
-        assert items[0]['properties']['test'] == '2015-04-22T00:00:00+07:00'
+        assert items[0]["properties"]["test"] == "2015-04-22T00:00:00+07:00"
diff --git a/tests/test_driver_options.py b/tests/test_driver_options.py
index 9fc4ad7c7..7c6b68a76 100644
--- a/tests/test_driver_options.py
+++ b/tests/test_driver_options.py
@@ -1,30 +1,35 @@
-import os
-import tempfile
 from collections import OrderedDict
 import glob
+import os
+import tempfile
+
 import fiona
-from tests.conftest import get_temp_filename, requires_gdal2
+from fiona.model import Feature
+
+from .conftest import get_temp_filename, requires_gdal2
 
 
-@requires_gdal2
-def test_gml_format_option():
-    """ Test GML dataset creation option FORMAT (see https://github.com/Toblerity/Fiona/issues/968)"""
+def test_gml_format_option(tmp_path):
+    """Test GML dataset creation option FORMAT (see gh-968)"""
 
-    schema = {'geometry': 'Point', 'properties': OrderedDict([('position', 'int')])}
-    records = [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, 'properties': {'position': i}} for i in
-               range(10)]
+    schema = {"geometry": "Point", "properties": OrderedDict([("position", "int")])}
+    records = [
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "Point", "coordinates": (0.0, float(i))},
+                "properties": {"position": i},
+            }
+        )
+        for i in range(10)
+    ]
 
-    tmpdir = tempfile.mkdtemp()
-    fpath = os.path.join(tmpdir, get_temp_filename('GML'))
+    fpath = tmp_path.joinpath(get_temp_filename("GML"))
 
-    with fiona.open(fpath,
-                    'w',
-                    driver="GML",
-                    schema=schema,
-                    FORMAT="GML3") as out:
+    with fiona.open(fpath, "w", driver="GML", schema=schema, FORMAT="GML3") as out:
         out.writerecords(records)
 
-    xsd_path = glob.glob(os.path.join(tmpdir, "*.xsd"))[0]
+    xsd_path = list(tmp_path.glob("*.xsd"))[0]
+    with open(xsd_path) 
as f: xsd = f.read() assert "http://schemas.opengis.net/gml/3.1.1" in xsd diff --git a/tests/test_env.py b/tests/test_env.py index 076fc5b66..52ce1b186 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -2,16 +2,19 @@ import os import sys + try: from unittest import mock except ImportError: import mock import boto3 +import pytest import fiona from fiona import _env from fiona.env import getenv, hasenv, ensure_env, ensure_env_with_credentials +from fiona.errors import FionaDeprecationWarning from fiona.session import AWSSession, GSSession @@ -22,76 +25,93 @@ def test_nested_credentials(monkeypatch): def fake_opener(path): return fiona.env.getenv() - with fiona.env.Env(session=AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar')): - assert fiona.env.getenv()['AWS_ACCESS_KEY_ID'] == 'foo' - assert fiona.env.getenv()['AWS_SECRET_ACCESS_KEY'] == 'bar' + with fiona.env.Env( + session=AWSSession(aws_access_key_id="foo", aws_secret_access_key="bar") + ): + assert fiona.env.getenv()["AWS_ACCESS_KEY_ID"] == "foo" + assert fiona.env.getenv()["AWS_SECRET_ACCESS_KEY"] == "bar" - monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'lol') - monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'wut') - gdalenv = fake_opener('s3://foo/bar') - assert gdalenv['AWS_ACCESS_KEY_ID'] == 'foo' - assert gdalenv['AWS_SECRET_ACCESS_KEY'] == 'bar' + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "lol") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "wut") + gdalenv = fake_opener("s3://foo/bar") + assert gdalenv["AWS_ACCESS_KEY_ID"] == "foo" + assert gdalenv["AWS_SECRET_ACCESS_KEY"] == "bar" def test_ensure_env_decorator(gdalenv): @ensure_env def f(): - return getenv()['FIONA_ENV'] + return getenv()["FIONA_ENV"] + assert f() is True def test_ensure_env_decorator_sets_gdal_data(gdalenv, monkeypatch): """fiona.env.ensure_env finds GDAL from environment""" + @ensure_env def f(): - return getenv()['GDAL_DATA'] + return getenv()["GDAL_DATA"] - monkeypatch.setenv('GDAL_DATA', '/lol/wut') - assert f() == '/lol/wut' + monkeypatch.setenv("GDAL_DATA", "/lol/wut") + assert f() == "/lol/wut" @mock.patch("fiona._env.GDALDataFinder.find_file") -def test_ensure_env_decorator_sets_gdal_data_prefix(find_file, gdalenv, monkeypatch, tmpdir): +def test_ensure_env_decorator_sets_gdal_data_prefix( + find_file, gdalenv, monkeypatch, tmpdir +): """fiona.env.ensure_env finds GDAL data under a prefix""" + @ensure_env def f(): - return getenv()['GDAL_DATA'] + return getenv()["GDAL_DATA"] find_file.return_value = None tmpdir.ensure("share/gdal/header.dxf") - monkeypatch.delenv('GDAL_DATA', raising=False) - monkeypatch.setattr(_env, '__file__', str(tmpdir.join("fake.py"))) - monkeypatch.setattr(sys, 'prefix', str(tmpdir)) + monkeypatch.delenv("GDAL_DATA", raising=False) + monkeypatch.setattr(_env, "__file__", str(tmpdir.join("fake.py"))) + monkeypatch.setattr(sys, "prefix", str(tmpdir)) assert f() == str(tmpdir.join("share").join("gdal")) @mock.patch("fiona._env.GDALDataFinder.find_file") -def test_ensure_env_decorator_sets_gdal_data_wheel(find_file, gdalenv, monkeypatch, tmpdir): +def test_ensure_env_decorator_sets_gdal_data_wheel( + find_file, gdalenv, monkeypatch, tmpdir +): """fiona.env.ensure_env finds GDAL data in a wheel""" + @ensure_env def f(): - return getenv()['GDAL_DATA'] + return getenv()["GDAL_DATA"] find_file.return_value = None tmpdir.ensure("gdal_data/header.dxf") - monkeypatch.delenv('GDAL_DATA', raising=False) - monkeypatch.setattr(_env, '__file__', str(tmpdir.join(os.path.basename(_env.__file__)))) + monkeypatch.delenv("GDAL_DATA", 
raising=False) + monkeypatch.setattr( + _env, "__file__", str(tmpdir.join(os.path.basename(_env.__file__))) + ) assert f() == str(tmpdir.join("gdal_data")) @mock.patch("fiona._env.GDALDataFinder.find_file") -def test_ensure_env_with_decorator_sets_gdal_data_wheel(find_file, gdalenv, monkeypatch, tmpdir): +def test_ensure_env_with_decorator_sets_gdal_data_wheel( + find_file, gdalenv, monkeypatch, tmpdir +): """fiona.env.ensure_env finds GDAL data in a wheel""" + @ensure_env_with_credentials def f(*args): - return getenv()['GDAL_DATA'] + return getenv()["GDAL_DATA"] find_file.return_value = None tmpdir.ensure("gdal_data/header.dxf") - monkeypatch.delenv('GDAL_DATA', raising=False) - monkeypatch.setattr(_env, '__file__', str(tmpdir.join(os.path.basename(_env.__file__)))) + monkeypatch.delenv("GDAL_DATA", raising=False) + monkeypatch.setattr( + _env, "__file__", str(tmpdir.join(os.path.basename(_env.__file__))) + ) assert f("foo") == str(tmpdir.join("gdal_data")) @@ -113,20 +133,33 @@ def test_nested_gs_credentials(monkeypatch): def fake_opener(path): return fiona.env.getenv() - with fiona.env.Env(session=GSSession(google_application_credentials='foo')): - assert fiona.env.getenv()['GOOGLE_APPLICATION_CREDENTIALS'] == 'foo' + with fiona.env.Env(session=GSSession(google_application_credentials="foo")): + assert fiona.env.getenv()["GOOGLE_APPLICATION_CREDENTIALS"] == "foo" - gdalenv = fake_opener('gs://foo/bar') - assert gdalenv['GOOGLE_APPLICATION_CREDENTIALS'] == 'foo' + gdalenv = fake_opener("gs://foo/bar") + assert gdalenv["GOOGLE_APPLICATION_CREDENTIALS"] == "foo" def test_aws_session(gdalenv): """Create an Env with a boto3 session.""" aws_session = boto3.Session( - aws_access_key_id='id', aws_secret_access_key='key', - aws_session_token='token', region_name='null-island-1') - with fiona.env.Env(session=aws_session) as s: - assert s.session._session.get_credentials().get_frozen_credentials().access_key == 'id' - assert s.session._session.get_credentials().get_frozen_credentials().secret_key == 'key' - assert s.session._session.get_credentials().get_frozen_credentials().token == 'token' - assert s.session._session.region_name == 'null-island-1' + aws_access_key_id="id", + aws_secret_access_key="key", + aws_session_token="token", + region_name="null-island-1", + ) + with pytest.warns(FionaDeprecationWarning): + with fiona.env.Env(session=aws_session) as s: + assert ( + s.session._session.get_credentials().get_frozen_credentials().access_key + == "id" + ) + assert ( + s.session._session.get_credentials().get_frozen_credentials().secret_key + == "key" + ) + assert ( + s.session._session.get_credentials().get_frozen_credentials().token + == "token" + ) + assert s.session._session.region_name == "null-island-1" diff --git a/tests/test_feature.py b/tests/test_feature.py index 5efa9e49a..e4615ac1b 100644 --- a/tests/test_feature.py +++ b/tests/test_feature.py @@ -10,136 +10,166 @@ import fiona from fiona import collection from fiona.collection import Collection +from fiona.errors import FionaDeprecationWarning +from fiona.model import Feature from fiona.ogrext import featureRT class TestPointRoundTrip(object): - def setup(self): self.tempdir = tempfile.mkdtemp() - schema = {'geometry': 'Point', 'properties': {'title': 'str'}} - self.c = Collection(os.path.join(self.tempdir, "foo.shp"), - "w", driver="ESRI Shapefile", schema=schema) + schema = {"geometry": "Point", "properties": {"title": "str"}} + self.c = Collection( + os.path.join(self.tempdir, "foo.shp"), + "w", + driver="ESRI Shapefile", + 
schema=schema, + ) def teardown(self): self.c.close() shutil.rmtree(self.tempdir) def test_geometry(self): - f = { 'id': '1', - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)}, - 'properties': {'title': 'foo'} } - g = featureRT(f, self.c) - assert ( - sorted(g['geometry'].items()) == - [('coordinates', (0.0, 0.0)), ('type', 'Point')]) + f = { + "id": "1", + "geometry": {"type": "Point", "coordinates": (0.0, 0.0)}, + "properties": {"title": "foo"}, + } + with pytest.warns(FionaDeprecationWarning): + g = featureRT(f, self.c) + assert g.geometry.type == "Point" + assert g.geometry.coordinates == (0.0, 0.0) def test_properties(self): - f = { 'id': '1', - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)}, - 'properties': {'title': 'foo'} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": {"type": "Point", "coordinates": (0.0, 0.0)}, + "properties": {"title": "foo"}, + } + ) g = featureRT(f, self.c) - assert g['properties']['title'] == 'foo' + assert g.properties["title"] == "foo" def test_none_property(self): - f = { 'id': '1', - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)}, - 'properties': {'title': None} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": {"type": "Point", "coordinates": (0.0, 0.0)}, + "properties": {"title": None}, + } + ) g = featureRT(f, self.c) - assert g['properties']['title'] is None + assert g.properties["title"] is None class TestLineStringRoundTrip(object): - def setup(self): self.tempdir = tempfile.mkdtemp() - schema = {'geometry': 'LineString', 'properties': {'title': 'str'}} - self.c = Collection(os.path.join(self.tempdir, "foo.shp"), - "w", "ESRI Shapefile", schema=schema) + schema = {"geometry": "LineString", "properties": {"title": "str"}} + self.c = Collection( + os.path.join(self.tempdir, "foo.shp"), "w", "ESRI Shapefile", schema=schema + ) def teardown(self): self.c.close() shutil.rmtree(self.tempdir) def test_geometry(self): - f = { 'id': '1', - 'geometry': { 'type': 'LineString', - 'coordinates': [(0.0, 0.0), (1.0, 1.0)] }, - 'properties': {'title': 'foo'} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": { + "type": "LineString", + "coordinates": [(0.0, 0.0), (1.0, 1.0)], + }, + "properties": {"title": "foo"}, + } + ) g = featureRT(f, self.c) - assert ( - sorted(g['geometry'].items()) == - [('coordinates', [(0.0, 0.0), (1.0, 1.0)]), - ('type', 'LineString')]) + assert g.geometry.type == "LineString" + assert g.geometry.coordinates == [(0.0, 0.0), (1.0, 1.0)] def test_properties(self): - f = { 'id': '1', - 'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)}, - 'properties': {'title': 'foo'} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": {"type": "Point", "coordinates": (0.0, 0.0)}, + "properties": {"title": "foo"}, + } + ) g = featureRT(f, self.c) - assert g['properties']['title'] == 'foo' + assert g.properties["title"] == "foo" class TestPolygonRoundTrip(object): - def setup(self): self.tempdir = tempfile.mkdtemp() - schema = {'geometry': 'Polygon', 'properties': {'title': 'str'}} - self.c = Collection(os.path.join(self.tempdir, "foo.shp"), - "w", "ESRI Shapefile", schema=schema) + schema = {"geometry": "Polygon", "properties": {"title": "str"}} + self.c = Collection( + os.path.join(self.tempdir, "foo.shp"), "w", "ESRI Shapefile", schema=schema + ) def teardown(self): self.c.close() shutil.rmtree(self.tempdir) def test_geometry(self): - f = { 'id': '1', - 'geometry': { 'type': 'Polygon', - 'coordinates': - [[(0.0, 0.0), - (0.0, 1.0), - (1.0, 1.0), - (1.0, 0.0), - (0.0, 0.0)]] }, - 
'properties': {'title': 'foo'} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": { + "type": "Polygon", + "coordinates": [ + [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)] + ], + }, + "properties": {"title": "foo"}, + } + ) g = featureRT(f, self.c) - assert ( - sorted(g['geometry'].items()) == - [('coordinates', [[(0.0, 0.0), - (0.0, 1.0), - (1.0, 1.0), - (1.0, 0.0), - (0.0, 0.0)]]), - ('type', 'Polygon')]) + assert g.geometry.type == "Polygon" + assert g.geometry.coordinates == [ + [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)] + ] def test_properties(self): - f = { 'id': '1', - 'geometry': { 'type': 'Polygon', - 'coordinates': - [[(0.0, 0.0), - (0.0, 1.0), - (1.0, 1.0), - (1.0, 0.0), - (0.0, 0.0)]] }, - 'properties': {'title': 'foo'} } + f = Feature.from_dict( + **{ + "id": "1", + "geometry": { + "type": "Polygon", + "coordinates": [ + [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)] + ], + }, + "properties": {"title": "foo"}, + } + ) g = featureRT(f, self.c) - assert g['properties']['title'] == 'foo' + assert g.properties["title"] == "foo" -@pytest.mark.parametrize("driver, extension", [("ESRI Shapefile", "shp"), ("GeoJSON", "geojson")]) +@pytest.mark.parametrize( + "driver, extension", [("ESRI Shapefile", "shp"), ("GeoJSON", "geojson")] +) def test_feature_null_field(tmpdir, driver, extension): """ In GDAL 2.2 the behaviour of OGR_F_IsFieldSet slightly changed. Some drivers (e.g. GeoJSON) also require fields to be explicitly set to null. See GH #460. """ - meta = {"driver": driver, "schema": {"geometry": "Point", "properties": {"RETURN_P": "str"}}} - filename = os.path.join(str(tmpdir), "test_null."+extension) + meta = { + "driver": driver, + "schema": {"geometry": "Point", "properties": {"RETURN_P": "str"}}, + } + filename = os.path.join(str(tmpdir), "test_null." 
+ extension) with fiona.open(filename, "w", **meta) as dst: g = {"coordinates": [1.0, 2.0], "type": "Point"} - feature = {"geometry": g, "properties": {"RETURN_P": None}} + feature = Feature.from_dict(**{"geometry": g, "properties": {"RETURN_P": None}}) dst.write(feature) with fiona.open(filename, "r") as src: feature = next(iter(src)) - assert(feature["properties"]["RETURN_P"] is None) + assert feature.properties["RETURN_P"] is None diff --git a/tests/test_fio_rm.py b/tests/test_fio_rm.py index 70505b7a8..057a07e16 100644 --- a/tests/test_fio_rm.py +++ b/tests/test_fio_rm.py @@ -1,47 +1,49 @@ import os + import pytest -import fiona from click.testing import CliRunner + +import fiona +from fiona.model import Feature from fiona.fio.main import main_group + def create_sample_data(filename, driver, **extra_meta): - meta = { - 'driver': driver, - 'schema': { - 'geometry': 'Point', - 'properties': {} - } - } + meta = {"driver": driver, "schema": {"geometry": "Point", "properties": {}}} meta.update(extra_meta) - with fiona.open(filename, 'w', **meta) as dst: - dst.write({ - 'geometry': { - 'type': 'Point', - 'coordinates': (0, 0), - }, - 'properties': {}, - }) - assert(os.path.exists(filename)) + with fiona.open(filename, "w", **meta) as dst: + dst.write( + Feature.from_dict( + **{ + "geometry": { + "type": "Point", + "coordinates": (0, 0), + }, + "properties": {}, + } + ) + ) + assert os.path.exists(filename) + drivers = ["ESRI Shapefile", "GeoJSON"] + + @pytest.mark.parametrize("driver", drivers) def test_remove(tmpdir, driver): extension = {"ESRI Shapefile": "shp", "GeoJSON": "json"}[driver] filename = "delete_me.{extension}".format(extension=extension) filename = str(tmpdir.join(filename)) create_sample_data(filename, driver) - - result = CliRunner().invoke(main_group, [ - "rm", - filename, - "--yes" - ]) - print(result.output) + + result = CliRunner().invoke(main_group, ["rm", filename, "--yes"]) assert result.exit_code == 0 assert not os.path.exists(filename) has_gpkg = "GPKG" in fiona.supported_drivers.keys() + + @pytest.mark.skipif(not has_gpkg, reason="Requires GPKG driver") def test_remove_layer(tmpdir): filename = str(tmpdir.join("a_filename.gpkg")) @@ -49,13 +51,9 @@ def test_remove_layer(tmpdir): create_sample_data(filename, "GPKG", layer="layer2") assert fiona.listlayers(filename) == ["layer1", "layer2"] - result = CliRunner().invoke(main_group, [ - "rm", - filename, - "--layer", "layer2", - "--yes" - ]) - print(result.output) + result = CliRunner().invoke( + main_group, ["rm", filename, "--layer", "layer2", "--yes"] + ) assert result.exit_code == 0 assert os.path.exists(filename) assert fiona.listlayers(filename) == ["layer1"] diff --git a/tests/test_geojson.py b/tests/test_geojson.py index cabbbff02..8cf6ff07f 100644 --- a/tests/test_geojson.py +++ b/tests/test_geojson.py @@ -3,54 +3,77 @@ import fiona from fiona.collection import supported_drivers from fiona.errors import FionaValueError, DriverError, SchemaError, CRSError +from fiona.model import Feature def test_json_read(path_coutwildrnp_json): - with fiona.open(path_coutwildrnp_json, 'r') as c: + with fiona.open(path_coutwildrnp_json, "r") as c: assert len(c) == 67 def test_json(tmpdir): """Write a simple GeoJSON file""" - path = str(tmpdir.join('foo.json')) - with fiona.open(path, 'w', - driver='GeoJSON', - schema={'geometry': 'Unknown', - 'properties': [('title', 'str')]}) as c: - c.writerecords([{ - 'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]}, - 'properties': {'title': 'One'}}]) - c.writerecords([{ - 
diff --git a/tests/test_geojson.py b/tests/test_geojson.py
index cabbbff02..8cf6ff07f 100644
--- a/tests/test_geojson.py
+++ b/tests/test_geojson.py
@@ -3,54 +3,77 @@
 import fiona
 from fiona.collection import supported_drivers
 from fiona.errors import FionaValueError, DriverError, SchemaError, CRSError
+from fiona.model import Feature
 
 
 def test_json_read(path_coutwildrnp_json):
-    with fiona.open(path_coutwildrnp_json, 'r') as c:
+    with fiona.open(path_coutwildrnp_json, "r") as c:
         assert len(c) == 67
 
 
 def test_json(tmpdir):
     """Write a simple GeoJSON file"""
-    path = str(tmpdir.join('foo.json'))
-    with fiona.open(path, 'w',
-                    driver='GeoJSON',
-                    schema={'geometry': 'Unknown',
-                            'properties': [('title', 'str')]}) as c:
-        c.writerecords([{
-            'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
-            'properties': {'title': 'One'}}])
-        c.writerecords([{
-            'geometry': {'type': 'MultiPoint', 'coordinates': [[0.0, 0.0]]},
-            'properties': {'title': 'Two'}}])
+    path = str(tmpdir.join("foo.json"))
+    with fiona.open(
+        path,
+        "w",
+        driver="GeoJSON",
+        schema={"geometry": "Unknown", "properties": [("title", "str")]},
+    ) as c:
+        c.writerecords(
+            [
+                Feature.from_dict(
+                    **{
+                        "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
+                        "properties": {"title": "One"},
+                    }
+                )
+            ]
+        )
+        c.writerecords(
+            [
+                Feature.from_dict(
+                    **{
+                        "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
+                        "properties": {"title": "Two"},
+                    }
+                )
+            ]
+        )
     with fiona.open(path) as c:
-        assert c.schema['geometry'] == 'Unknown'
+        assert c.schema["geometry"] == "Unknown"
         assert len(c) == 2
 
 
 def test_json_overwrite(tmpdir):
     """Overwrite an existing GeoJSON file"""
-    path = str(tmpdir.join('foo.json'))
+    path = str(tmpdir.join("foo.json"))
 
     driver = "GeoJSON"
     schema1 = {"geometry": "Unknown", "properties": [("title", "str")]}
     schema2 = {"geometry": "Unknown", "properties": [("other", "str")]}
 
     features1 = [
-        {
-            "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
-            "properties": {"title": "One"},
-        },
-        {
-            "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
-            "properties": {"title": "Two"},
-        }
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
+                "properties": {"title": "One"},
+            }
+        ),
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
+                "properties": {"title": "Two"},
+            }
+        ),
     ]
     features2 = [
-        {
-            "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
-            "properties": {"other": "Three"},
-        },
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
+                "properties": {"other": "Three"},
+            }
+        ),
     ]
 
     # write some data to a file
@@ -61,7 +84,7 @@
     with fiona.open(path, "r") as c:
         assert len(c) == 2
         feature = next(iter(c))
-        assert feature["properties"]["title"] == "One"
+        assert feature.properties["title"] == "One"
 
     # attempt to overwrite the existing file with some new data
     with fiona.open(path, "w", driver=driver, schema=schema2) as c:
@@ -71,27 +94,31 @@
     with fiona.open(path, "r") as c:
         assert len(c) == 1
         feature = next(iter(c))
-        assert feature["properties"]["other"] == "Three"
+        assert feature.properties["other"] == "Three"
 
 
 def test_json_overwrite_invalid(tmpdir):
     """Overwrite an existing file that isn't a valid GeoJSON"""
 
     # write some invalid data to a file
-    path = str(tmpdir.join('foo.json'))
+    path = str(tmpdir.join("foo.json"))
     with open(path, "w") as f:
         f.write("This isn't a valid GeoJSON file!!!")
 
     schema1 = {"geometry": "Unknown", "properties": [("title", "str")]}
     features1 = [
-        {
-            "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
-            "properties": {"title": "One"},
-        },
-        {
-            "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
-            "properties": {"title": "Two"},
-        }
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
+                "properties": {"title": "One"},
+            }
+        ),
+        Feature.from_dict(
+            **{
+                "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
+                "properties": {"title": "Two"},
+            }
+        ),
     ]
 
     # attempt to overwrite it with a valid file
@@ -105,7 +132,7 @@
 
 def test_write_json_invalid_directory(tmpdir):
     """Attempt to create a file in a directory that doesn't exist"""
-    path = str(tmpdir.join('does-not-exist', 'foo.json'))
+    path = str(tmpdir.join("does-not-exist", "foo.json"))
     schema = {"geometry": "Unknown", "properties": [("title", "str")]}
     with pytest.raises(DriverError):
         fiona.open(path, "w", driver="GeoJSON", schema=schema)
diff --git a/tests/test_geometry.py b/tests/test_geometry.py
index 996813dcb..83bb2e17e 100644
--- a/tests/test_geometry.py
+++ b/tests/test_geometry.py
@@ -4,46 +4,58 @@
 
 from fiona._geometry import GeomBuilder, geometryRT
 from fiona.errors import UnsupportedGeometryTypeError
+from fiona.model import Geometry
 
 
 def geometry_wkb(wkb):
     try:
         wkb = bytes.fromhex(wkb)
     except AttributeError:
-        wkb = wkb.decode('hex')
+        wkb = wkb.decode("hex")
     return GeomBuilder().build_wkb(wkb)
 
 
 def test_ogr_builder_exceptions():
-    geom = {'type': "Bogus", 'coordinates': None}
+    geom = Geometry.from_dict(**{"type": "Bogus", "coordinates": None})
     with pytest.raises(UnsupportedGeometryTypeError):
         geometryRT(geom)
 
 
-@pytest.mark.parametrize('geom_type, coordinates', [
-    ('Point', (0.0, 0.0)),
-    ('LineString', [(0.0, 0.0), (1.0, 1.0)]),
-    ('Polygon',
-     [[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]),
-    ('MultiPoint', [(0.0, 0.0), (1.0, 1.0)]),
-    ('MultiLineString', [[(0.0, 0.0), (1.0, 1.0)]]),
-    ('MultiPolygon',
-     [[[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]]),
-])
+@pytest.mark.parametrize(
+    "geom_type, coordinates",
+    [
+        ("Point", (0.0, 0.0)),
+        ("LineString", [(0.0, 0.0), (1.0, 1.0)]),
+        ("Polygon", [[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]),
+        ("MultiPoint", [(0.0, 0.0), (1.0, 1.0)]),
+        ("MultiLineString", [[(0.0, 0.0), (1.0, 1.0)]]),
+        (
+            "MultiPolygon",
+            [[[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]],
+        ),
+    ],
+)
 def test_round_tripping(geom_type, coordinates):
-    result = geometryRT({'type': geom_type, 'coordinates': coordinates})
-    assert result['type'] == geom_type
-    assert result['coordinates'] == coordinates
-
-
-@pytest.mark.parametrize('geom_type, coordinates', [
-    ('Polygon', [[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]]),
-    ('MultiPolygon', [[[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]]]),
-])
+    result = geometryRT(
+        Geometry.from_dict(**{"type": geom_type, "coordinates": coordinates})
+    )
+    assert result.type == geom_type
+    assert result.coordinates == coordinates
+
+
+@pytest.mark.parametrize(
+    "geom_type, coordinates",
+    [
+        ("Polygon", [[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]]),
+        ("MultiPolygon", [[[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]]]),
+    ],
+)
 def test_implicitly_closed_round_tripping(geom_type, coordinates):
-    result = geometryRT({'type': geom_type, 'coordinates': coordinates})
-    assert result['type'] == geom_type
-    result_coordinates = result['coordinates']
+    result = geometryRT(
+        Geometry.from_dict(**{"type": geom_type, "coordinates": coordinates})
+    )
+    assert result.type == geom_type
+    result_coordinates = result.coordinates
     while not isinstance(coordinates[0], tuple):
         result_coordinates = result_coordinates[0]
         coordinates = coordinates[0]
@@ -52,44 +64,50 @@ def test_implicitly_closed_round_tripping(geom_type, coordinates):
 
 def test_geometry_collection_round_trip():
     geom = {
-        'type': "GeometryCollection",
-        'geometries': [
-            {'type': "Point", 'coordinates': (0.0, 0.0)}, {
-                'type': "LineString",
-                'coordinates': [(0.0, 0.0), (1.0, 1.0)]}]}
+        "type": "GeometryCollection",
+        "geometries": [
+            {"type": "Point", "coordinates": (0.0, 0.0)},
+            {"type": "LineString", "coordinates": [(0.0, 0.0), (1.0, 1.0)]},
+        ],
+    }
 
-    result = geometryRT(geom)
-    assert len(result['geometries']) == 2
-    assert [g['type'] for g in result['geometries']] == ['Point', 'LineString']
+    with pytest.warns(DeprecationWarning):
+        result = geometryRT(geom)
+        assert len(result["geometries"]) == 2
+        assert [g["type"] for g in result["geometries"]] == ["Point", "LineString"]
 
 
 def test_point_wkb():
     # Hex-encoded Point (0 0)
     wkb = "010100000000000000000000000000000000000000"
     geom = geometry_wkb(wkb)
-    assert geom['type'] == "Point"
-    assert geom['coordinates'] == (0.0, 0.0)
+    assert geom["type"] == "Point"
+    assert geom["coordinates"] == (0.0, 0.0)
 
 
 def test_line_wkb():
     # Hex-encoded LineString (0 0, 1 1)
-    wkb = ("01020000000200000000000000000000000000000000000000000000000000f03f"
-           "000000000000f03f")
+    wkb = (
+        "01020000000200000000000000000000000000000000000000000000000000f03f"
+        "000000000000f03f"
+    )
     geom = geometry_wkb(wkb)
-    assert geom['type'] == "LineString"
-    assert geom['coordinates'] == [(0.0, 0.0), (1.0, 1.0)]
+    assert geom["type"] == "LineString"
+    assert geom["coordinates"] == [(0.0, 0.0), (1.0, 1.0)]
 
 
 def test_polygon_wkb():
     # 1 x 1 box (0, 0, 1, 1)
-    wkb = ("01030000000100000005000000000000000000f03f000000000000000000000000"
-           "0000f03f000000000000f03f0000000000000000000000000000f03f0000000000"
-           "0000000000000000000000000000000000f03f0000000000000000")
+    wkb = (
+        "01030000000100000005000000000000000000f03f000000000000000000000000"
+        "0000f03f000000000000f03f0000000000000000000000000000f03f0000000000"
+        "0000000000000000000000000000000000f03f0000000000000000"
+    )
     geom = geometry_wkb(wkb)
-    assert geom['type'], "Polygon"
-    assert len(geom['coordinates']) == 1
-    assert len(geom['coordinates'][0]) == 5
-    x, y = zip(*geom['coordinates'][0])
+    assert geom["type"] == "Polygon"
+    assert len(geom["coordinates"]) == 1
+    assert len(geom["coordinates"][0]) == 5
+    x, y = zip(*geom["coordinates"][0])
     assert min(x) == 0.0
     assert min(y) == 0.0
     assert max(x) == 1.0
@@ -97,36 +115,42 @@ def test_polygon_wkb():
 
 def test_multipoint_wkb():
-    wkb = ("010400000002000000010100000000000000000000000000000000000000010100"
-           "0000000000000000f03f000000000000f03f")
+    wkb = (
+        "010400000002000000010100000000000000000000000000000000000000010100"
+        "0000000000000000f03f000000000000f03f"
+    )
     geom = geometry_wkb(wkb)
-    assert geom['type'] == "MultiPoint"
-    assert geom['coordinates'] == [(0.0, 0.0), (1.0, 1.0)]
+    assert geom["type"] == "MultiPoint"
+    assert geom["coordinates"] == [(0.0, 0.0), (1.0, 1.0)]
 
 
 def test_multilinestring_wkb():
     # Hex-encoded LineString (0 0, 1 1)
-    wkb = ("010500000001000000010200000002000000000000000000000000000000000000"
-           "00000000000000f03f000000000000f03f")
+    wkb = (
+        "010500000001000000010200000002000000000000000000000000000000000000"
+        "00000000000000f03f000000000000f03f"
+    )
     geom = geometry_wkb(wkb)
-    assert geom['type'] == "MultiLineString"
-    assert len(geom['coordinates']) == 1
-    assert len(geom['coordinates'][0]) == 2
-    assert geom['coordinates'][0] == [(0.0, 0.0), (1.0, 1.0)]
+    assert geom["type"] == "MultiLineString"
+    assert len(geom["coordinates"]) == 1
+    assert len(geom["coordinates"][0]) == 2
+    assert geom["coordinates"][0] == [(0.0, 0.0), (1.0, 1.0)]
 
 
 def test_multipolygon_wkb():
     # [1 x 1 box (0, 0, 1, 1)]
-    wkb = ("01060000000100000001030000000100000005000000000000000000f03f000000"
-           "0000000000000000000000f03f000000000000f03f000000000000000000000000"
-           "0000f03f00000000000000000000000000000000000000000000f03f0000000000"
-           "000000")
+    wkb = (
+        "01060000000100000001030000000100000005000000000000000000f03f000000"
+        "0000000000000000000000f03f000000000000f03f000000000000000000000000"
+        "0000f03f00000000000000000000000000000000000000000000f03f0000000000"
+        "000000"
+    )
     geom = geometry_wkb(wkb)
-    assert geom['type'] == "MultiPolygon"
-    assert len(geom['coordinates']) == 1
-    assert len(geom['coordinates'][0]) == 1
-    assert len(geom['coordinates'][0][0]) == 5
-    x, y = zip(*geom['coordinates'][0][0])
+    assert geom["type"] == "MultiPolygon"
+    assert len(geom["coordinates"]) == 1
+    assert len(geom["coordinates"][0]) == 1
+    assert len(geom["coordinates"][0][0]) == 5
+    x, y = zip(*geom["coordinates"][0][0])
     assert min(x) == 0.0
     assert min(y) == 0.0
     assert max(x) == 1.0
""" - path = str(tmpdir.join('foo.gpkg')) + path = str(tmpdir.join("foo.gpkg")) - with fiona.open(path, 'w', - driver='GPKG', - schema=example_schema, - layer="layer1", - crs=example_crs) as dst: + with fiona.open( + path, "w", driver="GPKG", schema=example_schema, layer="layer1", crs=example_crs + ) as dst: dst.writerecords(example_features[0:2]) - with fiona.open(path, 'w', - driver='GPKG', - schema=example_schema, - layer="layer2", - crs=example_crs) as dst: + with fiona.open( + path, "w", driver="GPKG", schema=example_schema, layer="layer2", crs=example_crs + ) as dst: dst.writerecords(example_features[2:]) with fiona.open(path, layer="layer1") as src: - assert src.schema['geometry'] == 'Point' + assert src.schema["geometry"] == "Point" assert len(src) == 2 with fiona.open(path, layer="layer2") as src: - assert src.schema['geometry'] == 'Point' + assert src.schema["geometry"] == "Point" assert len(src) == 1 diff --git a/tests/test_integration.py b/tests/test_integration.py index 949e0039f..bccfb5344 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -4,6 +4,7 @@ from collections import UserDict import fiona +from fiona.model import Feature def test_dict_subclass(tmpdir): @@ -17,27 +18,25 @@ def test_dict_subclass(tmpdir): class CRS(UserDict): pass - outfile = str(tmpdir.join('test_UserDict.geojson')) + outfile = str(tmpdir.join("test_UserDict.geojson")) profile = { - 'crs': CRS(init='EPSG:4326'), - 'driver': 'GeoJSON', - 'schema': { - 'geometry': 'Point', - 'properties': {} - } + "crs": CRS(init="EPSG:4326"), + "driver": "GeoJSON", + "schema": {"geometry": "Point", "properties": {}}, } - with fiona.open(outfile, 'w', **profile) as dst: - dst.write({ - 'type': 'Feature', - 'properties': {}, - 'geometry': { - 'type': 'Point', - 'coordinates': (10, -10) - } - }) + with fiona.open(outfile, "w", **profile) as dst: + dst.write( + Feature.from_dict( + **{ + "type": "Feature", + "properties": {}, + "geometry": {"type": "Point", "coordinates": (10, -10)}, + } + ) + ) with fiona.open(outfile) as src: assert len(src) == 1 - assert src.crs == {'init': 'epsg:4326'} + assert src.crs == {"init": "epsg:4326"} diff --git a/tests/test_model.py b/tests/test_model.py index 656dd890b..a478d36a5 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -126,6 +126,7 @@ def test_geometry__props(): assert Geometry(coordinates=(0, 0), type="Point")._props() == { "coordinates": (0, 0), "type": "Point", + "geometries": None, } @@ -157,7 +158,7 @@ def test_feature_id(): def test_feature_no_properties(): """Feature has no properties""" feat = Feature() - assert feat.properties is None + assert len(feat.properties) == 0 def test_feature_properties(): diff --git a/tests/test_multiconxn.py b/tests/test_multiconxn.py index 16223f53c..36d9adb64 100644 --- a/tests/test_multiconxn.py +++ b/tests/test_multiconxn.py @@ -3,7 +3,7 @@ import pytest import fiona -from fiona.model import Feature, Geometry +from fiona.model import Feature, Geometry, Properties class TestReadAccess(object): @@ -12,18 +12,18 @@ class TestReadAccess(object): def test_meta(self, path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c: - with fiona.open(path_coutwildrnp_shp, "r", - layer="coutwildrnp") as c2: + with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c2: assert len(c) == len(c2) assert sorted(c.schema.items()) == sorted(c2.schema.items()) def test_feat(self, path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c: f1 = 
diff --git a/tests/test_multiconxn.py b/tests/test_multiconxn.py
index 16223f53c..36d9adb64 100644
--- a/tests/test_multiconxn.py
+++ b/tests/test_multiconxn.py
@@ -3,7 +3,7 @@
 import pytest
 
 import fiona
-from fiona.model import Feature, Geometry
+from fiona.model import Feature, Geometry, Properties
 
 
 class TestReadAccess(object):
@@ -12,18 +12,18 @@
     def test_meta(self, path_coutwildrnp_shp):
         with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c:
-            with fiona.open(path_coutwildrnp_shp, "r",
-                            layer="coutwildrnp") as c2:
+            with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c2:
                 assert len(c) == len(c2)
                 assert sorted(c.schema.items()) == sorted(c2.schema.items())
 
     def test_feat(self, path_coutwildrnp_shp):
         with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c:
             f1 = next(iter(c))
-            with fiona.open(path_coutwildrnp_shp, "r",
-                            layer="coutwildrnp") as c2:
+            with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c2:
                 f2 = next(iter(c2))
-                assert f1 == f2
+                assert f1.id == f2.id
+                assert f1.properties == f2.properties
+                assert f1.geometry.type == f2.geometry.type
 
 
 class TestReadWriteAccess:
@@ -34,7 +34,8 @@
     def multi_write_test_shp(self, tmpdir):
         self.shapefile_path = str(tmpdir.join("multi_write_test.shp"))
         self.c = fiona.open(
-            self.shapefile_path, "w",
+            self.shapefile_path,
+            "w",
             driver="ESRI Shapefile",
             schema={
                 "geometry": "Point",
@@ -46,7 +47,7 @@
         self.f = Feature(
             id="0",
             geometry=Geometry(type="Point", coordinates=(0.0, 0.1)),
-            properties=OrderedDict([("title", "point one"), ("date", "2012-01-29")]),
+            properties=Properties(title="point one", date="2012-01-29"),
         )
         self.c.writerecords([self.f])
         self.c.flush()
@@ -62,7 +63,9 @@
     def test_read(self):
         c2 = fiona.open(self.shapefile_path, "r")
         f2 = next(iter(c2))
-        assert self.f == f2
+        assert self.f.id == f2.id
+        assert self.f.properties == f2.properties
+        assert self.f.geometry.type == f2.geometry.type
         c2.close()
 
     def test_read_after_close(self):
@@ -76,17 +79,23 @@
 class TestLayerCreation(object):
     @pytest.fixture(autouse=True)
     def layer_creation_shp(self, tmpdir):
-        self.dir = tmpdir.mkdir('layer_creation')
+        self.dir = tmpdir.mkdir("layer_creation")
         self.c = fiona.open(
-            str(self.dir), 'w',
-            layer='write_test',
-            driver='ESRI Shapefile',
+            str(self.dir),
+            "w",
+            layer="write_test",
+            driver="ESRI Shapefile",
             schema={
-                'geometry': 'Point',
-                'properties': [('title', 'str:80'), ('date', 'date')]},
-            crs={'init': "epsg:4326", 'no_defs': True},
-            encoding='utf-8')
-        self.f = Feature(geometry=Geometry(type="Point", coordinates=(0.0, 0.1)), properties=OrderedDict([('title', 'point one'), ('date', '2012-01-29')]))
+                "geometry": "Point",
+                "properties": [("title", "str:80"), ("date", "date")],
+            },
+            crs={"init": "epsg:4326", "no_defs": True},
+            encoding="utf-8",
+        )
+        self.f = Feature(
+            geometry=Geometry(type="Point", coordinates=(0.0, 0.1)),
+            properties=OrderedDict([("title", "point one"), ("date", "2012-01-29")]),
+        )
         self.c.writerecords([self.f])
         self.c.flush()
         yield
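
Note that these multi-connection tests stop comparing whole features with == and instead compare id, properties, and geometry type separately. A hypothetical helper, not part of this patch, capturing that comparison:

    def features_equalish(a, b):
        """Part-wise comparison of two fiona.model.Feature objects."""
        return (
            a.id == b.id
            and a.properties == b.properties
            and a.geometry.type == b.geometry.type
        )
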
== "GPKG" - assert src.crs == {"init": "epsg:32611"} + assert src.crs == "EPSG:32611" diff --git a/tests/test_props.py b/tests/test_props.py index f50aa049e..e3e1fdba4 100644 --- a/tests/test_props.py +++ b/tests/test_props.py @@ -4,26 +4,27 @@ import fiona from fiona import prop_type, prop_width +from fiona.model import Feature from fiona.rfc3339 import FionaDateType def test_width_str(): - assert prop_width('str:254') == 254 - assert prop_width('str') == 80 + assert prop_width("str:254") == 254 + assert prop_width("str") == 80 def test_width_other(): - assert prop_width('int') == None - assert prop_width('float') == None - assert prop_width('date') == None + assert prop_width("int") == None + assert prop_width("float") == None + assert prop_width("date") == None def test_types(): - assert prop_type('str:254') == str - assert prop_type('str') == str - assert isinstance(0, prop_type('int')) - assert isinstance(0.0, prop_type('float')) - assert prop_type('date') == FionaDateType + assert prop_type("str:254") == str + assert prop_type("str") == str + assert isinstance(0, prop_type("int")) + assert isinstance(0.0, prop_type("float")) + assert prop_type("date") == FionaDateType def test_read_json_object_properties(): @@ -73,17 +74,17 @@ def test_read_json_object_properties(): } """ tmpdir = tempfile.mkdtemp() - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write(data) with fiona.open(filename) as src: ftr = next(iter(src)) - props = ftr['properties'] - assert props['upperLeftCoordinate']['latitude'] == 45.66894 - assert props['upperLeftCoordinate']['longitude'] == 87.91166 - assert props['tricky'] == "{gotcha" + props = ftr["properties"] + assert props["upperLeftCoordinate"]["latitude"] == 45.66894 + assert props["upperLeftCoordinate"]["longitude"] == 87.91166 + assert props["tricky"] == "{gotcha" def test_write_json_object_properties(): @@ -132,64 +133,59 @@ def test_write_json_object_properties(): ] } """ - data = json.loads(data)['features'][0] + data = Feature.from_dict(**json.loads(data)["features"][0]) tmpdir = tempfile.mkdtemp() - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") with fiona.open( - filename, 'w', - driver='GeoJSON', - schema={ - 'geometry': 'Polygon', - 'properties': {'upperLeftCoordinate': 'str', 'tricky': 'str'}} - ) as dst: + filename, + "w", + driver="GeoJSON", + schema={ + "geometry": "Polygon", + "properties": {"upperLeftCoordinate": "str", "tricky": "str"}, + }, + ) as dst: dst.write(data) with fiona.open(filename) as src: ftr = next(iter(src)) - props = ftr['properties'] - assert props['upperLeftCoordinate']['latitude'] == 45.66894 - assert props['upperLeftCoordinate']['longitude'] == 87.91166 - assert props['tricky'] == "{gotcha" + props = ftr["properties"] + assert props["upperLeftCoordinate"]["latitude"] == 45.66894 + assert props["upperLeftCoordinate"]["longitude"] == 87.91166 + assert props["tricky"] == "{gotcha" def test_json_prop_decode_non_geojson_driver(): - feature = { - "type": "Feature", - "properties": { - "ulc": { - "latitude": 45.66894, - "longitude": 87.91166 + feature = Feature.from_dict( + **{ + "type": "Feature", + "properties": { + "ulc": {"latitude": 45.66894, "longitude": 87.91166}, + "tricky": "{gotcha", }, - "tricky": "{gotcha" - }, - "geometry": { - "type": "Point", - "coordinates": [10, 15] + "geometry": {"type": "Point", "coordinates": [10, 15]}, } - } + ) meta = { - 'crs': 
'EPSG:4326', - 'driver': 'ESRI Shapefile', - 'schema': { - 'geometry': 'Point', - 'properties': { - 'ulc': 'str:255', - 'tricky': 'str:255' - } - } + "crs": "EPSG:4326", + "driver": "ESRI Shapefile", + "schema": { + "geometry": "Point", + "properties": {"ulc": "str:255", "tricky": "str:255"}, + }, } tmpdir = tempfile.mkdtemp() - filename = os.path.join(tmpdir, 'test.json') - with fiona.open(filename, 'w', **meta) as dst: + filename = os.path.join(tmpdir, "test.json") + with fiona.open(filename, "w", **meta) as dst: dst.write(feature) with fiona.open(filename) as src: actual = next(iter(src)) - assert isinstance(actual['properties']['ulc'], str) - a = json.loads(actual['properties']['ulc']) - e = json.loads(actual['properties']['ulc']) + assert isinstance(actual["properties"]["ulc"], str) + a = json.loads(actual["properties"]["ulc"]) + e = json.loads(actual["properties"]["ulc"]) assert e == a - assert actual['properties']['tricky'].startswith('{') + assert actual["properties"]["tricky"].startswith("{") diff --git a/tests/test_remove.py b/tests/test_remove.py index 2d2ce96eb..7ed811626 100644 --- a/tests/test_remove.py +++ b/tests/test_remove.py @@ -8,26 +8,25 @@ import fiona from fiona.errors import DatasetDeleteError +from fiona.model import Feature def create_sample_data(filename, driver, **extra_meta): - meta = { - 'driver': driver, - 'schema': { - 'geometry': 'Point', - 'properties': {} - } - } + meta = {"driver": driver, "schema": {"geometry": "Point", "properties": {}}} meta.update(extra_meta) - with fiona.open(filename, 'w', **meta) as dst: - dst.write({ - 'geometry': { - 'type': 'Point', - 'coordinates': (0, 0), - }, - 'properties': {}, - }) - assert(os.path.exists(filename)) + with fiona.open(filename, "w", **meta) as dst: + dst.write( + Feature.from_dict( + **{ + "geometry": { + "type": "Point", + "coordinates": (0, 0), + }, + "properties": {}, + } + ) + ) + assert os.path.exists(filename) drivers = ["ESRI Shapefile", "GeoJSON"] @@ -64,6 +63,7 @@ def test_remove_nonexistent(tmpdir): with pytest.raises(OSError): fiona.remove(filename) + @requires_gpkg def test_remove_layer(tmpdir): filename = str(tmpdir.join("a_filename.gpkg")) diff --git a/tests/test_schema.py b/tests/test_schema.py index c49d06fa7..bd7d2632a 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,183 +1,204 @@ from collections import OrderedDict - -import fiona -from fiona.errors import SchemaError, UnsupportedGeometryTypeError, \ - DriverSupportError -from fiona.schema import FIELD_TYPES, normalize_field_type import os import tempfile -from .conftest import get_temp_filename + +import pytest + +import fiona from fiona.drvsupport import driver_mode_mingdal from fiona.env import GDALVersion -import pytest +from fiona.errors import SchemaError, UnsupportedGeometryTypeError, DriverSupportError +from fiona.model import Feature +from fiona.schema import FIELD_TYPES, normalize_field_type +from .conftest import get_temp_filename from .conftest import requires_only_gdal1, requires_gdal2 def test_schema_ordering_items(tmpdir): - name = str(tmpdir.join('test_scheme.shp')) - items = [('title', 'str:80'), ('date', 'date')] - with fiona.open(name, 'w', - driver="ESRI Shapefile", - schema={ - 'geometry': 'LineString', - 'properties': items}) as c: - assert list(c.schema['properties'].items()) == items + name = str(tmpdir.join("test_scheme.shp")) + items = [("title", "str:80"), ("date", "date")] + with fiona.open( + name, + "w", + driver="ESRI Shapefile", + schema={"geometry": "LineString", "properties": items}, + ) 
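
The removal tests above exercise fiona.remove, which deletes a whole dataset or, for multi-layer formats such as GPKG, a single layer. A sketch with hypothetical paths:

    import fiona

    fiona.remove("/tmp/delete_me.shp", driver="ESRI Shapefile")
    fiona.remove("/tmp/stack.gpkg", layer="layer2")  # leaves the other layers intact
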
diff --git a/tests/test_schema.py b/tests/test_schema.py
index c49d06fa7..bd7d2632a 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1,183 +1,204 @@
 from collections import OrderedDict
-
-import fiona
-from fiona.errors import SchemaError, UnsupportedGeometryTypeError, \
-    DriverSupportError
-from fiona.schema import FIELD_TYPES, normalize_field_type
 import os
 import tempfile
-from .conftest import get_temp_filename
+
+import pytest
+
+import fiona
 from fiona.drvsupport import driver_mode_mingdal
 from fiona.env import GDALVersion
-import pytest
+from fiona.errors import SchemaError, UnsupportedGeometryTypeError, DriverSupportError
+from fiona.model import Feature
+from fiona.schema import FIELD_TYPES, normalize_field_type
+from .conftest import get_temp_filename
 from .conftest import requires_only_gdal1, requires_gdal2
 
 
 def test_schema_ordering_items(tmpdir):
-    name = str(tmpdir.join('test_scheme.shp'))
-    items = [('title', 'str:80'), ('date', 'date')]
-    with fiona.open(name, 'w',
-                    driver="ESRI Shapefile",
-                    schema={
-                        'geometry': 'LineString',
-                        'properties': items}) as c:
-        assert list(c.schema['properties'].items()) == items
+    name = str(tmpdir.join("test_scheme.shp"))
+    items = [("title", "str:80"), ("date", "date")]
+    with fiona.open(
+        name,
+        "w",
+        driver="ESRI Shapefile",
+        schema={"geometry": "LineString", "properties": items},
+    ) as c:
+        assert list(c.schema["properties"].items()) == items
     with fiona.open(name) as c:
-        assert list(c.schema['properties'].items()) == items
+        assert list(c.schema["properties"].items()) == items
 
 
 def test_shapefile_schema(tmpdir):
-    name = str(tmpdir.join('test_schema.shp'))
-    items = sorted({
-        'AWATER10': 'float',
-        'CLASSFP10': 'str',
-        'ZipCodeType': 'str',
-        'EstimatedPopulation': 'float',
-        'LocationType': 'str',
-        'ALAND10': 'float',
-        'TotalWages': 'float',
-        'FUNCSTAT10': 'str',
-        'Long': 'float',
-        'City': 'str',
-        'TaxReturnsFiled': 'float',
-        'State': 'str',
-        'Location': 'str',
-        'GSrchCnt': 'float',
-        'INTPTLAT10': 'str',
-        'Lat': 'float',
-        'MTFCC10': 'str',
-        'Decommisioned': 'str',
-        'GEOID10': 'str',
-        'INTPTLON10': 'str'}.items())
-    with fiona.open(name, 'w',
-                    driver="ESRI Shapefile",
-                    schema={'geometry': 'Polygon', 'properties': items}) as c:
-        assert list(c.schema['properties'].items()) == items
+    name = str(tmpdir.join("test_schema.shp"))
+    items = sorted(
+        {
+            "AWATER10": "float",
+            "CLASSFP10": "str",
+            "ZipCodeType": "str",
+            "EstimatedPopulation": "float",
+            "LocationType": "str",
+            "ALAND10": "float",
+            "TotalWages": "float",
+            "FUNCSTAT10": "str",
+            "Long": "float",
+            "City": "str",
+            "TaxReturnsFiled": "float",
+            "State": "str",
+            "Location": "str",
+            "GSrchCnt": "float",
+            "INTPTLAT10": "str",
+            "Lat": "float",
+            "MTFCC10": "str",
+            "Decommisioned": "str",
+            "GEOID10": "str",
+            "INTPTLON10": "str",
+        }.items()
+    )
+    with fiona.open(
+        name,
+        "w",
+        driver="ESRI Shapefile",
+        schema={"geometry": "Polygon", "properties": items},
+    ) as c:
+        assert list(c.schema["properties"].items()) == items
         c.write(
-            {'geometry': {'coordinates': [[(-117.882442, 33.783633),
-                                           (-117.882284, 33.783817),
-                                           (-117.863348, 33.760016),
-                                           (-117.863478, 33.760016),
-                                           (-117.863869, 33.760017),
-                                           (-117.864, 33.760017999999995),
-                                           (-117.864239, 33.760019),
-                                           (-117.876608, 33.755769),
-                                           (-117.882886, 33.783114),
-                                           (-117.882688, 33.783345),
-                                           (-117.882639, 33.783401999999995),
-                                           (-117.88259, 33.78346),
-                                           (-117.882442, 33.783633)]],
-              'type': 'Polygon'},
-             'id': '1',
-             'properties': {
-                 'ALAND10': 8819240.0,
-                 'AWATER10': 309767.0,
-                 'CLASSFP10': 'B5',
-                 'City': 'SANTA ANA',
-                 'Decommisioned': False,
-                 'EstimatedPopulation': 27773.0,
-                 'FUNCSTAT10': 'S',
-                 'GEOID10': '92706',
-                 'GSrchCnt': 0.0,
-                 'INTPTLAT10': '+33.7653010',
-                 'INTPTLON10': '-117.8819759',
-                 'Lat': 33.759999999999998,
-                 'Location': 'NA-US-CA-SANTA ANA',
-                 'LocationType': 'PRIMARY',
-                 'Long': -117.88,
-                 'MTFCC10': 'G6350',
-                 'State': 'CA',
-                 'TaxReturnsFiled': 14635.0,
-                 'TotalWages': 521280485.0,
-                 'ZipCodeType': 'STANDARD'},
-             'type': 'Feature'})
+            Feature.from_dict(
+                **{
+                    "geometry": {
+                        "coordinates": [
+                            [
+                                (-117.882442, 33.783633),
+                                (-117.882284, 33.783817),
+                                (-117.863348, 33.760016),
+                                (-117.863478, 33.760016),
+                                (-117.863869, 33.760017),
+                                (-117.864, 33.760017999999995),
+                                (-117.864239, 33.760019),
+                                (-117.876608, 33.755769),
+                                (-117.882886, 33.783114),
+                                (-117.882688, 33.783345),
+                                (-117.882639, 33.783401999999995),
+                                (-117.88259, 33.78346),
+                                (-117.882442, 33.783633),
+                            ]
+                        ],
+                        "type": "Polygon",
+                    },
+                    "id": "1",
+                    "properties": {
+                        "ALAND10": 8819240.0,
+                        "AWATER10": 309767.0,
+                        "CLASSFP10": "B5",
+                        "City": "SANTA ANA",
+                        "Decommisioned": False,
+                        "EstimatedPopulation": 27773.0,
+                        "FUNCSTAT10": "S",
+                        "GEOID10": "92706",
+                        "GSrchCnt": 0.0,
+                        "INTPTLAT10": "+33.7653010",
+                        "INTPTLON10": "-117.8819759",
+                        "Lat": 33.759999999999998,
+                        "Location": "NA-US-CA-SANTA ANA",
+                        "LocationType": "PRIMARY",
+                        "Long": -117.88,
+                        "MTFCC10": "G6350",
+                        "State": "CA",
+                        "TaxReturnsFiled": 14635.0,
+                        "TotalWages": 521280485.0,
+                        "ZipCodeType": "STANDARD",
+                    },
+                    "type": "Feature",
+                }
+            )
+        )
         assert len(c) == 1
     with fiona.open(name) as c:
-        assert (
-            list(c.schema['properties'].items()) ==
-            sorted([('AWATER10', 'float:24.15'),
-                    ('CLASSFP10', 'str:80'),
-                    ('ZipCodeTyp', 'str:80'),
-                    ('EstimatedP', 'float:24.15'),
-                    ('LocationTy', 'str:80'),
-                    ('ALAND10', 'float:24.15'),
-                    ('INTPTLAT10', 'str:80'),
-                    ('FUNCSTAT10', 'str:80'),
-                    ('Long', 'float:24.15'),
-                    ('City', 'str:80'),
-                    ('TaxReturns', 'float:24.15'),
-                    ('State', 'str:80'),
-                    ('Location', 'str:80'),
-                    ('GSrchCnt', 'float:24.15'),
-                    ('TotalWages', 'float:24.15'),
-                    ('Lat', 'float:24.15'),
-                    ('MTFCC10', 'str:80'),
-                    ('INTPTLON10', 'str:80'),
-                    ('GEOID10', 'str:80'),
-                    ('Decommisio', 'str:80')]))
+        assert list(c.schema["properties"].items()) == sorted(
+            [
+                ("AWATER10", "float:24.15"),
+                ("CLASSFP10", "str:80"),
+                ("ZipCodeTyp", "str:80"),
+                ("EstimatedP", "float:24.15"),
+                ("LocationTy", "str:80"),
+                ("ALAND10", "float:24.15"),
+                ("INTPTLAT10", "str:80"),
+                ("FUNCSTAT10", "str:80"),
+                ("Long", "float:24.15"),
+                ("City", "str:80"),
+                ("TaxReturns", "float:24.15"),
+                ("State", "str:80"),
+                ("Location", "str:80"),
+                ("GSrchCnt", "float:24.15"),
+                ("TotalWages", "float:24.15"),
+                ("Lat", "float:24.15"),
+                ("MTFCC10", "str:80"),
+                ("INTPTLON10", "str:80"),
+                ("GEOID10", "str:80"),
+                ("Decommisio", "str:80"),
+            ]
+        )
         f = next(iter(c))
-        assert f['properties']['EstimatedP'] == 27773.0
+        assert f.properties["EstimatedP"] == 27773.0
 
 
 def test_field_truncation_issue177(tmpdir):
-    name = str(tmpdir.join('output.shp'))
+    name = str(tmpdir.join("output.shp"))
 
     kwargs = {
-        'driver': 'ESRI Shapefile',
-        'crs': 'EPSG:4326',
-        'schema': {
-            'geometry': 'Point',
-            'properties': [('a_fieldname', 'float')]}}
+        "driver": "ESRI Shapefile",
+        "crs": "EPSG:4326",
+        "schema": {"geometry": "Point", "properties": [("a_fieldname", "float")]},
+    }
 
-    with fiona.open(name, 'w', **kwargs) as dst:
+    with fiona.open(name, "w", **kwargs) as dst:
         rec = {}
-        rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
-        rec['properties'] = {'a_fieldname': 3.0}
-        dst.write(rec)
+        rec["geometry"] = {"type": "Point", "coordinates": (0, 0)}
+        rec["properties"] = {"a_fieldname": 3.0}
+        dst.write(Feature.from_dict(**rec))
 
     with fiona.open(name) as src:
         first = next(iter(src))
-        assert first['geometry'] == {'type': 'Point', 'coordinates': (0, 0)}
-        assert first['properties']['a_fieldnam'] == 3.0
+        assert first.geometry.type == "Point"
+        assert first.geometry.coordinates == (0, 0)
+        assert first.properties["a_fieldnam"] == 3.0
 
 
 def test_unsupported_geometry_type():
     tmpdir = tempfile.mkdtemp()
-    tmpfile = os.path.join(tmpdir, 'test-test-geom.shp')
+    tmpfile = os.path.join(tmpdir, "test-test-geom.shp")
 
     profile = {
-        'driver': 'ESRI Shapefile',
-        'schema': {
-            'geometry': 'BOGUS',
-            'properties': {}}}
+        "driver": "ESRI Shapefile",
+        "schema": {"geometry": "BOGUS", "properties": {}},
+    }
 
     with pytest.raises(UnsupportedGeometryTypeError):
-        fiona.open(tmpfile, 'w', **profile)
+        fiona.open(tmpfile, "w", **profile)
 
 
-@pytest.mark.parametrize('x', list(range(1, 10)))
+@pytest.mark.parametrize("x", list(range(1, 10)))
 def test_normalize_int32(x):
-    assert normalize_field_type('int:{}'.format(x)) == 'int32'
+    assert normalize_field_type("int:{}".format(x)) == "int32"
 
 
 @requires_gdal2
-@pytest.mark.parametrize('x', list(range(10, 20)))
+@pytest.mark.parametrize("x", list(range(10, 20)))
 def test_normalize_int64(x):
-    assert normalize_field_type('int:{}'.format(x)) == 'int64'
+    assert normalize_field_type("int:{}".format(x)) == "int64"
 
 
-@pytest.mark.parametrize('x', list(range(0, 20)))
+@pytest.mark.parametrize("x", list(range(0, 20)))
 def test_normalize_str(x):
-    assert normalize_field_type('str:{}'.format(x)) == 'str'
+    assert normalize_field_type("str:{}".format(x)) == "str"
 
 
 def test_normalize_bool():
-    assert normalize_field_type('bool') == 'bool'
+    assert normalize_field_type("bool") == "bool"
 
 
 def test_normalize_float():
-    assert normalize_field_type('float:25.8') == 'float'
+    assert normalize_field_type("float:25.8") == "float"
 
 
 def generate_field_types():
@@ -191,76 +212,83 @@
     return list(sorted(types)) + [None]
 
 
-@pytest.mark.parametrize('x', generate_field_types())
+@pytest.mark.parametrize("x", generate_field_types())
 def test_normalize_std(x):
     assert normalize_field_type(x) == x
 
 
 def test_normalize_error():
     with pytest.raises(SchemaError):
-        assert normalize_field_type('thingy')
+        assert normalize_field_type("thingy")
 
 
 @requires_only_gdal1
-@pytest.mark.parametrize('field_type', ['time', 'datetime'])
+@pytest.mark.parametrize("field_type", ["time", "datetime"])
 def test_check_schema_driver_support_shp(tmpdir, field_type):
     with pytest.raises(DriverSupportError):
-        name = str(tmpdir.join('test_scheme.shp'))
-        items = [('field1', field_type)]
-        with fiona.open(name, 'w',
-                        driver="ESRI Shapefile",
-                        schema={
-                            'geometry': 'LineString',
-                            'properties': items}) as c:
-            pass
+        name = str(tmpdir.join("test_scheme.shp"))
+        items = [("field1", field_type)]
+        with fiona.open(
+            name,
+            "w",
+            driver="ESRI Shapefile",
+            schema={"geometry": "LineString", "properties": items},
+        ) as c:
+            pass
 
 
 @requires_only_gdal1
 def test_check_schema_driver_support_gpkg(tmpdir):
     with pytest.raises(DriverSupportError):
-        name = str(tmpdir.join('test_scheme.gpkg'))
-        items = [('field1', 'time')]
-        with fiona.open(name, 'w',
-                        driver="GPKG",
-                        schema={
-                            'geometry': 'LineString',
-                            'properties': items}) as c:
+        name = str(tmpdir.join("test_scheme.gpkg"))
+        items = [("field1", "time")]
+        with fiona.open(
+            name,
+            "w",
+            driver="GPKG",
+            schema={"geometry": "LineString", "properties": items},
+        ) as c:
             pass
 
 
-@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
+@pytest.mark.parametrize("driver", ["GPKG", "GeoJSON"])
 def test_geometry_only_schema_write(tmpdir, driver):
     schema = {
         "geometry": "Polygon",
        # No properties defined here.
     }
 
-    record = {'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]}}
+    record = Feature.from_dict(
+        **{
+            "geometry": {
+                "type": "Polygon",
+                "coordinates": [
+                    [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]
+                ],
+            }
+        }
+    )
 
     path = str(tmpdir.join(get_temp_filename(driver)))
 
-    with fiona.open(path,
-                    mode='w',
-                    driver=driver,
-                    schema=schema) as c:
+    with fiona.open(path, mode="w", driver=driver, schema=schema) as c:
         c.write(record)
 
-    with fiona.open(path,
-                    mode='r',
-                    driver=driver) as c:
+    with fiona.open(path, mode="r", driver=driver) as c:
         data = [f for f in c]
         assert len(data) == 1
-        assert len(data[0].get('properties', {})) == 0
-        assert data[0]['geometry'] == record['geometry']
+        assert len(data[0].properties) == 0
+        assert data[0].geometry.type == record.geometry["type"]
 
 
-@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
+@pytest.mark.parametrize("driver", ["GPKG", "GeoJSON"])
 def test_geometry_only_schema_update(tmpdir, driver):
     # Guard unsupported drivers
-    if driver in driver_mode_mingdal['a'] and GDALVersion.runtime() < GDALVersion(
-            *driver_mode_mingdal['a'][driver][:2]):
+    if driver in driver_mode_mingdal["a"] and GDALVersion.runtime() < GDALVersion(
+        *driver_mode_mingdal["a"][driver][:2]
+    ):
         return
 
     schema = {
@@ -268,119 +296,118 @@
        # No properties defined here.
     }
 
-    record1 = {
-        'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]]}}
-    record2 = {
-        'geometry': {'type': 'Polygon', 'coordinates': [[(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (2.0, 0.0), (0.0, 0.0)]]}}
+    record1 = Feature.from_dict(
+        **{
+            "geometry": {
+                "type": "Polygon",
+                "coordinates": [
+                    [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]
+                ],
+            }
+        }
+    )
+    record2 = Feature.from_dict(
+        **{
+            "geometry": {
+                "type": "Polygon",
+                "coordinates": [
+                    [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (2.0, 0.0), (0.0, 0.0)]
+                ],
+            }
+        }
+    )
 
     path = str(tmpdir.join(get_temp_filename(driver)))
 
     # Create file
-    with fiona.open(path,
-                    mode='w',
-                    driver=driver,
-                    schema=schema) as c:
+    with fiona.open(path, mode="w", driver=driver, schema=schema) as c:
         c.write(record1)
 
     # Append record
-    with fiona.open(path,
-                    mode='a',
-                    driver=driver) as c:
+    with fiona.open(path, mode="a", driver=driver) as c:
         c.write(record2)
 
-    with fiona.open(path,
-                    mode='r',
-                    driver=driver) as c:
+    with fiona.open(path, mode="r", driver=driver) as c:
         data = [f for f in c]
         assert len(data) == 2
 
         for f in data:
-            assert len(f.get('properties', {})) == 0
-        assert data[0]['geometry'] == record1['geometry']
-        assert data[1]['geometry'] == record2['geometry']
+            assert len(f.properties) == 0
+
+        assert data[0].geometry.type == record1.geometry["type"]
+        assert data[1].geometry.type == record2.geometry["type"]
 
 
-@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
+@pytest.mark.parametrize("driver", ["GPKG", "GeoJSON"])
 def test_property_only_schema_write(tmpdir, driver):
     schema = {
        # No geometry defined here.
-        "properties": {'prop1': 'str'}
+        "properties": {"prop1": "str"}
     }
 
-    record1 = {'properties': {'prop1': 'one'}}
+    record1 = Feature.from_dict(**{"properties": {"prop1": "one"}})
 
     path = str(tmpdir.join(get_temp_filename(driver)))
 
-    with fiona.open(path,
-                    mode='w',
-                    driver=driver,
-                    schema=schema) as c:
+    with fiona.open(path, mode="w", driver=driver, schema=schema) as c:
         c.write(record1)
 
-    with fiona.open(path,
-                    mode='r',
-                    driver=driver) as c:
+    with fiona.open(path, mode="r", driver=driver) as c:
         data = [f for f in c]
         assert len(data) == 1
-        assert len(data[0].get('properties', {})) == 1
-        assert 'prop1' in data[0]['properties'] and data[0]['properties']['prop1'] == 'one'
+        assert len(data[0].properties) == 1
+        assert "prop1" in data[0].properties and data[0].properties["prop1"] == "one"
         for f in data:
-            assert 'geometry' not in f or f['geometry'] is None
+            assert f.geometry is None
 
 
-@pytest.mark.parametrize('driver', ['GPKG', 'GeoJSON'])
+@pytest.mark.parametrize("driver", ["GPKG", "GeoJSON"])
 def test_property_only_schema_update(tmpdir, driver):
     # Guard unsupported drivers
-    if driver in driver_mode_mingdal['a'] and GDALVersion.runtime() < GDALVersion(
-            *driver_mode_mingdal['a'][driver][:2]):
+    if driver in driver_mode_mingdal["a"] and GDALVersion.runtime() < GDALVersion(
+        *driver_mode_mingdal["a"][driver][:2]
+    ):
         return
 
     schema = {
        # No geometry defined here.
-        "properties": {'prop1': 'str'}
+        "properties": {"prop1": "str"}
     }
 
-    record1 = {'properties': {'prop1': 'one'}}
-    record2 = {'properties': {'prop1': 'two'}}
+    record1 = Feature.from_dict(**{"properties": {"prop1": "one"}})
+    record2 = Feature.from_dict(**{"properties": {"prop1": "two"}})
 
     path = str(tmpdir.join(get_temp_filename(driver)))
 
     # Create file
-    with fiona.open(path,
-                    mode='w',
-                    driver=driver,
-                    schema=schema) as c:
+    with fiona.open(path, mode="w", driver=driver, schema=schema) as c:
         c.write(record1)
 
     # Append record
-    with fiona.open(path,
-                    mode='a',
-                    driver=driver) as c:
+    with fiona.open(path, mode="a", driver=driver) as c:
         c.write(record2)
 
-    with fiona.open(path,
-                    mode='r',
-                    driver=driver) as c:
+    with fiona.open(path, mode="r", driver=driver) as c:
         data = [f for f in c]
         assert len(data) == 2
         for f in data:
-            assert len(f.get('properties', {})) == 1
-            assert 'geometry' not in f or f['geometry'] is None
-        assert 'prop1' in data[0]['properties'] and data[0]['properties']['prop1'] == 'one'
-        assert 'prop1' in data[1]['properties'] and data[1]['properties']['prop1'] == 'two'
+            assert len(f.properties) == 1
+            assert f.geometry is None
+        assert "prop1" in data[0].properties and data[0].properties["prop1"] == "one"
+        assert "prop1" in data[1].properties and data[1].properties["prop1"] == "two"
 
 
 def test_schema_default_fields_wrong_type(tmpdir):
-    """ Test for SchemaError if a default field is specified with a different type"""
+    """Test for SchemaError if a default field is specified with a different type"""
 
-    name = str(tmpdir.join('test.gpx'))
-    schema = {'properties': OrderedDict([('ele', 'str'), ('time', 'datetime')]),
-              'geometry': 'Point'}
+    name = str(tmpdir.join("test.gpx"))
+    schema = {
+        "properties": OrderedDict([("ele", "str"), ("time", "datetime")]),
+        "geometry": "Point",
+    }
 
     with pytest.raises(SchemaError):
-        with fiona.open(name, 'w',
-                        driver="GPX",
-                        schema=schema) as c:
+        with fiona.open(name, "w", driver="GPX", schema=schema) as c:
             pass
diff --git a/tests/test_schema_geom.py b/tests/test_schema_geom.py
index 8a25f8ba9..04399dae5 100644
--- a/tests/test_schema_geom.py
+++ b/tests/test_schema_geom.py
@@ -6,60 +6,85 @@
 import pytest
 
 from fiona.errors import GeometryTypeValidationError, UnsupportedGeometryTypeError
+from fiona.model import Feature
+
 
 @pytest.fixture
 def filename_shp(tmpdir):
     return str(tmpdir.join("example.shp"))
 
+
 @pytest.fixture
 def filename_json(tmpdir):
     return str(tmpdir.join("example.json"))
 
+
 properties = {"name": "str"}
 PROPERTIES = {"name": "example"}
 POINT = {"type": "Point", "coordinates": (1.0, 2.0)}
 LINESTRING = {"type": "LineString", "coordinates": [(1.0, 2.0), (3.0, 4.0)]}
 POLYGON = {"type": "Polygon", "coordinates": [[(0.0, 0.0), (1.0, 1.0), (0.0, 0.1)]]}
-MULTILINESTRING = {"type": "MultiLineString", "coordinates": [[(0.0, 0.0), (1.0, 1.0)], [(1.0, 2.0), (3.0, 4.0)]]}
-GEOMETRYCOLLECTION = {"type": "GeometryCollection", "geometries": [POINT, LINESTRING, POLYGON]}
+MULTILINESTRING = {
+    "type": "MultiLineString",
+    "coordinates": [[(0.0, 0.0), (1.0, 1.0)], [(1.0, 2.0), (3.0, 4.0)]],
+}
+GEOMETRYCOLLECTION = {
+    "type": "GeometryCollection",
+    "geometries": [POINT, LINESTRING, POLYGON],
+}
 INVALID = {"type": "InvalidType", "coordinates": (42.0, 43.0)}
 POINT_3D = {"type": "Point", "coordinates": (1.0, 2.0, 3.0)}
 
+
 def write_point(collection):
-    feature = {"geometry": POINT, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": POINT, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def write_linestring(collection):
-    feature = {"geometry": LINESTRING, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": LINESTRING, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def write_polygon(collection):
-    feature = {"geometry": POLYGON, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": POLYGON, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def write_invalid(collection):
-    feature = {"geometry": INVALID, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": INVALID, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def write_multilinestring(collection):
-    feature = {"geometry": MULTILINESTRING, "properties": PROPERTIES}
+    feature = Feature.from_dict(
+        **{"geometry": MULTILINESTRING, "properties": PROPERTIES}
+    )
     collection.write(feature)
 
+
 def write_point_3d(collection):
-    feature = {"geometry": POINT_3D, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": POINT_3D, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def write_geometrycollection(collection):
-    feature = {"geometry": GEOMETRYCOLLECTION, "properties": PROPERTIES}
+    feature = Feature.from_dict(
+        **{"geometry": GEOMETRYCOLLECTION, "properties": PROPERTIES}
+    )
     collection.write(feature)
 
+
 def write_null(collection):
-    feature = {"geometry": None, "properties": PROPERTIES}
+    feature = Feature.from_dict(**{"geometry": None, "properties": PROPERTIES})
     collection.write(feature)
 
+
 def test_point(filename_shp):
     schema = {"geometry": "Point", "properties": properties}
-    with fiona.open(filename_shp, "w", driver="ESRI Shapefile", schema=schema) as collection:
+    with fiona.open(
+        filename_shp, "w", driver="ESRI Shapefile", schema=schema
+    ) as collection:
         write_point(collection)
         write_point_3d(collection)
         write_null(collection)
@@ -70,6 +95,7 @@ def test_point(filename_shp):
         with pytest.raises(GeometryTypeValidationError):
             write_invalid(collection)
 
+
 def test_multi_type(filename_json):
     schema = {"geometry": ("Point", "LineString"), "properties": properties}
     with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
@@ -83,6 +109,7 @@ def test_multi_type(filename_json):
         with pytest.raises(GeometryTypeValidationError):
             write_invalid(collection)
 
+
 def test_unknown(filename_json):
     """Reading and writing layers with "Unknown" (i.e. any) geometry type"""
     # write a layer with a mixture of geometry types
@@ -104,6 +131,7 @@ def test_unknown(filename_json):
             with fiona.open(filename_dst, "w", **src.meta) as dst:
                 dst.writerecords(src)
 
+
 def test_any(filename_json):
     schema = {"geometry": "Any", "properties": properties}
     with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
@@ -116,48 +144,62 @@ def test_any(filename_json):
         with pytest.raises(GeometryTypeValidationError):
             write_invalid(collection)
 
+
 def test_broken(filename_json):
     schema = {"geometry": "NOT_VALID", "properties": properties}
     with pytest.raises(UnsupportedGeometryTypeError):
         with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema):
             pass
 
+
 def test_broken_list(filename_json):
-    schema = {"geometry": ("Point", "LineString", "NOT_VALID"), "properties": properties}
+    schema = {
+        "geometry": ("Point", "LineString", "NOT_VALID"),
+        "properties": properties,
+    }
     with pytest.raises(UnsupportedGeometryTypeError):
         collection = fiona.open(filename_json, "w", driver="GeoJSON", schema=schema)
 
+
 def test_invalid_schema(filename_shp):
     """Features match schema but geometries not supported by driver"""
     schema = {"geometry": ("Point", "LineString"), "properties": properties}
-    with fiona.open(filename_shp, "w", driver="ESRI Shapefile", schema=schema) as collection:
+    with fiona.open(
+        filename_shp, "w", driver="ESRI Shapefile", schema=schema
+    ) as collection:
         write_linestring(collection)
 
         with pytest.raises(RuntimeError):
            # ESRI Shapefile can only store a single geometry type
            write_point(collection)
 
+
 def test_esri_multi_geom(filename_shp):
     """ESRI Shapefile doesn't differentiate between LineString/MultiLineString"""
     schema = {"geometry": "LineString", "properties": properties}
-    with fiona.open(filename_shp, "w", driver="ESRI Shapefile", schema=schema) as collection:
+    with fiona.open(
+        filename_shp, "w", driver="ESRI Shapefile", schema=schema
+    ) as collection:
         write_linestring(collection)
         write_multilinestring(collection)
 
         with pytest.raises(GeometryTypeValidationError):
             write_point(collection)
 
+
 def test_3d_schema_ignored(filename_json):
     schema = {"geometry": "3D Point", "properties": properties}
     with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
         write_point(collection)
         write_point_3d(collection)
 
+
 def test_geometrycollection_schema(filename_json):
     schema = {"geometry": "GeometryCollection", "properties": properties}
     with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
         write_geometrycollection(collection)
 
+
 def test_none_schema(filename_json):
     schema = {"geometry": None, "properties": properties}
     with fiona.open(filename_json, "w", driver="GeoJSON", schema=schema) as collection:
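
The schema-geometry tests above validate features against single, multiple, "Any"/"Unknown", and None geometry types. A sketch of the multi-type case under the same assumptions (GeoJSON driver, hypothetical path):

    import fiona
    import pytest
    from fiona.errors import GeometryTypeValidationError
    from fiona.model import Feature

    schema = {"geometry": ("Point", "LineString"), "properties": {"name": "str"}}
    with fiona.open("/tmp/multi.geojson", "w", driver="GeoJSON", schema=schema) as dst:
        dst.write(
            Feature.from_dict(
                **{
                    "geometry": {"type": "Point", "coordinates": (1.0, 2.0)},
                    "properties": {"name": "example"},
                }
            )
        )
        with pytest.raises(GeometryTypeValidationError):
            # A Polygon is not in the declared set of geometry types.
            dst.write(
                Feature.from_dict(
                    **{
                        "geometry": {
                            "type": "Polygon",
                            "coordinates": [[(0.0, 0.0), (1.0, 1.0), (0.0, 0.1)]],
                        },
                        "properties": {"name": "example"},
                    }
                )
            )
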
-""" +"""Note well: collection slicing is deprecated!""" + import tempfile import shutil import os from collections import OrderedDict import pytest + from fiona.env import GDALVersion import fiona +from fiona.drvsupport import supported_drivers, _driver_supports_mode from fiona.errors import FionaDeprecationWarning +from fiona.model import Feature + from .conftest import get_temp_filename -from fiona.drvsupport import supported_drivers, _driver_supports_mode gdal_version = GDALVersion.runtime() @@ -17,7 +20,7 @@ def test_collection_get(path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp) as src: result = src[5] - assert result['id'] == '5' + assert result.id == "5" def test_collection_slice(path_coutwildrnp_shp): @@ -25,7 +28,7 @@ def test_collection_slice(path_coutwildrnp_shp): results = src[:5] assert isinstance(results, list) assert len(results) == 5 - assert results[4]['id'] == '4' + assert results[4].id == "4" def test_collection_iterator_slice(path_coutwildrnp_shp): @@ -34,105 +37,139 @@ def test_collection_iterator_slice(path_coutwildrnp_shp): assert len(results) == 5 k, v = results[4] assert k == 4 - assert v['id'] == '4' + assert v.id == "4" def test_collection_iterator_next(path_coutwildrnp_shp): with fiona.open(path_coutwildrnp_shp) as src: k, v = next(src.items(5, None)) assert k == 5 - assert v['id'] == '5' - - -@pytest.fixture(scope="module", params=[driver for driver in supported_drivers if - _driver_supports_mode(driver, 'w') - and driver not in {'DGN', 'MapInfo File', 'GPSTrackMaker', 'GPX', 'BNA', 'DXF'}]) + assert v.id == "5" + + +@pytest.fixture( + scope="module", + params=[ + driver + for driver in supported_drivers + if _driver_supports_mode(driver, "w") + and driver not in {"DGN", "MapInfo File", "GPSTrackMaker", "GPX", "BNA", "DXF"} + ], +) def slice_dataset_path(request): - """ Create temporary datasets for test_collection_iterator_items_slice()""" + """Create temporary datasets for test_collection_iterator_items_slice()""" driver = request.param min_id = 0 max_id = 9 def get_schema(driver): - special_schemas = {'CSV': {'geometry': None, 'properties': OrderedDict([('position', 'int')])}} - return special_schemas.get(driver, {'geometry': 'Point', 'properties': OrderedDict([('position', 'int')])}) + special_schemas = { + "CSV": {"geometry": None, "properties": OrderedDict([("position", "int")])} + } + return special_schemas.get( + driver, + {"geometry": "Point", "properties": OrderedDict([("position", "int")])}, + ) def get_records(driver, range): - special_records1 = {'CSV': [{'geometry': None, 'properties': {'position': i}} for i in range], - 'PCIDSK': [{'geometry': {'type': 'Point', 'coordinates': (0.0, float(i), 0.0)}, - 'properties': {'position': i}} for i in range] - } - return special_records1.get(driver, [ - {'geometry': {'type': 'Point', 'coordinates': (0.0, float(i))}, 'properties': {'position': i}} for i in - range]) + special_records1 = { + "CSV": [ + Feature.from_dict(**{"geometry": None, "properties": {"position": i}}) + for i in range + ], + "PCIDSK": [ + Feature.from_dict( + **{ + "geometry": { + "type": "Point", + "coordinates": (0.0, float(i), 0.0), + }, + "properties": {"position": i}, + } + ) + for i in range + ], + } + return special_records1.get( + driver, + [ + Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0.0, float(i))}, + "properties": {"position": i}, + } + ) + for i in range + ], + ) schema = get_schema(driver) records = get_records(driver, range(min_id, max_id + 1)) create_kwargs = {} - if driver == 
'FlatGeobuf': - create_kwargs['SPATIAL_INDEX'] = False + if driver == "FlatGeobuf": + create_kwargs["SPATIAL_INDEX"] = False tmpdir = tempfile.mkdtemp() path = os.path.join(tmpdir, get_temp_filename(driver)) - with fiona.open(path, 'w', - driver=driver, - schema=schema, - **create_kwargs) as c: + with fiona.open(path, "w", driver=driver, schema=schema, **create_kwargs) as c: c.writerecords(records) yield path shutil.rmtree(tmpdir) -@pytest.mark.parametrize("args", [(0, 5, None), - (1, 5, None), - (-5, None, None), - (-5, -1, None), - (0, None, None), - (5, None, None), - (8, None, None), - (9, None, None), - (10, None, None), - (0, 5, 2), - (0, 5, 2), - (1, 5, 2), - (-5, None, 2), - (-5, -1, 2), - (0, None, 2), - (0, 8, 2), - (0, 9, 2), - (0, 10, 2), - (1, 8, 2), - (1, 9, 2), - (1, 10, 2), - (1, None, 2), - (5, None, 2), - (5, None, -1), - (5, None, -2), - (5, None, None), - (4, None, -2), - (-1, -5, -1), - (-5, None, -1), - (0, 5, 1), - (5, 15, 1), - (15, 30, 1), - (5, 0, -1), - (15, 5, -1), - (30, 15, -1), - (0, 5, 2), - (5, 15, 2), - (15, 30, 2), - (5, 0, -2), - (15, 5, -2), - (30, 15, -2) - ]) -@pytest.mark.filterwarnings('ignore:.*OLC_FASTFEATURECOUNT*') -@pytest.mark.filterwarnings('ignore:.*OLCFastSetNextByIndex*') +@pytest.mark.parametrize( + "args", + [ + (0, 5, None), + (1, 5, None), + (-5, None, None), + (-5, -1, None), + (0, None, None), + (5, None, None), + (8, None, None), + (9, None, None), + (10, None, None), + (0, 5, 2), + (0, 5, 2), + (1, 5, 2), + (-5, None, 2), + (-5, -1, 2), + (0, None, 2), + (0, 8, 2), + (0, 9, 2), + (0, 10, 2), + (1, 8, 2), + (1, 9, 2), + (1, 10, 2), + (1, None, 2), + (5, None, 2), + (5, None, -1), + (5, None, -2), + (5, None, None), + (4, None, -2), + (-1, -5, -1), + (-5, None, -1), + (0, 5, 1), + (5, 15, 1), + (15, 30, 1), + (5, 0, -1), + (15, 5, -1), + (30, 15, -1), + (0, 5, 2), + (5, 15, 2), + (15, 30, 2), + (5, 0, -2), + (15, 5, -2), + (30, 15, -2), + ], +) +@pytest.mark.filterwarnings("ignore:.*OLC_FASTFEATURECOUNT*") +@pytest.mark.filterwarnings("ignore:.*OLCFastSetNextByIndex*") def test_collection_iterator_items_slice(slice_dataset_path, args): - """ Test if c.items(start, stop, step) returns the correct features. 
- """ + """Test if c.items(start, stop, step) returns the correct features.""" start, stop, step = args min_id = 0 @@ -140,10 +177,10 @@ def test_collection_iterator_items_slice(slice_dataset_path, args): positions = list(range(min_id, max_id + 1))[start:stop:step] - with fiona.open(slice_dataset_path, 'r') as c: + with fiona.open(slice_dataset_path, "r") as c: items = list(c.items(start, stop, step)) assert len(items) == len(positions) - record_positions = [int(item[1]['properties']['position']) for item in items] + record_positions = [int(item[1]["properties"]["position"]) for item in items] for expected_position, record_position in zip(positions, record_positions): assert expected_position == record_position diff --git a/tests/test_subtypes.py b/tests/test_subtypes.py index e1653f39d..1e10538f3 100644 --- a/tests/test_subtypes.py +++ b/tests/test_subtypes.py @@ -1,14 +1,16 @@ import fiona +from fiona.model import Feature + def test_read_bool_subtype(tmpdir): test_data = """{"type": "FeatureCollection", "features": [{"type": "Feature", "properties": {"bool": true, "not_bool": 1, "float": 42.5}, "geometry": null}]}""" path = tmpdir.join("test_read_bool_subtype.geojson") with open(str(path), "w") as f: f.write(test_data) - + with fiona.open(str(path), "r") as src: feature = next(iter(src)) - + if fiona.gdal_version.major >= 2: assert type(feature["properties"]["bool"]) is bool else: @@ -16,33 +18,36 @@ def test_read_bool_subtype(tmpdir): assert isinstance(feature["properties"]["not_bool"], int) assert type(feature["properties"]["float"]) is float + def test_write_bool_subtype(tmpdir): path = tmpdir.join("test_write_bool_subtype.geojson") - + schema = { "geometry": "Point", "properties": { "bool": "bool", "not_bool": "int", "float": "float", - } + }, } - - feature = { - "geometry": None, - "properties": { - "bool": True, - "not_bool": 1, - "float": 42.5, + + feature = Feature.from_dict( + **{ + "geometry": None, + "properties": { + "bool": True, + "not_bool": 1, + "float": 42.5, + }, } - } + ) with fiona.open(str(path), "w", driver="GeoJSON", schema=schema) as dst: dst.write(feature) - + with open(str(path), "r") as f: data = f.read() - + if fiona.gdal_version.major >= 2: assert """"bool": true""" in data else: diff --git a/tests/test_transactions.py b/tests/test_transactions.py index be966e8af..b1505279b 100644 --- a/tests/test_transactions.py +++ b/tests/test_transactions.py @@ -1,10 +1,13 @@ +from collections import defaultdict +import logging import os +import pytest +from random import uniform, randint + import fiona +from fiona.model import Feature import fiona.ogrext -import logging -from random import uniform, randint -from collections import defaultdict -import pytest + from tests.conftest import requires_gdal2 has_gpkg = "GPKG" in fiona.supported_drivers.keys() @@ -13,10 +16,13 @@ def create_records(count): for n in range(count): record = { - "geometry": {"type": "Point", "coordinates": [uniform(-180, 180), uniform(-90, 90)]}, - "properties": {"value": randint(0, 1000)} + "geometry": { + "type": "Point", + "coordinates": [uniform(-180, 180), uniform(-90, 90)], + }, + "properties": {"value": randint(0, 1000)}, } - yield record + yield Feature.from_dict(**record) class DebugHandler(logging.Handler): @@ -61,17 +67,20 @@ def test_transaction(self, tmpdir): path = str(tmpdir.join("output.gpkg")) - schema = { - "geometry": "Point", - "properties": {"value": "int"} - } + schema = {"geometry": "Point", "properties": {"value": "int"}} with fiona.open(path, "w", driver="GPKG", 
schema=schema) as dst: dst.writerecords(create_records(num_records)) assert self.handler.history["Starting transaction (initial)"] == 1 - assert self.handler.history["Starting transaction (intermediate)"] == num_records // transaction_size - assert self.handler.history["Committing transaction (intermediate)"] == num_records // transaction_size + assert ( + self.handler.history["Starting transaction (intermediate)"] + == num_records // transaction_size + ) + assert ( + self.handler.history["Committing transaction (intermediate)"] + == num_records // transaction_size + ) assert self.handler.history["Committing transaction (final)"] == 1 with fiona.open(path, "r") as src: diff --git a/tests/test_transform.py b/tests/test_transform.py index f5e637028..d5d0f0b9b 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -4,23 +4,18 @@ import pytest from fiona import transform from fiona.errors import FionaDeprecationWarning +from fiona.model import Geometry from .conftest import requires_gdal_lt_3 TEST_GEOMS = [ - {"type": "Point", "coordinates": [0.0, 0.0, 1000.0]}, - { - "type": "LineString", - "coordinates": [[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]], - }, - { - "type": "MultiPoint", - "coordinates": [[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]], - }, - { - "type": "Polygon", - "coordinates": [ + Geometry(type="Point", coordinates=[0.0, 0.0, 1000.0]), + Geometry(type="LineString", coordinates=[[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]]), + Geometry(type="MultiPoint", coordinates=[[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]]), + Geometry( + type="Polygon", + coordinates=[ [ [0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0], @@ -28,10 +23,10 @@ [0.0, 0.0, 1000.0], ] ], - }, - { - "type": "MultiPolygon", - "coordinates": [ + ), + Geometry( + type="MultiPolygon", + coordinates=[ [ [ [0.0, 0.0, 1000.0], @@ -41,15 +36,14 @@ ] ] ], - }, + ), ] @pytest.mark.parametrize("geom", TEST_GEOMS) def test_transform_geom_with_z(geom): """Transforming a geom with Z succeeds""" - with pytest.warns(FionaDeprecationWarning): - transform.transform_geom("epsg:4326", "epsg:3857", geom, precision=3) + transform.transform_geom("epsg:4326", "epsg:3857", geom) @pytest.mark.parametrize("geom", TEST_GEOMS) @@ -59,71 +53,85 @@ def test_transform_geom_array_z(geom): "epsg:4326", "epsg:3857", [geom for _ in range(5)], - precision=3, ) assert isinstance(g2, list) assert len(g2) == 5 -@requires_gdal_lt_3 -def test_transform_geom_null_dest(): - failed_geom = { - 'type': 'Polygon', - 'coordinates': (( - (81.2180196471443, 6.197141424988303), - (80.34835696810447, 5.968369859232141), - (79.87246870312859, 6.763463446474915), - (79.69516686393516, 8.200843410673372), - (80.14780073437967, 9.824077663609557), - (80.83881798698664, 9.268426825391174), - (81.3043192890718, 8.564206244333675), - (81.78795901889143, 7.523055324733178), - (81.63732221876066, 6.481775214051936), - (81.2180196471443, 6.197141424988303) - ),) - } - with pytest.warns(UserWarning): - transformed_geom = transform.transform_geom( - src_crs="epsg:4326", - dst_crs="epsg:32628", - geom=failed_geom, - antimeridian_cutting=True, - precision=2, - ) - assert transformed_geom is None - - -@pytest.mark.parametrize("crs", ["epsg:4326", - "EPSG:4326", - "WGS84", - {'init': 'epsg:4326'}, - {'proj': 'longlat', 'datum': 'WGS84', 'no_defs': True}, - "OGC:CRS84"]) -def test_axis_ordering(crs): - """ Test if transform uses traditional_axis_mapping """ - +@pytest.mark.parametrize( + "crs", + [ + "epsg:4326", + "EPSG:4326", + "WGS84", + {"init": "epsg:4326"}, + {"proj": "longlat", 
"datum": "WGS84", "no_defs": True}, + "OGC:CRS84", + ], +) +def test_axis_ordering_rev(crs): + """Test if transform uses traditional_axis_mapping""" expected = (-8427998.647958742, 4587905.27136252) t1 = transform.transform(crs, "epsg:3857", [-75.71], [38.06]) assert (t1[0][0], t1[1][0]) == pytest.approx(expected) - geom = {"type": "Point", "coordinates": [-75.71, 38.06]} - g1 = transform.transform_geom(crs, "epsg:3857", geom, precision=3) + geom = Geometry.from_dict(**{"type": "Point", "coordinates": [-75.71, 38.06]}) + g1 = transform.transform_geom(crs, "epsg:3857", geom) assert g1["coordinates"] == pytest.approx(expected) + +@pytest.mark.parametrize( + "crs", + [ + "epsg:4326", + "EPSG:4326", + "WGS84", + {"init": "epsg:4326"}, + {"proj": "longlat", "datum": "WGS84", "no_defs": True}, + "OGC:CRS84", + ], +) +def test_axis_ordering_fwd(crs): + """Test if transform uses traditional_axis_mapping""" rev_expected = (-75.71, 38.06) t2 = transform.transform("epsg:3857", crs, [-8427998.647958742], [4587905.27136252]) assert (t2[0][0], t2[1][0]) == pytest.approx(rev_expected) - geom = {"type": "Point", "coordinates": [-8427998.647958742, 4587905.27136252]} - g2 = transform.transform_geom("epsg:3857", crs, geom, precision=3) - assert g2["coordinates"] == pytest.approx(rev_expected) + geom = Geometry.from_dict( + **{"type": "Point", "coordinates": [-8427998.647958742, 4587905.27136252]} + ) + g2 = transform.transform_geom("epsg:3857", crs, geom) + assert g2.coordinates == pytest.approx(rev_expected) def test_transform_issue971(): - """ See https://github.com/Toblerity/Fiona/issues/971 """ - source_crs = "epsg:25832" - dest_src = "epsg:4326" - geom = {'type': 'GeometryCollection', 'geometries': [{'type': 'LineString', - 'coordinates': [(512381.8870945257, 5866313.311218272), - (512371.23869999964, 5866322.282500001), - (512364.6014999999, 5866328.260199999)]}]} - geom_transformed = transform.transform_geom(source_crs, dest_src, geom, precision=3) - assert geom_transformed['geometries'][0]['coordinates'][0] == pytest.approx((9.184, 52.946)) + """See https://github.com/Toblerity/Fiona/issues/971""" + source_crs = "EPSG:25832" + dest_src = "EPSG:4326" + geom = { + "type": "GeometryCollection", + "geometries": [ + { + "type": "LineString", + "coordinates": [ + (512381.8870945257, 5866313.311218272), + (512371.23869999964, 5866322.282500001), + (512364.6014999999, 5866328.260199999), + ], + } + ], + } + with pytest.warns(FionaDeprecationWarning): + geom_transformed = transform.transform_geom(source_crs, dest_src, geom) + assert geom_transformed.geometries[0].coordinates[0] == pytest.approx( + (9.18427, 52.94630) + ) + + +def test_transform_geom_precision_deprecation(): + """Get a deprecation warning in 1.9""" + with pytest.warns(FionaDeprecationWarning): + transform.transform_geom( + "epsg:4326", + "epsg:3857", + Geometry(type="Point", coordinates=(0, 0)), + precision=2, + ) diff --git a/tests/test_unicode.py b/tests/test_unicode.py index 7b13a2e72..ade652df2 100644 --- a/tests/test_unicode.py +++ b/tests/test_unicode.py @@ -11,39 +11,37 @@ import fiona from fiona.errors import SchemaError +from fiona.model import Feature class TestUnicodePath(object): - def setup(self): tempdir = tempfile.mkdtemp() - self.dir = os.path.join(tempdir, 'français') - shutil.copytree(os.path.join(os.path.dirname(__file__), 'data'), - self.dir) + self.dir = os.path.join(tempdir, "français") + shutil.copytree(os.path.join(os.path.dirname(__file__), "data"), self.dir) def teardown(self): 
shutil.rmtree(os.path.dirname(self.dir)) def test_unicode_path(self): - path = self.dir + '/coutwildrnp.shp' + path = self.dir + "/coutwildrnp.shp" with fiona.open(path) as c: assert len(c) == 67 def test_unicode_path_layer(self): path = self.dir - layer = 'coutwildrnp' + layer = "coutwildrnp" with fiona.open(path, layer=layer) as c: assert len(c) == 67 def test_utf8_path(self): - path = self.dir + '/coutwildrnp.shp' + path = self.dir + "/coutwildrnp.shp" if sys.version_info < (3,): with fiona.open(path) as c: assert len(c) == 67 class TestUnicodeStringField(object): - def setup(self): self.tempdir = tempfile.mkdtemp() @@ -64,61 +62,86 @@ def test_write_mismatch(self): # # Consequences: no error on write, but there will be an error # on reading the data and expecting latin-1. - schema = { - 'geometry': 'Point', - 'properties': {'label': 'str', 'num': 'int'}} - - with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"), - 'w', driver="ESRI Shapefile", schema=schema, - encoding='latin1') as c: - c.writerecords([{ - 'type': 'Feature', - 'geometry': {'type': 'Point', 'coordinates': [0, 0]}, - 'properties': { - 'label': '徐汇区', - 'num': 0}}]) - - with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c: + schema = {"geometry": "Point", "properties": {"label": "str", "num": "int"}} + + with fiona.open( + os.path.join(self.tempdir, "test-write-fail.shp"), + "w", + driver="ESRI Shapefile", + schema=schema, + encoding="latin1", + ) as c: + c.writerecords( + [ + { + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [0, 0]}, + "properties": {"label": "徐汇区", "num": 0}, + } + ] + ) + + with fiona.open(os.path.join(self.tempdir), encoding="latin1") as c: f = next(iter(c)) # Next assert fails. - assert f['properties']['label'] == '徐汇区' + assert f.properties["label"] == "徐汇区" def test_write_utf8(self): schema = { - 'geometry': 'Point', - 'properties': {'label': 'str', 'verit\xe9': 'int'}} - with fiona.open(os.path.join(self.tempdir, "test-write.shp"), - "w", "ESRI Shapefile", schema=schema, - encoding='utf-8') as c: - c.writerecords([{ - 'type': 'Feature', - 'geometry': {'type': 'Point', 'coordinates': [0, 0]}, - 'properties': { - 'label': 'Ba\u2019kelalan', 'verit\xe9': 0}}]) - - with fiona.open(os.path.join(self.tempdir), encoding='utf-8') as c: + "geometry": "Point", + "properties": {"label": "str", "verit\xe9": "int"}, + } + with fiona.open( + os.path.join(self.tempdir, "test-write.shp"), + "w", + "ESRI Shapefile", + schema=schema, + encoding="utf-8", + ) as c: + c.writerecords( + [ + Feature.from_dict( + **{ + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [0, 0]}, + "properties": {"label": "Ba\u2019kelalan", "verit\xe9": 0}, + } + ) + ] + ) + + with fiona.open(os.path.join(self.tempdir), encoding="utf-8") as c: f = next(iter(c)) - assert f['properties']['label'] == 'Ba\u2019kelalan' - assert f['properties']['verit\xe9'] == 0 + assert f.properties["label"] == "Ba\u2019kelalan" + assert f.properties["verit\xe9"] == 0 @pytest.mark.iconv def test_write_gb18030(self): """Can write a simplified Chinese shapefile""" - schema = { - 'geometry': 'Point', - 'properties': {'label': 'str', 'num': 'int'}} - with fiona.open(os.path.join(self.tempdir, "test-write-gb18030.shp"), - 'w', driver="ESRI Shapefile", schema=schema, - encoding='gb18030') as c: - c.writerecords([{ - 'type': 'Feature', - 'geometry': {'type': 'Point', 'coordinates': [0, 0]}, - 'properties': {'label': '徐汇区', 'num': 0}}]) - - with fiona.open(os.path.join(self.tempdir), encoding='gb18030') as 
c: + schema = {"geometry": "Point", "properties": {"label": "str", "num": "int"}} + with fiona.open( + os.path.join(self.tempdir, "test-write-gb18030.shp"), + "w", + driver="ESRI Shapefile", + schema=schema, + encoding="gb18030", + ) as c: + c.writerecords( + [ + Feature.from_dict( + **{ + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [0, 0]}, + "properties": {"label": "徐汇区", "num": 0}, + } + ) + ] + ) + + with fiona.open(os.path.join(self.tempdir), encoding="gb18030") as c: f = next(iter(c)) - assert f['properties']['label'] == '徐汇区' - assert f['properties']['num'] == 0 + assert f.properties["label"] == "徐汇区" + assert f.properties["num"] == 0 @pytest.mark.iconv def test_gb2312_field_wrong_encoding(self): @@ -139,12 +162,16 @@ def test_gb2312_field_wrong_encoding(self): }, "driver": "ESRI Shapefile", } - feature = { - "properties": {field_name: 123}, - "geometry": {"type": "Point", "coordinates": [1, 2]} - } + feature = Feature.from_dict( + **{ + "properties": {field_name: 123}, + "geometry": {"type": "Point", "coordinates": [1, 2]}, + } + ) # when encoding is specified, write is successful - with fiona.open(os.path.join(self.tempdir, "test1.shp"), "w", encoding="GB2312", **meta) as collection: + with fiona.open( + os.path.join(self.tempdir, "test1.shp"), "w", encoding="GB2312", **meta + ) as collection: collection.write(feature) # no encoding with pytest.raises(SchemaError): diff --git a/tests/test_vfs.py b/tests/test_vfs.py index f742f30eb..789bf438e 100644 --- a/tests/test_vfs.py +++ b/tests/test_vfs.py @@ -15,17 +15,19 @@ # Custom markers (from rasterio) mingdalversion = pytest.mark.skipif( - fiona.gdal_version < (2, 1, 0), - reason="S3 raster access requires GDAL 2.1") + fiona.gdal_version < (2, 1, 0), reason="S3 raster access requires GDAL 2.1" +) credentials = pytest.mark.skipif( - not(boto3.Session()._session.get_credentials()), - reason="S3 raster access requires credentials") + not (boto3.Session()._session.get_credentials()), + reason="S3 raster access requires credentials", +) # TODO: remove this once we've successfully moved the tar tests over # to TestVsiReading. + class VsiReadingTest(ReadingTest): # There's a bug in GDAL 1.9.2 http://trac.osgeo.org/gdal/ticket/5093 # in which the VSI driver reports the wrong number of features. @@ -33,14 +35,16 @@ class VsiReadingTest(ReadingTest): # passes and creating a new method in this class that we can exclude # from the test runner at run time. - @pytest.mark.xfail(reason="The number of features present in the archive " - "differs based on the GDAL version.") + @pytest.mark.xfail( + reason="The number of features present in the archive " + "differs based on the GDAL version." + ) def test_filter_vsi(self): results = list(self.c.filter(bbox=(-114.0, 35.0, -104, 45.0))) assert len(results) == 67 f = results[0] - assert f['id'] == "0" - assert f['properties']['STATE'] == 'UT' + assert f["id"] == "0" + assert f["properties"]["STATE"] == "UT" class TestVsiReading(TestReading): @@ -50,80 +54,73 @@ class TestVsiReading(TestReading): # passes and creating a new method in this class that we can exclude # from the test runner at run time. - @pytest.mark.xfail(reason="The number of features present in the archive " - "differs based on the GDAL version.") + @pytest.mark.xfail( + reason="The number of features present in the archive " + "differs based on the GDAL version." 
+    )
     def test_filter_vsi(self):
         results = list(self.c.filter(bbox=(-114.0, 35.0, -104, 45.0)))
         assert len(results) == 67
         f = results[0]
-        assert f['id'] == "0"
-        assert f['properties']['STATE'] == 'UT'
+        assert f["id"] == "0"
+        assert f["properties"]["STATE"] == "UT"
 
 
 class TestZipReading(TestVsiReading):
     @pytest.fixture(autouse=True)
     def zipfile(self, data_dir, path_coutwildrnp_zip):
-        self.c = fiona.open("zip://{}".format(path_coutwildrnp_zip, "r"))
-        self.path = os.path.join(data_dir, 'coutwildrnp.zip')
+        self.c = fiona.open("zip://{}".format(path_coutwildrnp_zip), "r")
+        self.path = os.path.join(data_dir, "coutwildrnp.zip")
         yield
         self.c.close()
 
     def test_open_repr(self):
-        assert (
-            repr(self.c) ==
-            ("<open Collection '/vsizip/{path}:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<open Collection '/vsizip/{path}:coutwildrnp', mode 'r' "
+            "at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_closed_repr(self):
         self.c.close()
-        assert (
-            repr(self.c) ==
-            ("<closed Collection '/vsizip/{path}:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<closed Collection '/vsizip/{path}:coutwildrnp', mode 'r' "
+            "at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_path(self):
-        assert self.c.path == '/vsizip/{path}'.format(path=self.path)
+        assert self.c.path == "/vsizip/{path}".format(path=self.path)
 
 
 class TestZipArchiveReading(TestVsiReading):
     @pytest.fixture(autouse=True)
     def zipfile(self, data_dir, path_coutwildrnp_zip):
-        vfs = 'zip://{}'.format(path_coutwildrnp_zip)
+        vfs = "zip://{}".format(path_coutwildrnp_zip)
         self.c = fiona.open(vfs + "!coutwildrnp.shp", "r")
-        self.path = os.path.join(data_dir, 'coutwildrnp.zip')
+        self.path = os.path.join(data_dir, "coutwildrnp.zip")
         yield
         self.c.close()
 
     def test_open_repr(self):
-        assert (
-            repr(self.c) ==
-            ("<open Collection '/vsizip/{path}/coutwildrnp.shp:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<open Collection '/vsizip/{path}/coutwildrnp.shp:coutwildrnp', mode 'r' "
+            "at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_closed_repr(self):
         self.c.close()
-        assert (
-            repr(self.c) ==
-            ("<closed Collection '/vsizip/{path}/coutwildrnp.shp:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<closed Collection '/vsizip/{path}/coutwildrnp.shp:coutwildrnp', mode 'r' "
+            "at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_path(self):
-        assert (self.c.path ==
-                '/vsizip/{path}/coutwildrnp.shp'.format(path=self.path))
+        assert self.c.path == "/vsizip/{path}/coutwildrnp.shp".format(path=self.path)
 
 
 class TestZipArchiveReadingAbsPath(TestZipArchiveReading):
     @pytest.fixture(autouse=True)
     def zipfile(self, path_coutwildrnp_zip):
-        vfs = 'zip://{}'.format(os.path.abspath(path_coutwildrnp_zip))
+        vfs = "zip://{}".format(os.path.abspath(path_coutwildrnp_zip))
         self.c = fiona.open(vfs + "!coutwildrnp.shp", "r")
         yield
         self.c.close()
@@ -136,46 +133,43 @@ def test_closed_repr(self):
         assert repr(self.c).startswith("<closed Collection '/vsizip/")
 
     def test_path(self):
         assert self.c.path.startswith("/vsizip/")
 
 
 class TestTarReading(TestVsiReading):
     @pytest.fixture(autouse=True)
     def tarfile(self, data_dir, path_coutwildrnp_tar):
-        self.c = fiona.open("tar://{}!testing/coutwildrnp.shp".format(
-            path_coutwildrnp_tar))
-        self.path = os.path.join(data_dir, 'coutwildrnp.tar')
+        self.c = fiona.open(
+            "tar://{}!testing/coutwildrnp.shp".format(path_coutwildrnp_tar)
+        )
+        self.path = os.path.join(data_dir, "coutwildrnp.tar")
         yield
         self.c.close()
 
     def test_open_repr(self):
-        assert (
-            repr(self.c) ==
-            ("<open Collection '/vsitar/{path}/testing/coutwildrnp.shp:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<open Collection '/vsitar/{path}/testing/coutwildrnp.shp:coutwildrnp', "
+            "mode 'r' at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_closed_repr(self):
         self.c.close()
-        assert (
-            repr(self.c) ==
-            ("<closed Collection '/vsitar/{path}/testing/coutwildrnp.shp:coutwildrnp', mode 'r' "
-            "at {id}>".format(
-                id=hex(id(self.c)),
-                path=self.path)))
+        assert repr(self.c) == (
+            "<closed Collection '/vsitar/{path}/testing/coutwildrnp.shp:coutwildrnp', "
+            "mode 'r' at {id}>".format(id=hex(id(self.c)), path=self.path)
+        )
 
     def test_path(self):
-        assert (
-            self.c.path ==
-            '/vsitar/{path}/testing/coutwildrnp.shp'.format(path=self.path))
+        assert self.c.path == "/vsitar/{path}/testing/coutwildrnp.shp".format(
+            path=self.path
+        )
 
 
 @pytest.mark.network
 def test_open_http():
-    ds = fiona.open('https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/poly.shp')
+    ds = fiona.open(
+        "https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/poly.shp"
+    )
    assert len(ds) == 10
 
 
@@ -183,13 +177,14 @@ def test_open_http():
 @mingdalversion
 @pytest.mark.network
 def test_open_s3():
-    ds = fiona.open('zip+s3://fiona-testing/coutwildrnp.zip')
+    ds = 
fiona.open("zip+s3://fiona-testing/coutwildrnp.zip") assert len(ds) == 67 +@credentials @pytest.mark.network def test_open_zip_https(): - ds = fiona.open('zip+https://s3.amazonaws.com/fiona-testing/coutwildrnp.zip') + ds = fiona.open("zip+https://s3.amazonaws.com/fiona-testing/coutwildrnp.zip") assert len(ds) == 67 diff --git a/tests/test_write.py b/tests/test_write.py index 460a44462..d4bd3150d 100644 --- a/tests/test_write.py +++ b/tests/test_write.py @@ -4,16 +4,19 @@ import fiona from fiona.crs import CRS +from fiona.model import Feature def test_issue771(tmpdir, caplog): """Overwrite a GeoJSON file without logging errors.""" schema = {"geometry": "Point", "properties": {"zero": "int"}} - feature = { - "geometry": {"type": "Point", "coordinates": (0, 0)}, - "properties": {"zero": "0"}, - } + feature = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0, 0)}, + "properties": {"zero": "0"}, + } + ) outputfile = tmpdir.join("test.geojson") @@ -36,10 +39,12 @@ def test_issue771(tmpdir, caplog): def test_write__esri_only_wkt(tmpdir): """https://github.com/Toblerity/Fiona/issues/977""" schema = {"geometry": "Point", "properties": {"zero": "int"}} - feature = { - "geometry": {"type": "Point", "coordinates": (0, 0)}, - "properties": {"zero": "0"}, - } + feature = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0, 0)}, + "properties": {"zero": "0"}, + } + ) target_crs = ( 'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet",' 'GEOGCS["GCS_NAD_1983_2011",DATUM["D_NAD_1983_2011",' @@ -57,7 +62,8 @@ def test_write__esri_only_wkt(tmpdir): ) outputfile = tmpdir.join("test.shp") with fiona.open( - str(outputfile), "w", + str(outputfile), + "w", driver="ESRI Shapefile", schema=schema, crs=target_crs, @@ -66,7 +72,7 @@ def test_write__esri_only_wkt(tmpdir): assert collection.crs_wkt.startswith( ( 'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"', - 'PROJCRS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"' # GDAL 3.3+ + 'PROJCRS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"', # GDAL 3.3+ ) ) @@ -74,10 +80,12 @@ def test_write__esri_only_wkt(tmpdir): def test_write__wkt_version(tmpdir): """https://github.com/Toblerity/Fiona/issues/977""" schema = {"geometry": "Point", "properties": {"zero": "int"}} - feature = { - "geometry": {"type": "Point", "coordinates": (0, 0)}, - "properties": {"zero": "0"}, - } + feature = Feature.from_dict( + **{ + "geometry": {"type": "Point", "coordinates": (0, 0)}, + "properties": {"zero": "0"}, + } + ) target_crs = ( 'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet",' 'GEOGCS["GCS_NAD_1983_2011",DATUM["D_NAD_1983_2011",' @@ -95,7 +103,8 @@ def test_write__wkt_version(tmpdir): ) outputfile = tmpdir.join("test.shp") with fiona.open( - str(outputfile), "w", + str(outputfile), + "w", driver="ESRI Shapefile", schema=schema, crs=target_crs,