diff --git a/.github/workflows/docker-image-build-publish.yml b/.github/workflows/docker-image-build-publish.yml
new file mode 100644
index 0000000..bc13c1b
--- /dev/null
+++ b/.github/workflows/docker-image-build-publish.yml
@@ -0,0 +1,48 @@
+#
+name: Create and publish a Docker image
+
+# Configures this workflow to run every time a change is pushed to the branch called `master`.
+on:
+  push:
+    branches:
+      - master
+# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
+    permissions:
+      contents: read
+      packages: write
+    #
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
+      - name: Log in to the Container registry
+        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
+      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
+      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
+      - name: Build and push Docker image
+        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
+        with:
+          context: .
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
diff --git a/API/main.py b/API/main.py
index 0712904..93193cb 100644
--- a/API/main.py
+++ b/API/main.py
@@ -1,5 +1,6 @@
 import os
 import tempfile
+from typing import List, Optional
 
 import requests
 from fastapi import FastAPI
@@ -14,87 +15,119 @@
 class PredictionRequest(BaseModel):
-    bbox: list[float] = Field(
-        ...,
-        example=[
-            100.56228021333352,
-            13.685230854641182,
-            100.56383321235313,
-            13.685961853747969,
-        ],
-        description="Bounding box coordinates [min_longitude, min_latitude, max_longitude, max_latitude].",
-    )
+    """
+    Request model for the prediction endpoint.
+    """
+
+    bbox: List[float]
+
     checkpoint: str = Field(
         ...,
         example="path/to/model.tflite or https://example.com/model.tflite",
         description="Path or URL to the machine learning model file.",
     )
-    zoom: int = Field(
+
+    zoom_level: int = Field(
         ...,
-        ge=18,
-        le=23,
-        example=20,
-        description="Zoom level for predictions (between 18 and 23).",
+        description="Zoom level of the tiles to be used for prediction.",
     )
-    tms: str = Field(
+
+    source: str = Field(
         ...,
-        example="https://tiles.openaerialmap.org/6501a65c0906de000167e64d/0/6501a65c0906de000167e64e/{z}/{x}/{y}",
-        description="URL for tile map service.",
+        description="URL of the imagery (TMS) on which to detect features.",
     )
-    confidence: float = Field(
-        0.5,
-        example=0.5,
-        gt=0,
-        le=1,
-        description="Threshold probability for filtering out low-confidence predictions. Defaults to 0.5.",
+
+    use_josm_q: Optional[bool] = Field(
+        False,
+        description="Whether to orthogonalize the predicted polygons (JOSM Q). Defaults to False.",
     )
-    area_threshold: PositiveFloat = Field(
-        3,
-        example=3,
-        description="Threshold for filtering polygon areas. Defaults to 3 sqm.",
+
+    merge_adjacent_polygons: Optional[bool] = Field(
+        True,
+        description="Merges adjacent polygons that intersect or contain each other. Defaults to True.",
+    )
+
+    confidence: Optional[int] = Field(
+        50,
+        description="Threshold probability (0-100) for filtering out low-confidence predictions. Defaults to 50.",
+    )
+
+    max_angle_change: Optional[int] = Field(
+        15,
+        description="Maximum angle change allowed when orthogonalizing polygons. Defaults to 15.",
+    )
+
+    skew_tolerance: Optional[int] = Field(
+        15,
+        description="Skew tolerance used when orthogonalizing polygons. Defaults to 15.",
     )
-    tolerance: PositiveFloat = Field(
+
+    tolerance: Optional[float] = Field(
         0.5,
-        example=0.5,
-        description="Tolerance parameter for simplifying polygons. Defaults to 0.5 m.",
+        description="Tolerance parameter for simplifying polygons. Defaults to 0.5.",
     )
-    tile_overlap_distance: PositiveFloat = Field(
+
+    area_threshold: Optional[float] = Field(
+        3,
+        description="Threshold for filtering polygon areas. Defaults to 3.",
+    )
+
+    tile_overlap_distance: Optional[float] = Field(
         0.15,
-        example=0.15,
-        description="Tile overlap distance to remove the strip between predictions. Defaults to 0.15 m.",
+        description="Tile overlap distance used to remove the seam between adjacent tile predictions. Defaults to 0.15.",
     )
-    merge_adjacent_polygons: bool = Field(
-        True,
-        example=True,
-        description="Flag to merge adjacent polygons. Defaults to True.",
+
+    @validator(
+        "max_angle_change",
+        "skew_tolerance",
+        "tolerance",
+        "tile_overlap_distance",
+        "area_threshold",
     )
+    def validate_values(cls, value):
+        if value is not None:
+            if value < 0 or value > 45:
+                raise ValueError(f"Value should be between 0 and 45: {value}")
+        return value
+
+    @validator("tolerance")
+    def validate_tolerance(cls, value):
+        if value is not None:
+            if value < 0 or value > 10:
+                raise ValueError(f"Value should be between 0 and 10: {value}")
+        return value
+
+    @validator("tile_overlap_distance")
+    def validate_tile_overlap_distance(cls, value):
+        if value is not None:
+            if value < 0 or value > 1:
+                raise ValueError(f"Value should be between 0 and 1: {value}")
+        return value
+
+    @validator("area_threshold")
+    def validate_area_threshold(cls, value):
+        if value is not None:
+            if value < 0 or value > 20:
+                raise ValueError(f"Value should be between 0 and 20: {value}")
+        return value
+
+    @validator("confidence")
+    def validate_confidence(cls, value):
+        if value is not None:
+            if value < 0 or value > 100:
+                raise ValueError(f"Value should be between 0 and 100: {value}")
+            return value / 100
 
     @validator("bbox")
-    def validate_bbox_length(cls, value):
-        """
-        Validates the length of bbox coordinates.
-        """
+    def validate_bbox(cls, value):
         if len(value) != 4:
-            raise ValueError("bbox must contain 4 float values")
+            raise ValueError("bbox should have exactly 4 elements")
         return value
 
-    @validator("checkpoint")
-    def validate_checkpoint(cls, value):
-        """
-        Validates checkpoint parameter. If URL, download the file to temp directory.
-        """
-        if value.startswith("http"):
-            response = requests.get(value)
-            if response.status_code != 200:
-                raise ValueError(
-                    "Failed to download model checkpoint from the provided URL"
-                )
-            _, temp_file_path = tempfile.mkstemp(suffix=".tflite")
-            with open(temp_file_path, "wb") as f:
-                f.write(response.content)
-            return temp_file_path
-        elif not os.path.exists(value):
-            raise ValueError("Model checkpoint file not found")
+    @validator("zoom_level")
+    def validate_zoom_level(cls, value):
+        if value < 18 or value > 22:
+            raise ValueError("Zoom level should be between 18 and 22")
         return value
@@ -111,15 +144,18 @@ async def predict_api(request: PredictionRequest):
     """
     try:
         predictions = predict(
-            request.bbox,
-            request.checkpoint,
-            request.zoom,
-            request.tms,
+            bbox=request.bbox,
+            model_path=request.checkpoint,
+            zoom_level=request.zoom_level,
+            tms_url=request.source,
+            tile_size=256,
             confidence=request.confidence,
-            area_threshold=request.area_threshold,
-            tolerance=request.tolerance,
             tile_overlap_distance=request.tile_overlap_distance,
             merge_adjancent_polygons=request.merge_adjacent_polygons,
+            max_angle_change=request.max_angle_change,
+            skew_tolerance=request.skew_tolerance,
+            tolerance=request.tolerance,
+            area_threshold=request.area_threshold,
         )
         return predictions
     except Exception as e:
diff --git a/API/requirements.txt b/API/requirements.txt
index 0b2260c..e50bcd0 100644
--- a/API/requirements.txt
+++ b/API/requirements.txt
@@ -1,4 +1,4 @@
 fastapi==0.103.2
 uvicorn==0.22.0
-fairpredictor==0.0.29
+fairpredictor
 tflite-runtime==2.14.0
\ No newline at end of file
diff --git a/predictor/app.py b/predictor/app.py
index 95f4be3..c6cc669 100644
--- a/predictor/app.py
+++ b/predictor/app.py
@@ -5,6 +5,8 @@
 import time
 import uuid
 
+from orthogonalizer import othogonalize_poly
+
 from .downloader import download
 from .prediction import run_prediction
 from .raster2polygon import polygonizer
@@ -25,6 +27,9 @@ def predict(
     merge_adjancent_polygons=True,
     use_raster2polygon=False,
     remove_metadata=True,
+    use_josm_q=False,
+    max_angle_change=15,
+    skew_tolerance=15,
 ):
     """
     Parameters:
@@ -95,4 +100,13 @@ def predict(
         prediction_geojson_data = json.load(f)
     if remove_metadata:
         shutil.rmtree(base_path)
+    for feature in prediction_geojson_data["features"]:
+        feature["properties"]["building"] = "yes"
+        feature["properties"]["source"] = "fAIr"
+        if use_josm_q:
+            feature["geometry"] = othogonalize_poly(
+                feature["geometry"],
+                maxAngleChange=max_angle_change,
+                skewTolerance=skew_tolerance,
+            )
     return prediction_geojson_data
diff --git a/predictor/utils.py b/predictor/utils.py
index 5939472..7f39128 100644
--- a/predictor/utils.py
+++ b/predictor/utils.py
@@ -94,6 +94,21 @@ def latlng2tile(zoom, lat, lng, tile_size):
     return t_x, t_y
 
 
+def tile_xy_to_quad_key(tile_x, tile_y, level_of_detail):
+    quad_key = []
+    for i in range(level_of_detail, 0, -1):
+        digit = "0"
+        mask = 1 << (i - 1)
+        if (tile_x & mask) != 0:
+            digit = chr(ord(digit) + 1)
+        if (tile_y & mask) != 0:
+            digit = chr(ord(digit) + 1)
+            digit = chr(ord(digit) + 1)
+        quad_key.append(digit)
+
+    return "".join(quad_key)
+
+
 def download_imagery(start: list, end: list, zm_level, base_path, source="maxar"):
     """Downloads imagery from start to end tile coordinate system
 
@@ -126,7 +141,9 @@ def download_imagery(start: list, end: list, zm_level, base_path, source="maxar"
                 raise ex
             source_name = source
            download_url = f"https://services.digitalglobe.com/earthservice/tmsaccess/tms/1.0.0/DigitalGlobe:ImageryTileService@EPSG:3857@jpg/{zm_level}/{download_path[0]}/{download_path[1]}.jpg?connectId={connect_id}&flipy=true"
-
+        elif source == "bing":
+            download_url = f"https://ecn.t2.tiles.virtualearth.net/tiles/a{tile_xy_to_quad_key(download_path[0], download_path[1], zm_level)}.jpeg?g=14037&pr=odbl&n=z"
+            print(download_url)
         else:
             # source should be url as string , like this : https://tiles.openaerialmap.org/62dbd947d8499800053796ec/0/62dbd947d8499800053796ed/{z}/{x}/{y}
             if "{-y}" in source:
diff --git a/requirements.txt b/requirements.txt
index 35476bd..501d44e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,4 +4,5 @@ tqdm<=4.62.3
 Pillow<=9.0.1
 geopandas<=0.10.2
 shapely
-rasterio
\ No newline at end of file
+rasterio
+orthogonalizer
diff --git a/setup.py b/setup.py
index b9443af..ae06270 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
 setup(
     name="fairpredictor",
-    version="0.0.29",
+    version="0.0.30",
     url="https://github.com/kshitijrajsharma/fairpredictor",
     author="Kshitij Raj Sharma",
     author_email="skshitizraj@gmail.com",
@@ -33,6 +33,6 @@
         "geopandas<=0.14.5",
         "shapely>=1.0.0,<=2.0.2",
         "rasterio>=1.0.0,<=1.3.8",
-        # "raster2polygon",
+        "orthogonalizer",
     ],
 )
diff --git a/tests/test_predict.py b/tests/test_predict.py
index e69de29..2b5793d 100644
--- a/tests/test_predict.py
+++ b/tests/test_predict.py
@@ -0,0 +1,12 @@
+from predictor import download
+
+bbox = [-84.1334429383278, 9.953153171808898, -84.13033694028854, 9.954719779271468]
+zoom_level = 19
+image_download_path = download(
+    bbox,
+    zoom_level=zoom_level,
+    tms_url="bing",
+    tile_size=256,
+    download_path="/Users/kshitij/hotosm/fairpredictor/download/test",
+)
+print(image_download_path)
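For reference, a request body for the reworked `PredictionRequest` model could look like the sketch below. The `/predict` route and the local host/port are assumptions (the route decorator sits outside this diff); the bbox and TMS URL reuse the examples removed from the old field definitions, and the checkpoint path is a placeholder.

```python
import requests

# Hypothetical payload for the new request model; the route path and server
# address are assumptions, not shown in this diff.
payload = {
    "bbox": [100.56228021333352, 13.685230854641182, 100.56383321235313, 13.685961853747969],
    "checkpoint": "path/to/model.tflite",  # placeholder path on the server
    "zoom_level": 19,  # must be within 18-22 per validate_zoom_level
    "source": "https://tiles.openaerialmap.org/6501a65c0906de000167e64d/0/6501a65c0906de000167e64e/{z}/{x}/{y}",
    "use_josm_q": True,  # orthogonalize the returned polygons
    "confidence": 50,  # percent; the validator rescales explicit values to 0.5
    "max_angle_change": 15,
    "skew_tolerance": 15,
    "tolerance": 0.5,
    "area_threshold": 3,
    "tile_overlap_distance": 0.15,
}

response = requests.post("http://localhost:8000/predict", json=payload)
response.raise_for_status()
# GeoJSON whose features carry building=yes and source=fAIr properties
print(response.json())
```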
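Library users can also call `predict` directly with the new orthogonalization arguments, mirroring the keyword arguments the API handler now passes. A minimal sketch, assuming `predict` is importable from the package root the same way the new test imports `download`, with a placeholder checkpoint path and the OpenAerialMap TMS from the old example:

```python
from predictor import predict  # assumed package-level export, like `download` in tests/test_predict.py

predictions = predict(
    bbox=[100.56228021333352, 13.685230854641182, 100.56383321235313, 13.685961853747969],
    model_path="checkpoints/model.tflite",  # placeholder path
    zoom_level=20,
    tms_url="https://tiles.openaerialmap.org/6501a65c0906de000167e64d/0/6501a65c0906de000167e64e/{z}/{x}/{y}",
    tile_size=256,
    confidence=0.5,  # fractional here; the API converts its 0-100 integer before calling predict
    tile_overlap_distance=0.15,
    merge_adjancent_polygons=True,  # spelling matches the existing keyword in predictor/app.py
    use_josm_q=True,
    max_angle_change=15,
    skew_tolerance=15,
    tolerance=0.5,
    area_threshold=3,
)
print(len(predictions["features"]))
```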
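The new `tile_xy_to_quad_key` helper is the standard Bing Maps quadkey encoding that the `source == "bing"` branch of `download_imagery` interpolates into the tile URL. A quick sanity check against the worked example from the Bing Maps tile-system documentation, where tile (x=3, y=5) at level of detail 3 encodes to "213":

```python
from predictor.utils import tile_xy_to_quad_key

# Tile (3, 5) at level of detail 3 -> quadkey "213"
assert tile_xy_to_quad_key(3, 5, 3) == "213"

# This quadkey ends up in the Bing tile URL built in download_imagery,
# e.g. https://ecn.t2.tiles.virtualearth.net/tiles/a213.jpeg?...
print(tile_xy_to_quad_key(3, 5, 3))
```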