From 7e030cb91a6eb5d5aebeaf09362341929f869920 Mon Sep 17 00:00:00 2001 From: bhgithub100 Date: Wed, 17 Jul 2024 11:20:37 -0400 Subject: [PATCH] formatting the notebook using black --- autopilot/autopilot_ts_data_merge.ipynb | 330 +++++++++++++----------- 1 file changed, 177 insertions(+), 153 deletions(-) diff --git a/autopilot/autopilot_ts_data_merge.ipynb b/autopilot/autopilot_ts_data_merge.ipynb index b788ff9973..136f64f77a 100644 --- a/autopilot/autopilot_ts_data_merge.ipynb +++ b/autopilot/autopilot_ts_data_merge.ipynb @@ -7,6 +7,28 @@ "jp-MarkdownHeadingCollapsed": true, "tags": [] }, + "source": [ + "# Time-Series Forecasting - Merge Amazon Forecast Datasets for Amazon SageMaker Canvas API" + ] + }, + { + "cell_type": "markdown", + "id": "4889982e", + "metadata": {}, + "source": [ + "---\n", + "\n", + "This notebook's CI test result for us-west-2 is as follows. CI test results in other regions can be found at the end of the notebook. \n", + "\n", + "![This us-west-2 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/us-west-2/autopilot|autopilot_ts_data_merge.ipynb)\n", + "\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "b01027dc", + "metadata": {}, "source": [ "### 1. Introduction " ] @@ -77,14 +99,14 @@ "\n", "region = boto3.Session().region_name\n", "session = sagemaker.Session()\n", - "client = boto3.client('sts')\n", + "client = boto3.client(\"sts\")\n", "account_id = client.get_caller_identity()[\"Account\"]\n", "\n", "# Modify the following default_bucket to use a bucket of your choosing\n", "bucket = session.default_bucket()\n", - "data_bucket = 'rawdata-' + region + '-' + account_id\n", - "#bucket = 'my-bucket'\n", - "prefix = 'moving-to-canvas'\n", + "data_bucket = \"rawdata-\" + region + \"-\" + account_id\n", + "# bucket = 'my-bucket'\n", + "prefix = \"moving-to-canvas\"\n", "\n", "role = get_execution_role()\n", "\n", @@ -101,26 +123,12 @@ }, "outputs": [], "source": [ - "# Assign column heading to 3 different data files for target time series, related time series and item metadata \n", - "columns_tts = [\n", - " 'item_id',\n", - " 'store_id',\n", - " 'demand',\n", - " 'ts'\n", - "]\n", + "# Assign column heading to 3 different data files for target time series, related time series and item metadata\n", + "columns_tts = [\"item_id\", \"store_id\", \"demand\", \"ts\"]\n", "\n", - "columns_rts = [\n", - " 'item_id',\n", - " 'store_id',\n", - " 'price',\n", - " 'ts'\n", - "]\n", + "columns_rts = [\"item_id\", \"store_id\", \"price\", \"ts\"]\n", "\n", - "columns_items = [\n", - " 'item_id',\n", - " 'item_type',\n", - " 'item_description'\n", - "]" + "columns_items = [\"item_id\", \"item_type\", \"item_description\"]" ] }, { @@ -132,15 +140,15 @@ }, "outputs": [], "source": [ - "#Read from data file and explore the data. Also change the time stamp format to desired one if needed.\n", - "tbl_tts = pd.read_csv(\"data/food-forecast-tts-uc1.csv\", header = None)\n", + "# Read from data file and explore the data. 
Also change the time stamp format to desired one if needed.\n", + "tbl_tts = pd.read_csv(\"data/food-forecast-tts-uc1.csv\", header=None)\n", "tbl_tts.columns = columns_tts\n", - "tbl_tts['ts'] = pd.to_datetime(tbl_tts['ts'], format='%m/%d/%y').dt.strftime('%Y-%m-%d')\n", - "#print(tbl_tts.shape)\n", - "#tbl_tts.head()\n", - "#tbl_tts['ts'].min(), tbl_tts['ts'].max()\n", - "#print(tbl_tts.dtypes)\n", - "#print(tbl_tts.isnull().sum())" + "tbl_tts[\"ts\"] = pd.to_datetime(tbl_tts[\"ts\"], format=\"%m/%d/%y\").dt.strftime(\"%Y-%m-%d\")\n", + "# print(tbl_tts.shape)\n", + "# tbl_tts.head()\n", + "# tbl_tts['ts'].min(), tbl_tts['ts'].max()\n", + "# print(tbl_tts.dtypes)\n", + "# print(tbl_tts.isnull().sum())" ] }, { @@ -152,15 +160,15 @@ }, "outputs": [], "source": [ - "#read from data file and explore the data. Also change the time stamp format to desired one if needed.\n", - "tbl_rts = pd.read_csv(\"data/food-forecast-rts-uc1.csv\", header = None)\n", + "# read from data file and explore the data. Also change the time stamp format to desired one if needed.\n", + "tbl_rts = pd.read_csv(\"data/food-forecast-rts-uc1.csv\", header=None)\n", "tbl_rts.columns = columns_rts\n", - "tbl_rts['ts'] = pd.to_datetime(tbl_rts['ts'], format='%m/%d/%y').dt.strftime('%Y-%m-%d')\n", - "#print(tbl_rts.shape)\n", - "#tbl_rts .head()\n", - "#tbl_rts['ts'].min(), tbl_rts['ts'].max() \n", - "#print(tbl_rts.dtypes)\n", - "#print(tbl_rts.isnull().sum())" + "tbl_rts[\"ts\"] = pd.to_datetime(tbl_rts[\"ts\"], format=\"%m/%d/%y\").dt.strftime(\"%Y-%m-%d\")\n", + "# print(tbl_rts.shape)\n", + "# tbl_rts .head()\n", + "# tbl_rts['ts'].min(), tbl_rts['ts'].max()\n", + "# print(tbl_rts.dtypes)\n", + "# print(tbl_rts.isnull().sum())" ] }, { @@ -172,12 +180,12 @@ }, "outputs": [], "source": [ - "#read from data file and explore the data\n", - "tbl_item = pd.read_csv(\"data/food-forecast-item.csv\", header = None)\n", + "# read from data file and explore the data\n", + "tbl_item = pd.read_csv(\"data/food-forecast-item.csv\", header=None)\n", "tbl_item.columns = columns_items\n", - "#tbl_item = tbl_item.set_index('item_id', inplace=True)\n", - "#print(tbl_item.shape)\n", - "#tbl_item.head()" + "# tbl_item = tbl_item.set_index('item_id', inplace=True)\n", + "# print(tbl_item.shape)\n", + "# tbl_item.head()" ] }, { @@ -189,10 +197,10 @@ }, "outputs": [], "source": [ - "#Join the data files into one data file\n", - "tts_rts_combined_outer = tbl_tts.merge(tbl_rts, how='outer')\n", + "# Join the data files into one data file\n", + "tts_rts_combined_outer = tbl_tts.merge(tbl_rts, how=\"outer\")\n", "tts_rts_combined_outer\n", - "combined_tts_rts_im = tts_rts_combined_outer.merge(tbl_item, how='left')\n", + "combined_tts_rts_im = tts_rts_combined_outer.merge(tbl_item, how=\"left\")\n", "combined_tts_rts_im" ] }, @@ -205,9 +213,9 @@ }, "outputs": [], "source": [ - "#Write the combined dataset to csv file which will be used for training the model using SageMaker Canvas API\n", + "# Write the combined dataset to csv file which will be used for training the model using SageMaker Canvas API\n", "file_name = \"combined_tts_rts_item.csv\"\n", - "full_path = \"data/\"+file_name\n", + "full_path = \"data/\" + file_name\n", "combined_tts_rts_im.to_csv(full_path, index=False)" ] }, @@ -220,15 +228,12 @@ }, "outputs": [], "source": [ - "# All columns in tts will be included in TimeSeriesConfig as it contains \n", + "# All columns in tts will be included in TimeSeriesConfig as it contains\n", "# target, itemID, timestamp, and additional forecast 
dimensions.\n", "exclude_columns = columns_tts\n", "columns_to_include = [col for col in combined_tts_rts_im.columns if col not in exclude_columns]\n", "\n", - "json_data = {\n", - " \"FeatureAttributeNames\": columns_to_include,\n", - " \"FeatureDataTypes\": {}\n", - "}\n", + "json_data = {\"FeatureAttributeNames\": columns_to_include, \"FeatureDataTypes\": {}}\n", "\n", "for col in columns_to_include:\n", " dtype = combined_tts_rts_im[col].dtype\n", @@ -244,9 +249,9 @@ "\n", "json_str = json.dumps(json_data, indent=4)\n", "\n", - "#print(json_str)\n", + "# print(json_str)\n", "\n", - "with open('data/feature.json', 'w') as f:\n", + "with open(\"data/feature.json\", \"w\") as f:\n", " f.write(json_str)" ] }, @@ -259,24 +264,24 @@ }, "outputs": [], "source": [ - "#Upload the data file and config file to S3 bucket\n", + "# Upload the data file and config file to S3 bucket\n", "\n", - "s3 = boto3.client('s3')\n", - "object_name = prefix+'/train/'+file_name\n", - "#print(object_name)\n", + "s3 = boto3.client(\"s3\")\n", + "object_name = prefix + \"/train/\" + file_name\n", + "# print(object_name)\n", "try:\n", - " response = s3.upload_file(full_path, bucket, object_name)\n", + " response = s3.upload_file(full_path, bucket, object_name)\n", "except ClientError as e:\n", - " logging.error(e)\n", + " logging.error(e)\n", "\n", "config_file_name = \"feature.json\"\n", - "object_name = prefix + \"/\" + config_file_name \n", + "object_name = prefix + \"/\" + config_file_name\n", "config_full_path = \"data/\" + config_file_name\n", "\n", "try:\n", - " response = s3.upload_file(config_full_path, data_bucket, object_name)\n", + " response = s3.upload_file(config_full_path, data_bucket, object_name)\n", "except ClientError as e:\n", - " logging.error(e)" + " logging.error(e)" ] }, { @@ -329,51 +334,43 @@ "outputs": [], "source": [ "input_data_config = [\n", - " { 'ChannelType': 'training',\n", - " 'ContentType': 'text/csv;header=present',\n", - " 'CompressionType': 'None',\n", - " 'DataSource': {\n", - " 'S3DataSource': {\n", - " 'S3DataType': 'S3Prefix',\n", - " 'S3Uri': 's3://{}/{}/train/'.format(bucket, prefix),\n", + " {\n", + " \"ChannelType\": \"training\",\n", + " \"ContentType\": \"text/csv;header=present\",\n", + " \"CompressionType\": \"None\",\n", + " \"DataSource\": {\n", + " \"S3DataSource\": {\n", + " \"S3DataType\": \"S3Prefix\",\n", + " \"S3Uri\": \"s3://{}/{}/train/\".format(bucket, prefix),\n", " }\n", - " }\n", + " },\n", " }\n", "]\n", "\n", - "output_data_config = {'S3OutputPath': 's3://{}/{}/train_output'.format(bucket, prefix)}\n", + "output_data_config = {\"S3OutputPath\": \"s3://{}/{}/train_output\".format(bucket, prefix)}\n", "\n", - "optimizaton_metric_config = {'MetricName': 'AverageWeightedQuantileLoss'}\n", + "optimizaton_metric_config = {\"MetricName\": \"AverageWeightedQuantileLoss\"}\n", "\n", - "automl_problem_type_config ={\n", - " 'TimeSeriesForecastingJobConfig': {\n", - " 'FeatureSpecificationS3Uri': 's3://{}/{}/feature.json'.format(data_bucket, prefix), \n", - " 'ForecastFrequency': 'M',\n", - " 'ForecastHorizon': 2,\n", - " 'ForecastQuantiles': ['p50','p60','p70','p80','p90'],\n", - " 'Transformations': {\n", - " 'Filling': {\n", - " 'demand': {\n", - " 'middlefill' : 'zero',\n", - " 'backfill' : 'zero'\n", - " },\n", - " 'price': {\n", - " 'middlefill' : 'zero',\n", - " 'backfill' : 'zero',\n", - " 'futurefill' : 'zero'\n", - " } \n", - " }\n", - " },\n", - " 'TimeSeriesConfig': {\n", - " 'TargetAttributeName': 'demand',\n", - " 'TimestampAttributeName': 
'ts',\n", - " 'ItemIdentifierAttributeName': 'item_id',\n", - " 'GroupingAttributeNames': [\n", - " 'store_id'\n", - " ]\n", + "automl_problem_type_config = {\n", + " \"TimeSeriesForecastingJobConfig\": {\n", + " \"FeatureSpecificationS3Uri\": \"s3://{}/{}/feature.json\".format(data_bucket, prefix),\n", + " \"ForecastFrequency\": \"M\",\n", + " \"ForecastHorizon\": 2,\n", + " \"ForecastQuantiles\": [\"p50\", \"p60\", \"p70\", \"p80\", \"p90\"],\n", + " \"Transformations\": {\n", + " \"Filling\": {\n", + " \"demand\": {\"middlefill\": \"zero\", \"backfill\": \"zero\"},\n", + " \"price\": {\"middlefill\": \"zero\", \"backfill\": \"zero\", \"futurefill\": \"zero\"},\n", " }\n", - " }\n", - " }" + " },\n", + " \"TimeSeriesConfig\": {\n", + " \"TargetAttributeName\": \"demand\",\n", + " \"TimestampAttributeName\": \"ts\",\n", + " \"ItemIdentifierAttributeName\": \"item_id\",\n", + " \"GroupingAttributeNames\": [\"store_id\"],\n", + " },\n", + " }\n", + "}" ] }, { @@ -397,9 +394,9 @@ " AutoMLJobName=auto_ml_job_name,\n", " AutoMLJobInputDataConfig=input_data_config,\n", " OutputDataConfig=output_data_config,\n", - " AutoMLProblemTypeConfig = automl_problem_type_config,\n", + " AutoMLProblemTypeConfig=automl_problem_type_config,\n", " AutoMLJobObjective=optimizaton_metric_config,\n", - " RoleArn=role\n", + " RoleArn=role,\n", ")" ] }, @@ -430,7 +427,10 @@ " job_run_status = describe_response[\"AutoMLJobStatus\"]\n", "\n", " print(\n", - " dt.now(), describe_response[\"AutoMLJobStatus\"] + \" - \" + describe_response[\"AutoMLJobSecondaryStatus\"]\n", + " dt.now(),\n", + " describe_response[\"AutoMLJobStatus\"]\n", + " + \" - \"\n", + " + describe_response[\"AutoMLJobSecondaryStatus\"],\n", " )\n", " sleep(180)" ] @@ -452,18 +452,16 @@ }, "outputs": [], "source": [ - "best_candidate = sm.describe_auto_ml_job_v2(AutoMLJobName=auto_ml_job_name)['BestCandidate']\n", - "best_candidate_containers = best_candidate['InferenceContainers'] \n", - "best_candidate_name = best_candidate['CandidateName']\n", + "best_candidate = sm.describe_auto_ml_job_v2(AutoMLJobName=auto_ml_job_name)[\"BestCandidate\"]\n", + "best_candidate_containers = best_candidate[\"InferenceContainers\"]\n", + "best_candidate_name = best_candidate[\"CandidateName\"]\n", "\n", "reponse = sm.create_model(\n", - "ModelName = best_candidate_name,\n", - "ExecutionRoleArn = role,\n", - "Containers = best_candidate_containers\n", + " ModelName=best_candidate_name, ExecutionRoleArn=role, Containers=best_candidate_containers\n", ")\n", "\n", - "print('BestCandidateName:',best_candidate_name)\n", - "print('BestCandidateContainers:',best_candidate_containers)" + "print(\"BestCandidateName:\", best_candidate_name)\n", + "print(\"BestCandidateContainers:\", best_candidate_containers)" ] }, { @@ -500,7 +498,7 @@ "outputs": [], "source": [ "timestamp_suffix = strftime(\"%Y%m%d-%H%M%S\", gmtime())\n", - "transform_job_name=f'{best_candidate_name}-' + timestamp_suffix\n", + "transform_job_name = f\"{best_candidate_name}-\" + timestamp_suffix\n", "print(\"BatchTransformJob: \" + transform_job_name)" ] }, @@ -523,10 +521,10 @@ }, "outputs": [], "source": [ - "#modify the input file for inference to remove n/a values\n", - "df = pd.read_csv('data/combined_tts_rts_item.csv')\n", - "df.fillna(0, inplace = True) \n", - "df.to_csv('data/combined_tts_rts_item_modified.csv', index=False)" + "# modify the input file for inference to remove n/a values\n", + "df = pd.read_csv(\"data/combined_tts_rts_item.csv\")\n", + "df.fillna(0, inplace=True)\n", + 
"df.to_csv(\"data/combined_tts_rts_item_modified.csv\", index=False)" ] }, { @@ -538,17 +536,17 @@ }, "outputs": [], "source": [ - "#Upload the data file to S3 bucket for batch prediction\n", - "s3 = boto3.client('s3')\n", + "# Upload the data file to S3 bucket for batch prediction\n", + "s3 = boto3.client(\"s3\")\n", "file_name = \"combined_tts_rts_item.csv\"\n", "modified_file_name = \"combined_tts_rts_item_modified.csv\"\n", "full_path = \"data/\" + modified_file_name\n", - "object_name = prefix + '/batch_transform/input/' + file_name\n", - "#print(object_name)\n", + "object_name = prefix + \"/batch_transform/input/\" + file_name\n", + "# print(object_name)\n", "try:\n", - " response = s3.upload_file(full_path, bucket, object_name)\n", + " response = s3.upload_file(full_path, bucket, object_name)\n", "except ClientError as e:\n", - " logging.error(e)" + " logging.error(e)" ] }, { @@ -561,31 +559,26 @@ "outputs": [], "source": [ "response = sm.create_transform_job(\n", - " TransformJobName=transform_job_name, \n", + " TransformJobName=transform_job_name,\n", " ModelName=best_candidate_name,\n", " MaxPayloadInMB=0,\n", - " ModelClientConfig={\n", - " 'InvocationsTimeoutInSeconds': 3600\n", - " },\n", + " ModelClientConfig={\"InvocationsTimeoutInSeconds\": 3600},\n", " TransformInput={\n", - " 'DataSource': {\n", - " 'S3DataSource': {\n", - " 'S3DataType': 'S3Prefix',\n", - " 'S3Uri': 's3://{}/{}/batch_transform/input/'.format(bucket, prefix)\n", + " \"DataSource\": {\n", + " \"S3DataSource\": {\n", + " \"S3DataType\": \"S3Prefix\",\n", + " \"S3Uri\": \"s3://{}/{}/batch_transform/input/\".format(bucket, prefix),\n", " }\n", " },\n", - " 'ContentType': 'text/csv',\n", - " 'SplitType': 'None'\n", + " \"ContentType\": \"text/csv\",\n", + " \"SplitType\": \"None\",\n", " },\n", " TransformOutput={\n", - " 'S3OutputPath': 's3://{}/{}/batch_transform/output/'.format(bucket, prefix),\n", - " 'AssembleWith': 'Line',\n", + " \"S3OutputPath\": \"s3://{}/{}/batch_transform/output/\".format(bucket, prefix),\n", + " \"AssembleWith\": \"Line\",\n", " },\n", - " TransformResources={\n", - " 'InstanceType': 'ml.m5.4xlarge',\n", - " 'InstanceCount': 1\n", - " }\n", - " )" + " TransformResources={\"InstanceType\": \"ml.m5.4xlarge\", \"InstanceCount\": 1},\n", + ")" ] }, { @@ -613,9 +606,7 @@ " describe_response = sm.describe_transform_job(TransformJobName=transform_job_name)\n", " job_run_status = describe_response[\"TransformJobStatus\"]\n", "\n", - " print(\n", - " dt.now(), describe_response[\"TransformJobStatus\"]\n", - " )\n", + " print(dt.now(), describe_response[\"TransformJobStatus\"])\n", " sleep(120)" ] }, @@ -639,20 +630,53 @@ }, "outputs": [], "source": [ - "s3 = boto3.resource('s3')\n", - "s3.Bucket(bucket).download_file('{}/batch_transform/output/combined_tts_rts_item.csv.out'.format(prefix), \n", - " 'combined_tts_rts_item.csv.out')\n", - "df = pd.read_csv('combined_tts_rts_item.csv.out')\n", + "s3 = boto3.resource(\"s3\")\n", + "s3.Bucket(bucket).download_file(\n", + " \"{}/batch_transform/output/combined_tts_rts_item.csv.out\".format(prefix),\n", + " \"combined_tts_rts_item.csv.out\",\n", + ")\n", + "df = pd.read_csv(\"combined_tts_rts_item.csv.out\")\n", "df.head(10)" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "e950a2da-c8e7-48cf-98fd-7964b68937b2", + "cell_type": "markdown", + "id": "0005ad58", "metadata": {}, - "outputs": [], - "source": [] + "source": [ + "## Notebook CI Test Results\n", + "\n", + "This notebook was tested in multiple regions. 
The test results are as follows, except for us-west-2 which is shown at the top of the notebook.\n",
 "\n",
 "![This us-east-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/us-east-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This us-east-2 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/us-east-2/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This us-west-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/us-west-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This ca-central-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ca-central-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This sa-east-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/sa-east-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This eu-west-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/eu-west-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This eu-west-2 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/eu-west-2/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This eu-west-3 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/eu-west-3/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This eu-central-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/eu-central-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This eu-north-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/eu-north-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This ap-southeast-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ap-southeast-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This ap-southeast-2 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ap-southeast-2/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This ap-northeast-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ap-northeast-1/autopilot|autopilot_ts_data_merge.ipynb)\n",
 "\n",
 "![This ap-northeast-2 badge failed to load. 
Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ap-northeast-2/autopilot|autopilot_ts_data_merge.ipynb)\n", + "\n", + "![This ap-south-1 badge failed to load. Check your device's internet connectivity, otherwise the service is currently unavailable](https://prod.us-west-2.tcx-beacon.docs.aws.dev/sagemaker-nb/ap-south-1/autopilot|autopilot_ts_data_merge.ipynb)" + ] } ], "metadata": {