data_stream.py

import logging
import json

from dateutil.parser import parse as parse_date
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType
import pyspark.sql.functions as psf

# Schema for the incoming service-call records (all fields arrive as JSON strings)
schema = StructType([
    StructField("crime_id", StringType(), True),
    StructField("original_crime_type_name", StringType(), True),
    StructField("report_date", StringType(), True),
    StructField("call_date", StringType(), True),
    StructField("offense_date", StringType(), True),
    StructField("call_time", StringType(), True),
    StructField("call_date_time", StringType(), True),
    StructField("disposition", StringType(), True),
    StructField("address", StringType(), True),
    StructField("city", StringType(), True),
    StructField("state", StringType(), True),
    StructField("agency_id", StringType(), True),
    StructField("address_type", StringType(), True),
    StructField("common_location", StringType(), True)
])

# Spark UDF to convert a timestamp to a YYYYmmddHH string (year, month, day, hour)
@psf.udf(StringType())
def udf_convert_time(timestamp):
    if timestamp is None:
        return None
    # Accept either an ISO-8601 string or a datetime value from a TimestampType column
    if isinstance(timestamp, str):
        timestamp = parse_date(timestamp)
    return timestamp.strftime('%Y%m%d%H')
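
# Example (illustrative, not from the original file): an input of
# '2018-12-31T23:57:00' is formatted as '2018123123'.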

def run_spark_job(spark):
    # Spark configuration for the Kafka source: read from the local broker,
    # start from the earliest offsets, and cap each trigger at 200 offsets
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", "localhost:9092") \
        .option("subscribe", "service-calls") \
        .option("maxOffsetsPerTrigger", "200") \
        .option("startingOffsets", "earliest") \
        .load()

    # Show the schema of the raw Kafka source for a sanity check
    df.printSchema()

    # Take only the value column and cast it to a string
    kafka_df = df.selectExpr("CAST(value AS STRING)")

    service_table = kafka_df \
        .select(psf.from_json(psf.col('value'), schema).alias("SERVICE_CALLS")) \
        .select("SERVICE_CALLS.*")

    distinct_table = service_table \
        .select(psf.col('crime_id'),
                psf.col('original_crime_type_name'),
                psf.to_timestamp(psf.col('call_date_time')).alias('call_datetime'),
                psf.col('address'),
                psf.col('disposition'))

    # TODO get the different original_crime_type_name values in 60-minute intervals;
    # for now only a 60-minute watermark is applied (a possible aggregation sketch follows)
    counts_df = distinct_table \
        .withWatermark("call_datetime", "60 minutes")
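
    # A possible sketch of the per-hour breakdown described above, assuming a
    # 60-minute tumbling window over the watermarked stream is what is wanted.
    # The name hourly_type_counts is illustrative and this frame is not written out here.
    hourly_type_counts = counts_df \
        .groupBy(
            psf.window(psf.col('call_datetime'), "60 minutes"),
            psf.col('original_crime_type_name')
        ).count()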

    # Use the UDF to convert the call_datetime column to the YYYYmmddHH string format
    converted_df = distinct_table \
        .select(psf.col('crime_id'),
                psf.col('original_crime_type_name'),
                udf_convert_time(psf.col('call_datetime')).alias('call_hour'),
                psf.col('address'),
                psf.col('disposition'))

    # Aggregate with a window function to see how many calls of each type occurred over a 2-day span
    calls_per_2_days = counts_df \
        .groupBy(
            psf.window(psf.col('call_datetime'), "2 days"),
            psf.col('original_crime_type_name')
        ).count()

    # Write the aggregated output stream to the console
    query = calls_per_2_days \
        .writeStream \
        .outputMode('complete') \
        .format('console') \
        .start()

    # TODO attach a ProgressReporter (a simple polling sketch follows)
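    # A minimal sketch of progress reporting in place of a dedicated ProgressReporter
    # (assumes periodic polling is acceptable): roughly every 10 seconds, log the most
    # recent progress information that Structured Streaming exposes for the query.
    while query.isActive:
        if query.lastProgress is not None:
            logging.getLogger(__name__).info(json.dumps(query.lastProgress))
        query.awaitTermination(10)
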
    query.awaitTermination()

if __name__ == "__main__":
    # Basic logging setup so the INFO message below is actually emitted
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Create a Spark session in local mode
    spark = SparkSession.builder.appName('SF Crime Statistics').master("local").getOrCreate()
    logger.info("Spark started")

    run_spark_job(spark)
    spark.stop()
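
# Example invocation (a hedged sketch, not from the original file): assumes a local
# Kafka broker on localhost:9092 serving the "service-calls" topic and a
# spark-sql-kafka package matching your Spark/Scala versions, e.g.
#   spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.5 data_stream.py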