# columns.py
from __future__ import annotations

import logging
import re
from collections.abc import Callable, Iterable, Iterator
from dataclasses import dataclass, replace
from datetime import timedelta
from typing import Any, Literal, TypeVar, cast

from clickhouse_driver import Client
from django.utils.timezone import now

from posthog.cache_utils import cache_for
from posthog.clickhouse.cluster import ClickhouseCluster, FuturesMap, HostInfo, get_cluster
from posthog.clickhouse.kafka_engine import trim_quotes_expr
from posthog.clickhouse.materialized_columns import ColumnName, TablesWithMaterializedColumns
from posthog.client import sync_execute
from posthog.models.event.sql import EVENTS_DATA_TABLE
from posthog.models.person.sql import PERSONS_TABLE
from posthog.models.property import PropertyName, TableColumn, TableWithProperties
from posthog.models.utils import generate_random_short_suffix
from posthog.settings import CLICKHOUSE_DATABASE, TEST

logger = logging.getLogger(__name__)

T = TypeVar("T")

DEFAULT_TABLE_COLUMN: Literal["properties"] = "properties"

SHORT_TABLE_COLUMN_NAME = {
    "properties": "p",
    "group_properties": "gp",
    "person_properties": "pp",
    "group0_properties": "gp0",
    "group1_properties": "gp1",
    "group2_properties": "gp2",
    "group3_properties": "gp3",
    "group4_properties": "gp4",
}
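# For illustration (behavior derived from _materialized_column_name below): these short aliases
# keep generated column names compact. Materializing "$browser" from "person_properties" on the
# events table yields a column named "mat_pp_$browser", while the default "properties" source
# column omits the alias entirely, yielding "mat_$browser".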
@dataclass
class MaterializedColumn:
    name: ColumnName
    details: MaterializedColumnDetails
    is_nullable: bool

    @property
    def type(self) -> str:
        if self.is_nullable:
            return "Nullable(String)"
        else:
            return "String"

    def get_expression_and_parameters(self) -> tuple[str, dict[str, Any]]:
        if self.is_nullable:
            return (
                f"JSONExtract({self.details.table_column}, %(property_name)s, %(property_type)s)",
                {"property_name": self.details.property_name, "property_type": self.type},
            )
        else:
            return (
                trim_quotes_expr(f"JSONExtractRaw({self.details.table_column}, %(property)s)"),
                {"property": self.details.property_name},
            )
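    # A sketch of the two shapes this produces (parameter values are illustrative; the exact
    # non-nullable output depends on trim_quotes_expr in posthog.clickhouse.kafka_engine):
    #   nullable:     JSONExtract(properties, %(property_name)s, %(property_type)s)
    #                 with {"property_name": "$browser", "property_type": "Nullable(String)"}
    #   non-nullable: JSONExtractRaw(properties, %(property)s), wrapped so the surrounding JSON
    #                 string quotes are trimmed, with {"property": "$browser"}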
    @staticmethod
    def get_all(table: TablesWithMaterializedColumns) -> Iterator[MaterializedColumn]:
        rows = sync_execute(
            """
            SELECT name, comment, type LIKE 'Nullable(%%)' AS is_nullable
            FROM system.columns
            WHERE database = %(database)s
                AND table = %(table)s
                AND comment LIKE '%%column_materializer::%%'
                AND comment NOT LIKE '%%column_materializer::elements_chain::%%'
            """,
            {"database": CLICKHOUSE_DATABASE, "table": table},
        )

        for name, comment, is_nullable in rows:
            yield MaterializedColumn(name, MaterializedColumnDetails.from_column_comment(comment), is_nullable)

    @staticmethod
    def get(table: TablesWithMaterializedColumns, column_name: ColumnName) -> MaterializedColumn:
        # TODO: It would be more efficient to push this filter down into the `get_all` query, but that
        # would require a more sophisticated method of constructing queries than we have right now, and
        # this data set should be small enough that it doesn't really matter (at least as of writing.)
        columns = [column for column in MaterializedColumn.get_all(table) if column.name == column_name]
        match columns:
            case []:
                raise ValueError("column does not exist")
            case [column]:
                return column
            case _:
                # this should never happen (column names are unique within a table) and suggests an error in the query
                raise ValueError(f"got {len(columns)} columns, expected 0 or 1")
@dataclass(frozen=True)
class MaterializedColumnDetails:
    table_column: TableColumn
    property_name: PropertyName
    is_disabled: bool

    COMMENT_PREFIX = "column_materializer"
    COMMENT_SEPARATOR = "::"
    COMMENT_DISABLED_MARKER = "disabled"

    def as_column_comment(self) -> str:
        bits = [self.COMMENT_PREFIX, self.table_column, self.property_name]
        if self.is_disabled:
            bits.append(self.COMMENT_DISABLED_MARKER)
        return self.COMMENT_SEPARATOR.join(bits)

    @classmethod
    def from_column_comment(cls, comment: str) -> MaterializedColumnDetails:
        match comment.split(cls.COMMENT_SEPARATOR, 3):
            # Old style comments have the format "column_materializer::property", dealing with the default table column.
            case [cls.COMMENT_PREFIX, property_name]:
                return MaterializedColumnDetails(DEFAULT_TABLE_COLUMN, property_name, is_disabled=False)
            # Otherwise, it's "column_materializer::table_column::property" for columns that are active.
            case [cls.COMMENT_PREFIX, table_column, property_name]:
                return MaterializedColumnDetails(cast(TableColumn, table_column), property_name, is_disabled=False)
            # Columns that are marked as disabled have an extra trailer indicating their status.
            case [cls.COMMENT_PREFIX, table_column, property_name, cls.COMMENT_DISABLED_MARKER]:
                return MaterializedColumnDetails(cast(TableColumn, table_column), property_name, is_disabled=True)
            case _:
                raise ValueError(f"unexpected comment format: {comment!r}")
def get_materialized_columns(
    table: TablesWithMaterializedColumns,
) -> dict[tuple[PropertyName, TableColumn], MaterializedColumn]:
    return {
        (column.details.property_name, column.details.table_column): column
        for column in MaterializedColumn.get_all(table)
    }


@cache_for(timedelta(minutes=15))
def get_enabled_materialized_columns(
    table: TablesWithMaterializedColumns,
) -> dict[tuple[PropertyName, TableColumn], MaterializedColumn]:
    return {k: column for k, column in get_materialized_columns(table).items() if not column.details.is_disabled}
@dataclass
class TableInfo:
    data_table: str

    @property
    def read_table(self) -> str:
        return self.data_table

    def map_data_nodes(self, cluster: ClickhouseCluster, fn: Callable[[Client], T]) -> FuturesMap[HostInfo, T]:
        return cluster.map_all_hosts(fn)


@dataclass
class ShardedTableInfo(TableInfo):
    dist_table: str

    @property
    def read_table(self) -> str:
        return self.dist_table

    def map_data_nodes(self, cluster: ClickhouseCluster, fn: Callable[[Client], T]) -> FuturesMap[HostInfo, T]:
        return cluster.map_one_host_per_shard(fn)


tables: dict[str, TableInfo | ShardedTableInfo] = {
    PERSONS_TABLE: TableInfo(PERSONS_TABLE),
    "events": ShardedTableInfo(EVENTS_DATA_TABLE(), "events"),
}
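# Sketch of the topology this encodes (assuming the usual PostHog setup, where EVENTS_DATA_TABLE()
# resolves to the sharded data table behind the distributed "events" table): person data lives in a
# single non-sharded table, so schema changes run on every host against the same table; event data
# is sharded, so data-table DDL runs on one host per shard, while the distributed read table is a
# separate object that must be altered on all hosts.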
def get_minmax_index_name(column: str) -> str:
    return f"minmax_{column}"


@dataclass
class CreateColumnOnDataNodesTask:
    table: str
    column: MaterializedColumn
    create_minmax_index: bool
    add_column_comment: bool

    def execute(self, client: Client) -> None:
        expression, parameters = self.column.get_expression_and_parameters()
        actions = [
            f"ADD COLUMN IF NOT EXISTS {self.column.name} {self.column.type} MATERIALIZED {expression}",
        ]

        if self.add_column_comment:
            actions.append(f"COMMENT COLUMN {self.column.name} %(comment)s")
            parameters["comment"] = self.column.details.as_column_comment()

        if self.create_minmax_index:
            index_name = get_minmax_index_name(self.column.name)
            actions.append(f"ADD INDEX IF NOT EXISTS {index_name} {self.column.name} TYPE minmax GRANULARITY 1")

        client.execute(
            f"ALTER TABLE {self.table} " + ", ".join(actions),
            parameters,
            settings={"alter_sync": 2 if TEST else 1},
        )
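# With all flags enabled, this builds a single statement along these lines (column name, table,
# and extract expression are illustrative, not captured from a real run):
#   ALTER TABLE <data table>
#       ADD COLUMN IF NOT EXISTS mat_$browser String MATERIALIZED <extract expression>,
#       COMMENT COLUMN mat_$browser %(comment)s,
#       ADD INDEX IF NOT EXISTS minmax_mat_$browser mat_$browser TYPE minmax GRANULARITY 1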
@dataclass
class CreateColumnOnQueryNodesTask:
    table: str
    column: MaterializedColumn

    def execute(self, client: Client) -> None:
        client.execute(
            f"""
            ALTER TABLE {self.table}
            ADD COLUMN IF NOT EXISTS {self.column.name} {self.column.type},
            COMMENT COLUMN {self.column.name} %(comment)s
            """,
            {"comment": self.column.details.as_column_comment()},
            settings={"alter_sync": 2 if TEST else 1},
        )
def materialize(
    table: TableWithProperties,
    property: PropertyName,
    column_name: ColumnName | None = None,
    table_column: TableColumn = DEFAULT_TABLE_COLUMN,
    create_minmax_index: bool = not TEST,
    is_nullable: bool = False,
) -> MaterializedColumn:
    if existing_column := get_materialized_columns(table).get((property, table_column)):
        if TEST:
            return existing_column

        raise ValueError(f"Property already materialized. table={table}, property={property}, column={table_column}")

    if table_column not in SHORT_TABLE_COLUMN_NAME:
        raise ValueError(f"Invalid table_column={table_column} for materialisation")

    cluster = get_cluster()
    table_info = tables[table]

    column = MaterializedColumn(
        name=column_name or _materialized_column_name(table, property, table_column),
        details=MaterializedColumnDetails(
            table_column=table_column,
            property_name=property,
            is_disabled=False,
        ),
        is_nullable=is_nullable,
    )

    table_info.map_data_nodes(
        cluster,
        CreateColumnOnDataNodesTask(
            table_info.data_table,
            column,
            create_minmax_index,
            add_column_comment=table_info.read_table == table_info.data_table,
        ).execute,
    ).result()

    if isinstance(table_info, ShardedTableInfo):
        cluster.map_all_hosts(
            CreateColumnOnQueryNodesTask(
                table_info.dist_table,
                column,
            ).execute
        ).result()

    return column
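# A minimal usage sketch (a hypothetical call, not taken from this module's tests): materialize
# the "$browser" event property, then backfill the last 90 days so historical rows are populated
# too. Newly inserted rows are computed automatically via the MATERIALIZED expression.
#   column = materialize("events", "$browser")
#   backfill_materialized_columns("events", [column], backfill_period=timedelta(days=90))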
@dataclass
class UpdateColumnCommentTask:
    table: str
    columns: list[MaterializedColumn]

    def execute(self, client: Client) -> None:
        actions = []
        parameters = {}
        for i, column in enumerate(self.columns):
            parameter_name = f"comment_{i}"
            actions.append(f"COMMENT COLUMN {column.name} %({parameter_name})s")
            parameters[parameter_name] = column.details.as_column_comment()

        client.execute(
            f"ALTER TABLE {self.table} " + ", ".join(actions),
            parameters,
            settings={"alter_sync": 2 if TEST else 1},
        )


def update_column_is_disabled(
    table: TablesWithMaterializedColumns, column_names: Iterable[str], is_disabled: bool
) -> None:
    cluster = get_cluster()
    table_info = tables[table]

    columns = [MaterializedColumn.get(table, column_name) for column_name in column_names]

    cluster.map_all_hosts(
        UpdateColumnCommentTask(
            table_info.read_table,
            [replace(column, details=replace(column.details, is_disabled=is_disabled)) for column in columns],
        ).execute
    ).result()
def check_index_exists(client: Client, table: str, index: str) -> bool:
    [(count,)] = client.execute(
        """
        SELECT count()
        FROM system.data_skipping_indices
        WHERE database = currentDatabase() AND table = %(table)s AND name = %(name)s
        """,
        {"table": table, "name": index},
    )
    assert 1 >= count >= 0
    return bool(count)


def check_column_exists(client: Client, table: str, column: str) -> bool:
    [(count,)] = client.execute(
        """
        SELECT count()
        FROM system.columns
        WHERE database = currentDatabase() AND table = %(table)s AND name = %(name)s
        """,
        {"table": table, "name": column},
    )
    assert 1 >= count >= 0
    return bool(count)
@dataclass
class DropColumnTask:
    table: str
    column_names: list[str]
    try_drop_index: bool

    def execute(self, client: Client) -> None:
        actions = []

        for column_name in self.column_names:
            if self.try_drop_index:
                index_name = get_minmax_index_name(column_name)
                drop_index_action = f"DROP INDEX IF EXISTS {index_name}"
                if check_index_exists(client, self.table, index_name):
                    actions.append(drop_index_action)
                else:
                    logger.info("Skipping %r, nothing to do...", drop_index_action)

            drop_column_action = f"DROP COLUMN IF EXISTS {column_name}"
            if check_column_exists(client, self.table, column_name):
                actions.append(drop_column_action)
            else:
                logger.info("Skipping %r, nothing to do...", drop_column_action)

        if actions:
            client.execute(
                f"ALTER TABLE {self.table} " + ", ".join(actions),
                settings={"alter_sync": 2 if TEST else 1},
            )
def drop_column(table: TablesWithMaterializedColumns, column_names: Iterable[str]) -> None:
    cluster = get_cluster()
    table_info = tables[table]
    column_names = [*column_names]

    if isinstance(table_info, ShardedTableInfo):
        cluster.map_all_hosts(
            DropColumnTask(
                table_info.dist_table,
                column_names,
                try_drop_index=False,  # no indexes on distributed tables
            ).execute
        ).result()

    table_info.map_data_nodes(
        cluster,
        DropColumnTask(
            table_info.data_table,
            column_names,
            try_drop_index=True,
        ).execute,
    ).result()
@dataclass
class BackfillColumnTask:
    table: str
    columns: list[MaterializedColumn]
    backfill_period: timedelta | None
    test_settings: dict[str, Any] | None

    def execute(self, client: Client) -> None:
        # Hack from https://github.com/ClickHouse/ClickHouse/issues/19785
        # Note that for this to work, all inserts should list columns explicitly.
        # Improve this if https://github.com/ClickHouse/ClickHouse/issues/27730 ever gets resolved.
        for column in self.columns:
            expression, parameters = column.get_expression_and_parameters()
            client.execute(
                f"""
                ALTER TABLE {self.table}
                MODIFY COLUMN {column.name} {column.type} DEFAULT {expression}
                """,
                parameters,
                settings=self.test_settings,
            )

        # Kick off mutations which will update ClickHouse partitions in the background. This will return immediately.
        assignments = ", ".join(f"{column.name} = {column.name}" for column in self.columns)

        if self.backfill_period is not None:
            where_clause = "timestamp > %(cutoff)s"
            parameters = {"cutoff": (now() - self.backfill_period).strftime("%Y-%m-%d")}
        else:
            where_clause = "1 = 1"
            parameters = {}

        client.execute(
            f"ALTER TABLE {self.table} UPDATE {assignments} WHERE {where_clause}",
            parameters,
            settings=self.test_settings,
        )
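# For a single column and a backfill period, the mutation kicked off above looks roughly like
# (illustrative only; "mat_$browser" and the cutoff date are example values):
#   ALTER TABLE <data table> UPDATE mat_$browser = mat_$browser WHERE timestamp > '2024-01-01'
# The self-assignment works because reading the column on parts that never stored it falls back
# to the DEFAULT expression set above, and writing the value back persists it for those rows.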
def backfill_materialized_columns(
    table: TableWithProperties,
    columns: Iterable[MaterializedColumn],
    backfill_period: timedelta,
    test_settings=None,
) -> None:
    """
    Backfills the materialized columns after their creation.

    This requires reading and writing a lot of data on ClickHouse disk.
    """
    cluster = get_cluster()
    table_info = tables[table]

    table_info.map_data_nodes(
        cluster,
        BackfillColumnTask(
            table_info.data_table,
            [*columns],
            backfill_period if table == "events" else None,  # XXX
            test_settings,
        ).execute,
    ).result()
def _materialized_column_name(
    table: TableWithProperties,
    property: PropertyName,
    table_column: TableColumn = DEFAULT_TABLE_COLUMN,
) -> ColumnName:
    """Returns a sanitized and unique column name to use for a materialized column."""
    prefix = "pmat_" if table == "person" else "mat_"

    if table_column != DEFAULT_TABLE_COLUMN:
        prefix += f"{SHORT_TABLE_COLUMN_NAME[table_column]}_"

    property_str = re.sub("[^0-9a-zA-Z$]", "_", property)

    existing_materialized_column_names = {column.name for column in get_materialized_columns(table).values()}
    suffix = ""

    while f"{prefix}{property_str}{suffix}" in existing_materialized_column_names:
        suffix = "_" + generate_random_short_suffix()

    return f"{prefix}{property_str}{suffix}"