[BACKPORT pg15-cherrypicks][#22150] YSQL, QueryDiagnostics: EXPLAIN (ANALYZE, DIST) support for queryDiagnostics

Summary:
No merge conflict
- yb_query_diagnostics.c
  - YbQueryDiagnostics_ExecutorStart
    - Resolved compile errors by replacing the `MAX_RANDOM_VALUE`-based sampling check with its PG15 counterpart, `pg_prng_double(&pg_global_prng_state)` (a standalone sketch of the sampling semantics follows this list). Upstream PG commit 3804539e48e794781c6145c7f988f5d507418fa8 replaced the `random()` family with `pg_prng_double`. YB master commit: 40689bc
  - Imported the required header: `#include "common/pg_prng.h"`
    - Same as above (part of the PG15 PRNG change).
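
To make the sampling semantics concrete, here is a small standalone sketch (not part of this diff). In the backport the uniform [0, 1) draw comes from `pg_prng_double(&pg_global_prng_state)` from `common/pg_prng.h`; the sketch substitutes `rand()` so it compiles outside the PostgreSQL tree, and the `should_sample` helper and the 25% rate are illustrative only.

```
/* Standalone sketch of the sampling decision; rand() stands in for pg_prng_double(). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
should_sample(double uniform01, double explain_sample_rate)
{
    /* Same comparison as YbQueryDiagnostics_ExecutorStart: sample when the draw < rate/100. */
    return uniform01 < explain_sample_rate / 100.0;
}

int
main(void)
{
    int         sampled = 0;

    srand(42);
    for (int i = 0; i < 100000; i++)
        sampled += should_sample((double) rand() / ((double) RAND_MAX + 1.0), 25.0);

    /* With explain_sample_rate = 25, roughly a quarter of executions are sampled. */
    printf("sampled %d of 100000 executions\n", sampled);
    return 0;
}
```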

Original summary:
- This diff adds functionality for `explain_analyze` and `explain_dist` params.
- Note: This diff does not include code for supporting EXPLAIN (ANALYZE, DIST, DEBUG).
- The generated explain_plan.txt looks like:

```
duration: 6.407 ms
plan:
Sort  (cost=550.02..550.85 rows=333 width=116) (actual time=6.390..6.390 rows=0 loops=1)
  Sort Key: (sum(o.total_amount)) DESC, (count(DISTINCT o.order_id)) DESC
  Sort Method: quicksort  Memory: 25kB
  ->  GroupAggregate  (cost=496.90..536.07 rows=333 width=116) (actual time=6.377..6.377 rows=0 loops=1)
        Group Key: c.customer_id, ((((c.first_name)::text || ' '::text) || (c.last_name)::text))
        Filter: (count(DISTINCT o.order_id) > 0)
        ->  Sort  (cost=496.90..499.40 rows=1000 width=178) (actual time=6.374..6.374 rows=0 loops=1)
              Sort Key: c.customer_id, ((((c.first_name)::text || ' '::text) || (c.last_name)::text))
              Sort Method: quicksort  Memory: 25kB
              ->  YB Batched Nested Loop Left Join  (cost=117.50..447.07 rows=1000 width=178) (actual time=6.362..6.362 rows=0 loops=1)
                    Join Filter: (oi.product_id = p.product_id)
                    ->  YB Batched Nested Loop Join  (cost=117.50..328.19 rows=1000 width=268) (actual time=6.359..6.359 rows=0 loops=1)
                          Join Filter: (o.customer_id = c.customer_id)
                          ->  Hash Right Join  (cost=117.50..225.69 rows=1000 width=32) (actual time=6.354..6.355 rows=0 loops=1)
                                Hash Cond: (oi.order_id = o.order_id)
                                ->  Seq Scan on order_items oi  (cost=0.00..100.00 rows=1000 width=12) (never executed)
                                ->  Hash  (cost=105.00..105.00 rows=1000 width=24) (actual time=6.348..6.348 rows=0 loops=1)
                                      Buckets: 1024  Batches: 1  Memory Usage: 8kB
                                      ->  Seq Scan on orders o  (cost=0.00..105.00 rows=1000 width=24) (actual time=6.345..6.345 rows=0 loops=1)
                                            Storage Filter: ((order_date >= '2023-04-01'::date) AND (order_date <= '2023-05-31'::date))
                                            Storage Table Read Requests: 1
                                            Storage Table Read Execution Time: 0.000 ms
                          ->  Index Scan using customers_pkey on customers c  (cost=0.00..0.12 rows=1 width=240) (never executed)
                                Index Cond: (customer_id = ANY (ARRAY[o.customer_id, $1, $2, ..., $1023]))
                                Storage Filter: (registration_date >= '2023-01-01'::date)
                    ->  Index Scan using products_pkey on products p  (cost=0.00..0.11 rows=1 width=122) (never executed)
                          Index Cond: (product_id = ANY (ARRAY[oi.product_id, $1025, $1026, ..., $2047]))

duration: 6.559 ms
plan:
Sort  (cost=550.02..550.85 rows=333 width=116) (actual time=6.537..6.537 rows=0 loops=1)
  Sort Key: (sum(o.total_amount)) DESC, (count(DISTINCT o.order_id)) DESC
  Sort Method: quicksort  Memory: 25kB
  ->  GroupAggregate  (cost=496.90..536.07 rows=333 width=116) (actual time=6.526..6.526 rows=0 loops=1)
        Group Key: c.customer_id, ((((c.first_name)::text || ' '::text) || (c.last_name)::text))
        Filter: (count(DISTINCT o.order_id) > 0)
        ->  Sort  (cost=496.90..499.40 rows=1000 width=178) (actual time=6.523..6.523 rows=0 loops=1)
              Sort Key: c.customer_id, ((((c.first_name)::text || ' '::text) || (c.last_name)::text))
              Sort Method: quicksort  Memory: 25kB
              ->  YB Batched Nested Loop Left Join  (cost=117.50..447.07 rows=1000 width=178) (actual time=6.515..6.515 rows=0 loops=1)
                    Join Filter: (oi.product_id = p.product_id)
                    ->  YB Batched Nested Loop Join  (cost=117.50..328.19 rows=1000 width=268) (actual time=6.511..6.511 rows=0 loops=1)
                          Join Filter: (o.customer_id = c.customer_id)
                          ->  Hash Right Join  (cost=117.50..225.69 rows=1000 width=32) (actual time=6.506..6.506 rows=0 loops=1)
                                Hash Cond: (oi.order_id = o.order_id)
                                ->  Seq Scan on order_items oi  (cost=0.00..100.00 rows=1000 width=12) (never executed)
                                ->  Hash  (cost=105.00..105.00 rows=1000 width=24) (actual time=6.497..6.497 rows=0 loops=1)
                                      Buckets: 1024  Batches: 1  Memory Usage: 8kB
                                      ->  Seq Scan on orders o  (cost=0.00..105.00 rows=1000 width=24) (actual time=6.494..6.494 rows=0 loops=1)
                                            Storage Filter: ((order_date >= '2023-04-01'::date) AND (order_date <= '2023-05-31'::date))
                                            Storage Table Read Requests: 1
                                            Storage Table Read Execution Time: 0.000 ms
                          ->  Index Scan using customers_pkey on customers c  (cost=0.00..0.12 rows=1 width=240) (never executed)
                                Index Cond: (customer_id = ANY (ARRAY[o.customer_id, $1, $2, ..., $1023]))
                                Storage Filter: (registration_date >= '2023-01-01'::date)
                    ->  Index Scan using products_pkey on products p  (cost=0.00..0.11 rows=1 width=122) (never executed)
                          Index Cond: (product_id = ANY (ARRAY[oi.product_id, $1025, $1026, ..., $2047]))

```
Jira: DB-11078

Original commit: 40689bc / D35566

Test Plan:
./yb_build.sh --java-test TestYbQueryDiagnostics#checkExplainData

Testing with a large query that generates an EXPLAIN plan exceeding 16KB causes the output to be truncated and the remaining data to be lost. This is expected and will be handled later ([[ #23720 | #23720 ]]); a minimal sketch of the truncation behavior is included below.
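
For reference, a minimal standalone sketch of that truncation behavior, mirroring the `snprintf`-based append in `AccumulateExplain` below; the `append_plan` helper, the `main` driver, and the 20000-byte plan are hypothetical and only illustrate how anything beyond `YB_QD_MAX_EXPLAIN_PLAN_LEN` (16KB) is dropped.

```
/* Hypothetical standalone sketch of the 16KB explain-plan truncation. */
#include <stdio.h>
#include <string.h>

#define YB_QD_MAX_EXPLAIN_PLAN_LEN 16384    /* same limit as yb_query_diagnostics.h */

/* Append one "duration/plan" record, the way AccumulateExplain uses snprintf. */
static void
append_plan(char explain_plan[YB_QD_MAX_EXPLAIN_PLAN_LEN],
            double totaltime_ms, const char *plan_text)
{
    size_t      used = strlen(explain_plan);
    int         remaining_space = YB_QD_MAX_EXPLAIN_PLAN_LEN - used - 1;

    /* snprintf never writes past remaining_space, so an oversized plan is cut off. */
    if (remaining_space > 0)
        snprintf(explain_plan + used, remaining_space,
                 "duration: %.3f ms\nplan:\n%s\n\n", totaltime_ms, plan_text);
}

int
main(void)
{
    static char buffer[YB_QD_MAX_EXPLAIN_PLAN_LEN] = "";
    char        big_plan[20000];

    memset(big_plan, 'x', sizeof(big_plan) - 1);
    big_plan[sizeof(big_plan) - 1] = '\0';

    append_plan(buffer, 6.407, big_plan);
    printf("stored %zu of %zu plan bytes\n", strlen(buffer), strlen(big_plan));
    return 0;
}
```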

Reviewers: jason, tfoucher

Reviewed By: jason

Subscribers: ybase, yql

Differential Revision: https://phorge.dev.yugabyte.com/D37980
IshanChhangani committed Sep 12, 2024
1 parent 1af87bc commit ec1c068
Showing 2 changed files with 111 additions and 0 deletions.
102 changes: 102 additions & 0 deletions src/postgres/src/backend/utils/misc/yb_query_diagnostics.c
@@ -30,9 +30,12 @@
#include <unistd.h>

#include "access/hash.h"
#include "commands/explain.h"
#include "common/file_perm.h"
#include "common/pg_prng.h"
#include "common/pg_yb_common.h"
#include "funcapi.h"
#include "pg_yb_utils.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/ipc.h"
@@ -44,6 +47,7 @@
#define BIND_VAR_FILE "bind_variables.csv"
#define PGSS_FILE "pg_stat_statements.csv"
#define ASH_FILE "active_session_history.csv"
#define EXPLAIN_PLAN_FILE "explain_plan.txt"
/* Constants used for yb_query_diagnostics_status view */
#define YB_QUERY_DIAGNOSTICS_STATUS_COLS 8
#define DIAGNOSTICS_SUCCESS 0
@@ -70,6 +74,7 @@ int yb_query_diagnostics_bg_worker_interval_ms;
int yb_query_diagnostics_circular_buffer_size;

/* Saved hook value in case of unload */
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;

/* Flags set by interrupt handlers for later service in the main loop. */
@@ -82,7 +87,9 @@ static HTAB *bundles_in_progress = NULL;
static LWLock *bundles_in_progress_lock; /* protects bundles_in_progress hash table */
static YbQueryDiagnosticsBundles *bundles_completed = NULL;
static const char *status_msg[] = {"Success", "In Progress", "Error"};
static bool current_query_sampled = false;

static void YbQueryDiagnostics_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void YbQueryDiagnostics_ExecutorEnd(QueryDesc *queryDesc);

static void InsertNewBundleInfo(YbQueryDiagnosticsMetadata *metadata);
@@ -94,6 +101,8 @@ static bool DumpToFile(const char *path, const char *file_name, const char *data
static void RemoveExpiredEntries();
static void AccumulateBindVariables(YbQueryDiagnosticsEntry *entry,
                                    const double totaltime_ms, const ParamListInfo params);
static void AccumulateExplain(QueryDesc *queryDesc, YbQueryDiagnosticsEntry *entry,
                              bool explain_analyze, bool explain_dist, double totaltime_ms);
static void YbQueryDiagnosticsBgWorkerSighup(SIGNAL_ARGS);
static void YbQueryDiagnosticsBgWorkerSigterm(SIGNAL_ARGS);
static inline TimestampTz BundleEndTime(const YbQueryDiagnosticsEntry *entry);
@@ -116,6 +125,9 @@ static void AppendToErrorDescription(char *description, const char *format, ...)
void
YbQueryDiagnosticsInstallHook(void)
{
    prev_ExecutorStart = ExecutorStart_hook;
    ExecutorStart_hook = YbQueryDiagnostics_ExecutorStart;

    prev_ExecutorEnd = ExecutorEnd_hook;
    ExecutorEnd_hook = YbQueryDiagnostics_ExecutorEnd;
}
@@ -506,6 +518,40 @@ YbSetPgssNormalizedQueryText(int64 query_id, const Size query_offset, int query_
    LWLockRelease(bundles_in_progress_lock);
}

static void
YbQueryDiagnostics_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    uint64      query_id = queryDesc->plannedstmt->queryId;
    YbQueryDiagnosticsEntry *entry;

    LWLockAcquire(bundles_in_progress_lock, LW_SHARED);

    /*
     * This can slow down the query execution, even if the query is not being bundled.
     */
    entry = (YbQueryDiagnosticsEntry *) hash_search(bundles_in_progress,
                                                    &query_id, HASH_FIND,
                                                    NULL);

    if (entry)
        current_query_sampled = (pg_prng_double(&pg_global_prng_state) <
                                 entry->metadata.params.explain_sample_rate / 100.0);
    else
        current_query_sampled = false;

    /* Enable per-node instrumentation iff explain_analyze is required. */
    if (current_query_sampled &&
        (entry->metadata.params.explain_analyze && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0))
        queryDesc->instrument_options |= INSTRUMENT_ALL;

    LWLockRelease(bundles_in_progress_lock);

    if (prev_ExecutorStart)
        prev_ExecutorStart(queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

static void
YbQueryDiagnostics_ExecutorEnd(QueryDesc *queryDesc)
{
@@ -532,6 +578,11 @@ YbQueryDiagnostics_ExecutorEnd(QueryDesc *queryDesc)
        AccumulateBindVariables(entry, totaltime_ms, queryDesc->params);

        AccumulatePgss(queryDesc, entry);

        if (current_query_sampled)
            AccumulateExplain(queryDesc, entry,
                              entry->metadata.params.explain_analyze,
                              entry->metadata.params.explain_dist, totaltime_ms);
    }

    LWLockRelease(bundles_in_progress_lock);
@@ -649,6 +700,47 @@ PgssToString(int64 query_id, char *pgss_str, YbQueryDiagnosticsPgss pgss, const
pgss.counters.blk_read_time, pgss.counters.blk_write_time);
}

static void
AccumulateExplain(QueryDesc *queryDesc, YbQueryDiagnosticsEntry *entry, bool explain_analyze,
                  bool explain_dist, double totaltime_ms)
{
    ExplainState *es = NewExplainState();

    es->analyze = (queryDesc->instrument_options && explain_analyze);
    es->verbose = false;
    es->buffers = es->analyze;
    es->timing = es->analyze;
    es->summary = es->analyze;
    es->format = EXPLAIN_FORMAT_TEXT;
    es->rpc = (es->analyze && explain_dist);

    /* Note: this part of code comes from auto_explain.c */
    ExplainPrintPlan(es, queryDesc);
    if (es->analyze)
        ExplainPrintTriggers(es, queryDesc);
    if (es->costs)
        ExplainPrintJITSummary(es, queryDesc);

    /* Removes the trailing newline that might be introduced by ExplainPrintJITSummary */
    if (es->str->len > 0 && es->str->data[es->str->len - 1] == '\n')
        es->str->data[--es->str->len] = '\0';

    SpinLockAcquire(&entry->mutex);

    /* TODO(GH#23720): Add support for handling oversized explain plans */
    int remaining_space = sizeof(entry->explain_plan) - strlen(entry->explain_plan) - 1;
    if (remaining_space > 0)
        snprintf(entry->explain_plan + strlen(entry->explain_plan), remaining_space,
                 "duration: %.3f ms\nplan:\n%s\n\n", totaltime_ms, es->str->data);

    SpinLockRelease(&entry->mutex);

    pfree(es->str->data);
    pfree(es->str);
    pfree(es);
}


/*
 * FormatParams
 *    Iterates over all of the params and prints them in CSV format.
@@ -705,6 +797,7 @@ InsertNewBundleInfo(YbQueryDiagnosticsMetadata *metadata)
    {
        entry->metadata = *metadata;
        MemSet(entry->bind_vars, 0, YB_QD_MAX_BIND_VARS_LEN);
        MemSet(entry->explain_plan, 0, YB_QD_MAX_EXPLAIN_PLAN_LEN);
        SpinLockInit(&entry->mutex);
        entry->pgss = (YbQueryDiagnosticsPgss) {.counters = {0}, .query_offset = 0, .query_len = 0};
    }
@@ -775,11 +868,13 @@ RemoveExpiredEntries()
        char        bind_var_copy[YB_QD_MAX_BIND_VARS_LEN];
        char        description[YB_QD_DESCRIPTION_LEN];
        int         status = DIAGNOSTICS_SUCCESS;
        char        explain_plan_copy[YB_QD_MAX_EXPLAIN_PLAN_LEN];
        YbQueryDiagnosticsMetadata metadata_copy = entry->metadata;
        YbQueryDiagnosticsPgss pgss_copy = entry->pgss;

        memcpy(bind_var_copy, entry->bind_vars, YB_QD_MAX_BIND_VARS_LEN);
        memcpy(&metadata_copy, &entry->metadata, sizeof(YbQueryDiagnosticsMetadata));
        memcpy(explain_plan_copy, entry->explain_plan, YB_QD_MAX_EXPLAIN_PLAN_LEN);
        description[0] = '\0';

        SpinLockRelease(&entry->mutex);
@@ -830,6 +925,13 @@ RemoveExpiredEntries()
        if (status == DIAGNOSTICS_ERROR)
            goto removeEntry;

        /* Dump explain plan */
        has_data_to_dump |= DumpToFile(metadata_copy.path, EXPLAIN_PLAN_FILE,
                                       explain_plan_copy, &status, description);

        if (status == DIAGNOSTICS_ERROR)
            goto removeEntry;

        /* Dump ASH */
        if (yb_ash_enable_infra)
        {
9 changes: 9 additions & 0 deletions src/postgres/src/include/yb_query_diagnostics.h
@@ -37,6 +37,12 @@
#define YB_QD_MAX_PGSS_LEN 2048
#define YB_QD_DESCRIPTION_LEN 128

/*
 * Currently, if the explain plan is larger than 16KB, we truncate it.
 * GitHub issue #23720 tracks handling queries with explain plans exceeding 16KB.
 */
#define YB_QD_MAX_EXPLAIN_PLAN_LEN 16384

/* GUC variables */
extern int yb_query_diagnostics_bg_worker_interval_ms;
extern int yb_query_diagnostics_circular_buffer_size;
@@ -126,6 +132,9 @@ typedef struct YbQueryDiagnosticsEntry

    /* Holds the pg_stat_statements data until flushed to disc */
    YbQueryDiagnosticsPgss pgss;

    /* Holds the explain plan data until flushed to disc */
    char        explain_plan[YB_QD_MAX_EXPLAIN_PLAN_LEN];
} YbQueryDiagnosticsEntry;

typedef void (*YbGetNormalizedQueryFuncPtr)(Size query_offset, int query_len, char *normalized_query);
