Merging community commits #47

Closed
wants to merge 7 commits
90 changes: 74 additions & 16 deletions contrib/auto_explain/t/001_auto_explain.pl
@@ -8,6 +8,42 @@
use PostgreSQL::Test::Utils;
use Test::More;

# Runs the specified query and returns the emitted server log.
# If any parameters are specified, these are set in postgresql.conf,
# and reset after the query is run.
sub query_log
{
my ($node, $sql, $params) = @_;
$params ||= {};

if (keys %$params)
{
for my $key (keys %$params)
{
$node->append_conf('postgresql.conf', "$key = $params->{$key}\n");
}
$node->reload;
}

my $log = $node->logfile();
my $offset = -s $log;

$node->safe_psql("postgres", $sql);

my $log_contents = slurp_file($log, $offset);

if (keys %$params)
{
for my $key (keys %$params)
{
$node->adjust_conf('postgresql.conf', $key, undef);
}
$node->reload;
}

return $log_contents;
}

my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
$node->append_conf('postgresql.conf',
@@ -16,39 +52,61 @@
$node->append_conf('postgresql.conf', "auto_explain.log_analyze = on");
$node->start;

# run a couple of queries
$node->safe_psql("postgres", "SELECT * FROM pg_class;");
$node->safe_psql("postgres",
"SELECT * FROM pg_proc WHERE proname = 'int4pl';");

# emit some json too
$node->append_conf('postgresql.conf', "auto_explain.log_format = json");
$node->reload;
$node->safe_psql("postgres", "SELECT * FROM pg_proc;");
$node->safe_psql("postgres",
"SELECT * FROM pg_class WHERE relname = 'pg_class';");

$node->stop('fast');
# Simple query.
my $log_contents = query_log($node, "SELECT * FROM pg_class;");

my $log = $node->logfile();

my $log_contents = slurp_file($log);
like(
$log_contents,
qr/Query Text: SELECT \* FROM pg_class;/,
"query text logged, text mode");

like(
$log_contents,
qr/Seq Scan on pg_class/,
"sequential scan logged, text mode");

# Prepared query.
$log_contents = query_log($node,
q{PREPARE get_proc(name) AS SELECT * FROM pg_proc WHERE proname = $1; EXECUTE get_proc('int4pl');}
);

like(
$log_contents,
qr/Query Text: PREPARE get_proc\(name\) AS SELECT \* FROM pg_proc WHERE proname = \$1;/,
"prepared query text logged, text mode");

like(
$log_contents,
qr/Index Scan using pg_proc_proname_args_nsp_index on pg_proc/,
"index scan logged, text mode");

# JSON format.
$log_contents = query_log(
$node,
"SELECT * FROM pg_proc;",
{ "auto_explain.log_format" => "json" });

like(
$log_contents,
qr/"Query Text": "SELECT \* FROM pg_proc;"/,
"query text logged, json mode");

like(
$log_contents,
qr/"Node Type": "Seq Scan"[^}]*"Relation Name": "pg_proc"/s,
"sequential scan logged, json mode");

# Prepared query in JSON format.
$log_contents = query_log(
$node,
q{PREPARE get_class(name) AS SELECT * FROM pg_class WHERE relname = $1; EXECUTE get_class('pg_class');},
{ "auto_explain.log_format" => "json" });

like(
$log_contents,
qr/"Query Text": "PREPARE get_class\(name\) AS SELECT \* FROM pg_class WHERE relname = \$1;"/,
"prepared query text logged, json mode");

like(
$log_contents,
qr/"Node Type": "Index Scan"[^}]*"Index Name": "pg_class_relname_nsp_index"/s,
4 changes: 2 additions & 2 deletions contrib/hstore/hstore_gist.c
@@ -459,7 +459,7 @@ ghstore_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_l) || ISALLTRUE(_j))
{
if (!ISALLTRUE(datum_l))
MemSet((void *) union_l, 0xff, siglen);
memset((void *) union_l, 0xff, siglen);
}
else
{
@@ -475,7 +475,7 @@ ghstore_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_r) || ISALLTRUE(_j))
{
if (!ISALLTRUE(datum_r))
MemSet((void *) union_r, 0xff, siglen);
memset((void *) union_r, 0xff, siglen);
}
else
{
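For context on the repeated MemSet → memset swaps across these contrib and backend files: PostgreSQL's MemSet macro (in src/include/c.h) only takes its hand-rolled word-at-a-time path when the fill value is zero; for any nonzero fill such as 0xff it falls through to a plain memset() call after some extra tests. The sketch below illustrates that shape; it is a simplified rendering of the idea, not the verbatim macro, and the 1024 loop limit is a stand-in for MEMSET_LOOP_LIMIT.

/* Simplified sketch of the logic behind the MemSet macro in src/include/c.h
 * (not its verbatim text): the inlined word loop is used only for zero fills
 * of aligned, small buffers; every other case, including the 0xff signature
 * fills touched by this patch, ends up in memset() anyway. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static inline void
memset_sketch(void *start, int val, size_t len)
{
	if (val == 0 &&
		((uintptr_t) start % sizeof(long)) == 0 &&
		(len % sizeof(long)) == 0 &&
		len <= 1024)			/* stand-in for MEMSET_LOOP_LIMIT */
	{
		long	   *p = (long *) start;
		long	   *stop = (long *) ((char *) start + len);

		while (p < stop)
			*p++ = 0;
	}
	else
		memset(start, val, len);	/* nonzero fills always land here */
}

So swapping MemSet(..., 0xff, siglen) for memset(..., 0xff, siglen) changes no behavior; it only removes branching that can never pay off for a nonzero fill.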
4 changes: 2 additions & 2 deletions contrib/intarray/_intbig_gist.c
@@ -420,7 +420,7 @@ g_intbig_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_l) || ISALLTRUE(_j))
{
if (!ISALLTRUE(datum_l))
MemSet((void *) union_l, 0xff, siglen);
memset((void *) union_l, 0xff, siglen);
}
else
{
@@ -436,7 +436,7 @@ g_intbig_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_r) || ISALLTRUE(_j))
{
if (!ISALLTRUE(datum_r))
MemSet((void *) union_r, 0xff, siglen);
memset((void *) union_r, 0xff, siglen);
}
else
{
4 changes: 2 additions & 2 deletions contrib/ltree/_ltree_gist.c
@@ -345,7 +345,7 @@ _ltree_picksplit(PG_FUNCTION_ARGS)
if (LTG_ISALLTRUE(datum_l) || LTG_ISALLTRUE(_j))
{
if (!LTG_ISALLTRUE(datum_l))
MemSet((void *) union_l, 0xff, siglen);
memset((void *) union_l, 0xff, siglen);
}
else
{
@@ -361,7 +361,7 @@ _ltree_picksplit(PG_FUNCTION_ARGS)
if (LTG_ISALLTRUE(datum_r) || LTG_ISALLTRUE(_j))
{
if (!LTG_ISALLTRUE(datum_r))
MemSet((void *) union_r, 0xff, siglen);
memset((void *) union_r, 0xff, siglen);
}
else
{
2 changes: 1 addition & 1 deletion contrib/oid2name/oid2name.c
@@ -424,7 +424,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
}
fprintf(stdout, "\n");
pad = (char *) pg_malloc(l + 1);
MemSet(pad, '-', l);
memset(pad, '-', l);
pad[l] = '\0';
fprintf(stdout, "%s\n", pad);
free(pad);
4 changes: 2 additions & 2 deletions contrib/pg_trgm/trgm_gist.c
@@ -914,7 +914,7 @@ gtrgm_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_l) || cache[j].allistrue)
{
if (!ISALLTRUE(datum_l))
MemSet((void *) GETSIGN(datum_l), 0xff, siglen);
memset((void *) GETSIGN(datum_l), 0xff, siglen);
}
else
{
@@ -930,7 +930,7 @@ gtrgm_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_r) || cache[j].allistrue)
{
if (!ISALLTRUE(datum_r))
MemSet((void *) GETSIGN(datum_r), 0xff, siglen);
memset((void *) GETSIGN(datum_r), 0xff, siglen);
}
else
{
3 changes: 3 additions & 0 deletions doc/src/sgml/ref/merge.sgml
@@ -4,6 +4,9 @@ PostgreSQL documentation
-->

<refentry id="sql-merge">
<indexterm zone="sql-merge">
<primary>MERGE</primary>
</indexterm>

<refmeta>
<refentrytitle>MERGE</refentrytitle>
4 changes: 2 additions & 2 deletions doc/src/sgml/ref/pg_dump.sgml
@@ -372,8 +372,8 @@ PostgreSQL documentation
<para>
Requesting exclusive locks on database objects while running a parallel dump could
cause the dump to fail. The reason is that the <application>pg_dump</application> leader process
requests shared locks on the objects that the worker processes are going to dump later
in order to
requests shared locks (<link linkend="locking-tables">ACCESS SHARE</link>) on the
objects that the worker processes are going to dump later in order to
make sure that nobody deletes them and makes them go away while the dump is running.
If another client then requests an exclusive lock on a table, that lock will not be
granted but will be queued waiting for the shared lock of the leader process to be
2 changes: 1 addition & 1 deletion src/backend/access/hash/hashovfl.c
@@ -760,7 +760,7 @@ _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)

/* set all of the bits to 1 */
freep = HashPageGetBitmap(pg);
MemSet(freep, 0xFF, bmsize);
memset(freep, 0xFF, bmsize);

/*
* Set pd_lower just past the end of the bitmap page data. We could even
2 changes: 1 addition & 1 deletion src/backend/access/transam/xlogrecovery.c
@@ -716,7 +716,7 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
* know how far we need to replay the WAL before we reach consistency.
* This can happen for example if a base backup is taken from a
* running server using an atomic filesystem snapshot, without calling
* pg_start/stop_backup. Or if you just kill a running primary server
* pg_backup_start/stop. Or if you just kill a running primary server
* and put it into archive recovery by creating a recovery signal
* file.
*
2 changes: 1 addition & 1 deletion src/backend/replication/basebackup.c
@@ -16,7 +16,7 @@
#include <unistd.h>
#include <time.h>

#include "access/xlog_internal.h" /* for pg_start/stop_backup */
#include "access/xlog_internal.h" /* for pg_backup_start/stop */
#include "common/compression.h"
#include "common/file_perm.h"
#include "commands/defrem.h"
2 changes: 1 addition & 1 deletion src/backend/replication/walreceiver.c
@@ -1410,7 +1410,7 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
* see details. Other users only get the pid value to know whether it
* is a WAL receiver, but no details.
*/
MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
memset(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
}
else
{
6 changes: 3 additions & 3 deletions src/backend/storage/ipc/dsm_impl.c
@@ -261,7 +261,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
if ((fd = shm_open(name, flags, PG_FILE_MODE_OWNER)) == -1)
{
ReleaseExternalFD();
if (errno != EEXIST)
if (op == DSM_OP_ATTACH || errno != EEXIST)
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not open shared memory segment \"%s\": %m",
@@ -500,7 +500,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,

if ((ident = shmget(key, segsize, flags)) == -1)
{
if (errno != EEXIST)
if (op == DSM_OP_ATTACH || errno != EEXIST)
{
int save_errno = errno;

@@ -822,7 +822,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
flags = O_RDWR | (op == DSM_OP_CREATE ? O_CREAT | O_EXCL : 0);
if ((fd = OpenTransientFile(name, flags)) == -1)
{
if (errno != EEXIST)
if (op == DSM_OP_ATTACH || errno != EEXIST)
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not open shared memory segment \"%s\": %m",
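The three dsm_impl.c hunks make the same adjustment: EEXIST used to be swallowed unconditionally, but it is only an expected outcome on the create path, where the segment is opened with O_CREAT | O_EXCL and the caller simply retries with a different handle. On DSM_OP_ATTACH there is no exclusive-create flag, so EEXIST (or any other errno) is a real failure that should reach ereport(). Below is a rough sketch of the create-side retry pattern this relies on, using hypothetical helper names rather than the actual dsm.c code.

/* Rough sketch (hypothetical names, not the actual dsm.c implementation) of
 * why EEXIST is tolerated silently only when creating: creation is retried
 * with fresh random handles until a free one is found.  Attaching has no
 * such retry loop, so there every error should be reported. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint32_t dsm_handle_sketch;

/* hypothetical stand-in for dsm_impl_op(DSM_OP_CREATE, ...); returns false
 * when the chosen handle is already in use */
extern bool try_create_segment(dsm_handle_sketch handle, size_t size);

static dsm_handle_sketch
create_segment_with_retry(size_t size)
{
	for (;;)
	{
		dsm_handle_sketch handle = (dsm_handle_sketch) random();

		if (try_create_segment(handle, size))
			return handle;		/* created; this handle is ours */

		/* presumably a handle collision (EEXIST): pick another and retry */
	}
}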
4 changes: 2 additions & 2 deletions src/backend/utils/adt/tsgistidx.c
@@ -753,7 +753,7 @@ gtsvector_picksplit(PG_FUNCTION_ARGS)
if (ISALLTRUE(datum_l) || cache[j].allistrue)
{
if (!ISALLTRUE(datum_l))
MemSet((void *) GETSIGN(datum_l), 0xff, siglen);
memset((void *) GETSIGN(datum_l), 0xff, siglen);
}
else
{
@@ -769,7 +769,7 @@
if (ISALLTRUE(datum_r) || cache[j].allistrue)
{
if (!ISALLTRUE(datum_r))
MemSet((void *) GETSIGN(datum_r), 0xff, siglen);
memset((void *) GETSIGN(datum_r), 0xff, siglen);
}
else
{
2 changes: 1 addition & 1 deletion src/backend/utils/cache/relcache.c
@@ -6260,7 +6260,7 @@ load_relcache_init_file(bool shared)
rel->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
rel->rd_droppedSubid = InvalidSubTransactionId;
rel->rd_amcache = NULL;
MemSet(&rel->pgstat_info, 0, sizeof(rel->pgstat_info));
rel->pgstat_info = NULL;

/*
* Recompute lock and physical addressing info. This is needed in