Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 1944cf0

Browse files
nathan-bossart authored and
Commitfest Bot
committed
pg_upgrade: Use COPY for large object metadata.
Reported-by: Hannu Krosing <hannuk@google.com>
Suggested-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/CAMT0RQSS-6qLH%2BzYsOeUbAYhop3wmQTkNmQpo5--QRDUR%2BqYmQ%40mail.gmail.com
1 parent 44ce4e1 commit 1944cf0

File tree

4 files changed

+90
-7
lines changed

4 files changed

+90
-7
lines changed

src/bin/pg_dump/pg_backup_archiver.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@
3131
#endif
3232

3333
#include "catalog/pg_class_d.h"
34+
#include "catalog/pg_largeobject_metadata.h"
35+
#include "catalog/pg_shdepend.h"
3436
#include "common/string.h"
3537
#include "compress_io.h"
3638
#include "dumputils.h"
@@ -2974,6 +2976,17 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
29742976
int res = REQ_SCHEMA | REQ_DATA;
29752977
RestoreOptions *ropt = AH->public.ropt;
29762978

2979+
/*
2980+
* For binary upgrade mode, dump pg_largeobject_metadata and the
2981+
* associated pg_shdepend rows. This is faster to restore than the
2982+
* equivalent set of large object commands.
2983+
*/
2984+
if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
2985+
strcmp(te->desc, "TABLE DATA") == 0 &&
2986+
(te->catalogId.oid == LargeObjectMetadataRelationId ||
2987+
te->catalogId.oid == SharedDependRelationId))
2988+
return REQ_DATA;
2989+
29772990
/* These items are treated specially */
29782991
if (strcmp(te->desc, "ENCODING") == 0 ||
29792992
strcmp(te->desc, "STDSTRINGS") == 0 ||

src/bin/pg_dump/pg_dump.c

Lines changed: 73 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,10 @@
4949
#include "catalog/pg_class_d.h"
5050
#include "catalog/pg_default_acl_d.h"
5151
#include "catalog/pg_largeobject_d.h"
52+
#include "catalog/pg_largeobject_metadata_d.h"
5253
#include "catalog/pg_proc_d.h"
5354
#include "catalog/pg_publication_d.h"
55+
#include "catalog/pg_shdepend.h"
5456
#include "catalog/pg_subscription_d.h"
5557
#include "catalog/pg_type_d.h"
5658
#include "common/connect.h"
@@ -209,6 +211,12 @@ static int nbinaryUpgradeClassOids = 0;
209211
static SequenceItem *sequences = NULL;
210212
static int nsequences = 0;
211213

214+
/*
215+
* For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
216+
* as a dependency for pg_shdepend and any large object comments/seclabels.
217+
*/
218+
static DumpId lo_metadata_dumpId;
219+
212220
/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
213221
#define MAX_ATTR_STATS_RELS 64
214222

@@ -1083,6 +1091,34 @@ main(int argc, char **argv)
10831091
if (!dopt.dumpData && dopt.sequence_data)
10841092
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
10851093

1094+
/*
1095+
* For binary upgrade mode, dump pg_largeobject_metadata and the
1096+
* associated pg_shdepend rows. This is faster to restore than the
1097+
* equivalent set of large object commands.
1098+
*/
1099+
if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
1100+
{
1101+
TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
1102+
TableInfo *shdepend = findTableByOid(SharedDependRelationId);
1103+
1104+
makeTableDataInfo(&dopt, lo_metadata);
1105+
makeTableDataInfo(&dopt, shdepend);
1106+
1107+
/*
1108+
* Save pg_largeobject_metadata's dump ID for use as a dependency on
1109+
* pg_shdepend and any large object comments/seclabels.
1110+
*/
1111+
lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
1112+
addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
1113+
1114+
/*
1115+
* Only dump large object shdepend rows for this database.
1116+
*/
1117+
shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
1118+
"AND dbid = (SELECT oid FROM pg_database "
1119+
" WHERE datname = current_database())";
1120+
}
1121+
10861122
/*
10871123
* In binary-upgrade mode, we do not have to worry about the actual LO
10881124
* data or the associated metadata that resides in the pg_largeobject and
@@ -3922,10 +3958,29 @@ getLOs(Archive *fout)
39223958
* as it will be copied by pg_upgrade, which simply copies the
39233959
* pg_largeobject table. We *do* however dump out anything but the
39243960
* data, as pg_upgrade copies just pg_largeobject, but not
3925-
* pg_largeobject_metadata, after the dump is restored.
3961+
* pg_largeobject_metadata, after the dump is restored. In versions
3962+
* before v12, this is done via proper large object commands. In
3963+
* newer versions, we dump the content of pg_largeobject_metadata and
3964+
* any associated pg_shdepend rows, which is faster to restore.
39263965
*/
39273966
if (dopt->binary_upgrade)
3928-
loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
3967+
{
3968+
if (fout->remoteVersion >= 120000)
3969+
{
3970+
loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
3971+
3972+
/*
3973+
* Mark the large object as dependent on
3974+
* pg_largeobject_metadata so that any large object
3975+
* comments/seclabels are dumped after it.
3976+
*/
3977+
loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
3978+
loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
3979+
loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
3980+
}
3981+
else
3982+
loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
3983+
}
39293984

39303985
/*
39313986
* Create a "BLOBS" data item for the group, too. This is just a
@@ -9034,8 +9089,18 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
90349089
if (tbinfo->relkind == RELKIND_SEQUENCE)
90359090
continue;
90369091

9037-
/* Don't bother with uninteresting tables, either */
9038-
if (!tbinfo->interesting)
9092+
/*
9093+
* Don't bother with uninteresting tables, either. For binary
9094+
* upgrades, this is bypassed for pg_largeobject_metadata and
9095+
* pg_shdepend so that the columns names are collected for the
9096+
* corresponding COPY commands. Restoring the data for those catalogs
9097+
* is faster than restoring the equivalent set of large object
9098+
* commands.
9099+
*/
9100+
if (!tbinfo->interesting &&
9101+
!(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9102+
(tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9103+
tbinfo->dobj.catId.oid == SharedDependRelationId)))
90399104
continue;
90409105

90419106
/* OK, we need info for this table */
@@ -9232,7 +9297,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
92329297
pg_fatal("unrecognized table OID %u", attrelid);
92339298
/* cross-check that we only got requested tables */
92349299
if (tbinfo->relkind == RELKIND_SEQUENCE ||
9235-
!tbinfo->interesting)
9300+
(!tbinfo->interesting &&
9301+
!(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9302+
(tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9303+
tbinfo->dobj.catId.oid == SharedDependRelationId))))
92369304
pg_fatal("unexpected column data for table \"%s\"",
92379305
tbinfo->dobj.name);
92389306

src/bin/pg_dump/pg_dump_sort.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,10 +76,10 @@ enum dbObjectTypePriorities
7676
PRIO_TABLE_ATTACH,
7777
PRIO_DUMMY_TYPE,
7878
PRIO_ATTRDEF,
79-
PRIO_LARGE_OBJECT,
8079
PRIO_PRE_DATA_BOUNDARY, /* boundary! */
8180
PRIO_TABLE_DATA,
8281
PRIO_SEQUENCE_SET,
82+
PRIO_LARGE_OBJECT,
8383
PRIO_LARGE_OBJECT_DATA,
8484
PRIO_STATISTICS_DATA_DATA,
8585
PRIO_POST_DATA_BOUNDARY, /* boundary! */

src/bin/pg_dump/t/002_pg_dump.pl

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1029,6 +1029,7 @@
10291029
test_schema_plus_large_objects => 1,
10301030
},
10311031
unlike => {
1032+
binary_upgrade => 1,
10321033
no_large_objects => 1,
10331034
no_owner => 1,
10341035
schema_only => 1,
@@ -1517,6 +1518,7 @@
15171518
test_schema_plus_large_objects => 1,
15181519
},
15191520
unlike => {
1521+
binary_upgrade => 1,
15201522
schema_only => 1,
15211523
schema_only_with_statistics => 1,
15221524
no_large_objects => 1,
@@ -4524,9 +4526,9 @@
45244526
no_schema => 1,
45254527
section_data => 1,
45264528
test_schema_plus_large_objects => 1,
4527-
binary_upgrade => 1,
45284529
},
45294530
unlike => {
4531+
binary_upgrade => 1,
45304532
no_large_objects => 1,
45314533
no_privs => 1,
45324534
schema_only => 1,

0 commit comments

Comments
 (0)