Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c        |  6
-rw-r--r--  src/backend/access/nbtree/nbtsort.c          | 15
-rw-r--r--  src/backend/access/nbtree/nbtutils.c         |  7
-rw-r--r--  src/backend/catalog/index.c                  |  7
-rw-r--r--  src/backend/catalog/information_schema.sql   |  9
-rw-r--r--  src/backend/catalog/sql_features.txt         |  1
-rw-r--r--  src/backend/catalog/toasting.c               |  1
-rw-r--r--  src/backend/commands/indexcmds.c             |  3
-rw-r--r--  src/backend/nodes/copyfuncs.c                |  2
-rw-r--r--  src/backend/nodes/equalfuncs.c               |  2
-rw-r--r--  src/backend/nodes/makefuncs.c                |  3
-rw-r--r--  src/backend/nodes/outfuncs.c                 |  2
-rw-r--r--  src/backend/parser/gram.y                    | 47
-rw-r--r--  src/backend/parser/parse_utilcmd.c           |  3
-rw-r--r--  src/backend/utils/adt/ruleutils.c            | 23
-rw-r--r--  src/backend/utils/cache/relcache.c           |  1
-rw-r--r--  src/backend/utils/sort/tuplesort.c           |  8
17 files changed, 104 insertions, 36 deletions
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 62746c47219..68628ec000d 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -398,9 +398,9 @@ _bt_search_insert(Relation rel, BTInsertState insertstate)
* _bt_findinsertloc() to reuse most of the binary search work we do
* here.
*
- * Do not call here when there are NULL values in scan key. NULL should be
- * considered unequal to NULL when checking for duplicates, but we are not
- * prepared to handle that correctly.
+ * This code treats NULLs as equal, unlike the default semantics for unique
+ * indexes. So do not call here when there are NULL values in scan key and
+ * the index uses the default NULLS DISTINCT mode.
*/
static TransactionId
_bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
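
For reference, a minimal SQL sketch of the behavior the revised comment describes, assuming a server built with this patch (table names are illustrative):

    -- Default NULLS DISTINCT: NULL keys never conflict, so duplicate NULLs are allowed.
    CREATE TABLE t_distinct (a int UNIQUE);
    INSERT INTO t_distinct VALUES (NULL), (NULL);    -- succeeds

    -- NULLS NOT DISTINCT: NULL keys are treated as equal, so the duplicate check must run.
    CREATE TABLE t_not_distinct (a int UNIQUE NULLS NOT DISTINCT);
    INSERT INTO t_not_distinct VALUES (NULL);
    INSERT INTO t_not_distinct VALUES (NULL);        -- fails with a unique violation
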
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index dc220146fdc..8a19de2f66c 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -89,6 +89,7 @@ typedef struct BTSpool
Relation heap;
Relation index;
bool isunique;
+ bool nulls_not_distinct;
} BTSpool;
/*
@@ -106,6 +107,7 @@ typedef struct BTShared
Oid heaprelid;
Oid indexrelid;
bool isunique;
+ bool nulls_not_distinct;
bool isconcurrent;
int scantuplesortstates;
@@ -206,6 +208,7 @@ typedef struct BTLeader
typedef struct BTBuildState
{
bool isunique;
+ bool nulls_not_distinct;
bool havedead;
Relation heap;
BTSpool *spool;
@@ -307,6 +310,7 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
#endif /* BTREE_BUILD_STATS */
buildstate.isunique = indexInfo->ii_Unique;
+ buildstate.nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
buildstate.havedead = false;
buildstate.heap = heap;
buildstate.spool = NULL;
@@ -380,6 +384,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
btspool->heap = heap;
btspool->index = index;
btspool->isunique = indexInfo->ii_Unique;
+ btspool->nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
/* Save as primary spool */
buildstate->spool = btspool;
@@ -429,6 +434,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
*/
buildstate->spool->sortstate =
tuplesort_begin_index_btree(heap, index, buildstate->isunique,
+ buildstate->nulls_not_distinct,
maintenance_work_mem, coordinate,
false);
@@ -468,7 +474,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
* full, so we give it only work_mem
*/
buildstate->spool2->sortstate =
- tuplesort_begin_index_btree(heap, index, false, work_mem,
+ tuplesort_begin_index_btree(heap, index, false, false, work_mem,
coordinate2, false);
}
@@ -1554,6 +1560,7 @@ _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
btshared->heaprelid = RelationGetRelid(btspool->heap);
btshared->indexrelid = RelationGetRelid(btspool->index);
btshared->isunique = btspool->isunique;
+ btshared->nulls_not_distinct = btspool->nulls_not_distinct;
btshared->isconcurrent = isconcurrent;
btshared->scantuplesortstates = scantuplesortstates;
ConditionVariableInit(&btshared->workersdonecv);
@@ -1747,6 +1754,7 @@ _bt_leader_participate_as_worker(BTBuildState *buildstate)
leaderworker->heap = buildstate->spool->heap;
leaderworker->index = buildstate->spool->index;
leaderworker->isunique = buildstate->spool->isunique;
+ leaderworker->nulls_not_distinct = buildstate->spool->nulls_not_distinct;
/* Initialize second spool, if required */
if (!btleader->btshared->isunique)
@@ -1846,6 +1854,7 @@ _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
btspool->heap = heapRel;
btspool->index = indexRel;
btspool->isunique = btshared->isunique;
+ btspool->nulls_not_distinct = btshared->nulls_not_distinct;
/* Look up shared state private to tuplesort.c */
sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false);
@@ -1928,6 +1937,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
btspool->sortstate = tuplesort_begin_index_btree(btspool->heap,
btspool->index,
btspool->isunique,
+ btspool->nulls_not_distinct,
sortmem, coordinate,
false);
@@ -1950,13 +1960,14 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
coordinate2->nParticipants = -1;
coordinate2->sharedsort = sharedsort2;
btspool2->sortstate =
- tuplesort_begin_index_btree(btspool->heap, btspool->index, false,
+ tuplesort_begin_index_btree(btspool->heap, btspool->index, false, false,
Min(sortmem, work_mem), coordinate2,
false);
}
/* Fill in buildstate for _bt_build_callback() */
buildstate.isunique = btshared->isunique;
+ buildstate.nulls_not_distinct = btshared->nulls_not_distinct;
buildstate.havedead = false;
buildstate.heap = btspool->heap;
buildstate.spool = btspool;
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index ed67863c567..6a651d8397b 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -165,6 +165,13 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
key->anynullkeys = true;
}
+ /*
+ * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
+ * that full uniqueness check is done.
+ */
+ if (rel->rd_index->indnullsnotdistinct)
+ key->anynullkeys = false;
+
return key;
}
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 2308d402565..5e3fc2b35dc 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -614,6 +614,7 @@ UpdateIndexRelation(Oid indexoid,
values[Anum_pg_index_indnatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexAttrs);
values[Anum_pg_index_indnkeyatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexKeyAttrs);
values[Anum_pg_index_indisunique - 1] = BoolGetDatum(indexInfo->ii_Unique);
+ values[Anum_pg_index_indnullsnotdistinct - 1] = BoolGetDatum(indexInfo->ii_NullsNotDistinct);
values[Anum_pg_index_indisprimary - 1] = BoolGetDatum(primary);
values[Anum_pg_index_indisexclusion - 1] = BoolGetDatum(isexclusion);
values[Anum_pg_index_indimmediate - 1] = BoolGetDatum(immediate);
@@ -1368,6 +1369,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId,
indexExprs,
indexPreds,
oldInfo->ii_Unique,
+ oldInfo->ii_NullsNotDistinct,
false, /* not ready for inserts */
true);
@@ -2440,6 +2442,7 @@ BuildIndexInfo(Relation index)
RelationGetIndexExpressions(index),
RelationGetIndexPredicate(index),
indexStruct->indisunique,
+ indexStruct->indnullsnotdistinct,
indexStruct->indisready,
false);
@@ -2499,6 +2502,7 @@ BuildDummyIndexInfo(Relation index)
RelationGetDummyIndexExpressions(index),
NIL,
indexStruct->indisunique,
+ indexStruct->indnullsnotdistinct,
indexStruct->indisready,
false);
@@ -2532,6 +2536,9 @@ CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
if (info1->ii_Unique != info2->ii_Unique)
return false;
+ if (info1->ii_NullsNotDistinct != info2->ii_NullsNotDistinct)
+ return false;
+
/* indexes are only equivalent if they have the same access method */
if (info1->ii_Am != info2->ii_Am)
return false;
diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql
index b4f348a24db..c4ef8e78a5f 100644
--- a/src/backend/catalog/information_schema.sql
+++ b/src/backend/catalog/information_schema.sql
@@ -1838,7 +1838,11 @@ CREATE VIEW table_constraints AS
AS is_deferrable,
CAST(CASE WHEN c.condeferred THEN 'YES' ELSE 'NO' END AS yes_or_no)
AS initially_deferred,
- CAST('YES' AS yes_or_no) AS enforced
+ CAST('YES' AS yes_or_no) AS enforced,
+ CAST(CASE WHEN c.contype = 'u'
+ THEN CASE WHEN (SELECT NOT indnullsnotdistinct FROM pg_index WHERE indexrelid = conindid) THEN 'YES' ELSE 'NO' END
+ END
+ AS yes_or_no) AS nulls_distinct
FROM pg_namespace nc,
pg_namespace nr,
@@ -1868,7 +1872,8 @@ CREATE VIEW table_constraints AS
CAST('CHECK' AS character_data) AS constraint_type,
CAST('NO' AS yes_or_no) AS is_deferrable,
CAST('NO' AS yes_or_no) AS initially_deferred,
- CAST('YES' AS yes_or_no) AS enforced
+ CAST('YES' AS yes_or_no) AS enforced,
+ CAST(NULL AS yes_or_no) AS nulls_distinct
FROM pg_namespace nr,
pg_class r,
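
The new nulls_distinct column of information_schema.table_constraints can be inspected directly; a small sketch (table name is illustrative):

    SELECT constraint_name, constraint_type, nulls_distinct
    FROM information_schema.table_constraints
    WHERE table_name = 't_not_distinct';

Per the CASE expression added above, UNIQUE constraints report YES or NO depending on the index's null treatment, while all other constraint types report NULL.
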
diff --git a/src/backend/catalog/sql_features.txt b/src/backend/catalog/sql_features.txt
index b8a78f4d419..097d9c4784b 100644
--- a/src/backend/catalog/sql_features.txt
+++ b/src/backend/catalog/sql_features.txt
@@ -228,6 +228,7 @@ F263 Comma-separated predicates in simple CASE expression NO
F271 Compound character literals YES
F281 LIKE enhancements YES
F291 UNIQUE predicate NO
+F292 UNIQUE null treatment YES SQL:202x draft
F301 CORRESPONDING in query expressions NO
F302 INTERSECT table operator YES
F302 INTERSECT table operator 01 INTERSECT DISTINCT table operator YES
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 3c27cb1e71b..9bc10729b02 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -301,6 +301,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
indexInfo->ii_ExclusionStrats = NULL;
indexInfo->ii_OpclassOptions = NULL;
indexInfo->ii_Unique = true;
+ indexInfo->ii_NullsNotDistinct = false;
indexInfo->ii_ReadyForInserts = true;
indexInfo->ii_CheckedUnchanged = false;
indexInfo->ii_IndexUnchanged = false;
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 42aacc8f0a2..560dcc87a2c 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -226,7 +226,7 @@ CheckIndexCompatible(Oid oldId,
* ii_NumIndexKeyAttrs with same value.
*/
indexInfo = makeIndexInfo(numberOfAttributes, numberOfAttributes,
- accessMethodId, NIL, NIL, false, false, false);
+ accessMethodId, NIL, NIL, false, false, false, false);
typeObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
collationObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
@@ -867,6 +867,7 @@ DefineIndex(Oid relationId,
NIL, /* expressions, NIL for now */
make_ands_implicit((Expr *) stmt->whereClause),
stmt->unique,
+ stmt->nulls_not_distinct,
!concurrent,
concurrent);
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 90b5da51c95..6bd95bbce24 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -3072,6 +3072,7 @@ _copyConstraint(const Constraint *from)
COPY_NODE_FIELD(raw_expr);
COPY_STRING_FIELD(cooked_expr);
COPY_SCALAR_FIELD(generated_when);
+ COPY_SCALAR_FIELD(nulls_not_distinct);
COPY_NODE_FIELD(keys);
COPY_NODE_FIELD(including);
COPY_NODE_FIELD(exclusions);
@@ -3664,6 +3665,7 @@ _copyIndexStmt(const IndexStmt *from)
COPY_SCALAR_FIELD(oldCreateSubid);
COPY_SCALAR_FIELD(oldFirstRelfilenodeSubid);
COPY_SCALAR_FIELD(unique);
+ COPY_SCALAR_FIELD(nulls_not_distinct);
COPY_SCALAR_FIELD(primary);
COPY_SCALAR_FIELD(isconstraint);
COPY_SCALAR_FIELD(deferrable);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 06345da3ba8..4126516222b 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1400,6 +1400,7 @@ _equalIndexStmt(const IndexStmt *a, const IndexStmt *b)
COMPARE_SCALAR_FIELD(oldCreateSubid);
COMPARE_SCALAR_FIELD(oldFirstRelfilenodeSubid);
COMPARE_SCALAR_FIELD(unique);
+ COMPARE_SCALAR_FIELD(nulls_not_distinct);
COMPARE_SCALAR_FIELD(primary);
COMPARE_SCALAR_FIELD(isconstraint);
COMPARE_SCALAR_FIELD(deferrable);
@@ -2710,6 +2711,7 @@ _equalConstraint(const Constraint *a, const Constraint *b)
COMPARE_NODE_FIELD(raw_expr);
COMPARE_STRING_FIELD(cooked_expr);
COMPARE_SCALAR_FIELD(generated_when);
+ COMPARE_SCALAR_FIELD(nulls_not_distinct);
COMPARE_NODE_FIELD(keys);
COMPARE_NODE_FIELD(including);
COMPARE_NODE_FIELD(exclusions);
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index 822395625b6..c85d8fe9751 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -741,7 +741,7 @@ make_ands_implicit(Expr *clause)
*/
IndexInfo *
makeIndexInfo(int numattrs, int numkeyattrs, Oid amoid, List *expressions,
- List *predicates, bool unique, bool isready, bool concurrent)
+ List *predicates, bool unique, bool nulls_not_distinct, bool isready, bool concurrent)
{
IndexInfo *n = makeNode(IndexInfo);
@@ -750,6 +750,7 @@ makeIndexInfo(int numattrs, int numkeyattrs, Oid amoid, List *expressions,
Assert(n->ii_NumIndexKeyAttrs != 0);
Assert(n->ii_NumIndexKeyAttrs <= n->ii_NumIndexAttrs);
n->ii_Unique = unique;
+ n->ii_NullsNotDistinct = nulls_not_distinct;
n->ii_ReadyForInserts = isready;
n->ii_CheckedUnchanged = false;
n->ii_IndexUnchanged = false;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 2b0236937aa..6bdad462c78 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -2775,6 +2775,7 @@ _outIndexStmt(StringInfo str, const IndexStmt *node)
WRITE_UINT_FIELD(oldCreateSubid);
WRITE_UINT_FIELD(oldFirstRelfilenodeSubid);
WRITE_BOOL_FIELD(unique);
+ WRITE_BOOL_FIELD(nulls_not_distinct);
WRITE_BOOL_FIELD(primary);
WRITE_BOOL_FIELD(isconstraint);
WRITE_BOOL_FIELD(deferrable);
@@ -3713,6 +3714,7 @@ _outConstraint(StringInfo str, const Constraint *node)
case CONSTR_UNIQUE:
appendStringInfoString(str, "UNIQUE");
+ WRITE_BOOL_FIELD(nulls_not_distinct);
WRITE_NODE_FIELD(keys);
WRITE_NODE_FIELD(including);
WRITE_NODE_FIELD(options);
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index b5966712ce1..c4f32425060 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -625,6 +625,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <ival> opt_window_exclusion_clause
%type <str> opt_existing_window_name
%type <boolean> opt_if_not_exists
+%type <boolean> opt_unique_null_treatment
%type <ival> generated_when override_kind
%type <partspec> PartitionSpec OptPartitionSpec
%type <partelem> part_elem
@@ -3623,15 +3624,16 @@ ColConstraintElem:
n->location = @1;
$$ = (Node *)n;
}
- | UNIQUE opt_definition OptConsTableSpace
+ | UNIQUE opt_unique_null_treatment opt_definition OptConsTableSpace
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_UNIQUE;
n->location = @1;
+ n->nulls_not_distinct = !$2;
n->keys = NULL;
- n->options = $2;
+ n->options = $3;
n->indexname = NULL;
- n->indexspace = $3;
+ n->indexspace = $4;
$$ = (Node *)n;
}
| PRIMARY KEY opt_definition OptConsTableSpace
@@ -3716,6 +3718,12 @@ ColConstraintElem:
}
;
+opt_unique_null_treatment:
+ NULLS_P DISTINCT { $$ = true; }
+ | NULLS_P NOT DISTINCT { $$ = false; }
+ | /*EMPTY*/ { $$ = true; }
+ ;
+
generated_when:
ALWAYS { $$ = ATTRIBUTE_IDENTITY_ALWAYS; }
| BY DEFAULT { $$ = ATTRIBUTE_IDENTITY_BY_DEFAULT; }
@@ -3828,18 +3836,19 @@ ConstraintElem:
n->initially_valid = !n->skip_validation;
$$ = (Node *)n;
}
- | UNIQUE '(' columnList ')' opt_c_include opt_definition OptConsTableSpace
+ | UNIQUE opt_unique_null_treatment '(' columnList ')' opt_c_include opt_definition OptConsTableSpace
ConstraintAttributeSpec
{
Constraint *n = makeNode(Constraint);
n->contype = CONSTR_UNIQUE;
n->location = @1;
- n->keys = $3;
- n->including = $5;
- n->options = $6;
+ n->nulls_not_distinct = !$2;
+ n->keys = $4;
+ n->including = $6;
+ n->options = $7;
n->indexname = NULL;
- n->indexspace = $7;
- processCASbits($8, @8, "UNIQUE",
+ n->indexspace = $8;
+ processCASbits($9, @9, "UNIQUE",
&n->deferrable, &n->initdeferred, NULL,
NULL, yyscanner);
$$ = (Node *)n;
@@ -7411,7 +7420,7 @@ defacl_privilege_target:
IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
ON relation_expr access_method_clause '(' index_params ')'
- opt_include opt_reloptions OptTableSpace where_clause
+ opt_include opt_unique_null_treatment opt_reloptions OptTableSpace where_clause
{
IndexStmt *n = makeNode(IndexStmt);
n->unique = $2;
@@ -7421,9 +7430,10 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
n->accessMethod = $8;
n->indexParams = $10;
n->indexIncludingParams = $12;
- n->options = $13;
- n->tableSpace = $14;
- n->whereClause = $15;
+ n->nulls_not_distinct = !$13;
+ n->options = $14;
+ n->tableSpace = $15;
+ n->whereClause = $16;
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
@@ -7441,7 +7451,7 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
}
| CREATE opt_unique INDEX opt_concurrently IF_P NOT EXISTS name
ON relation_expr access_method_clause '(' index_params ')'
- opt_include opt_reloptions OptTableSpace where_clause
+ opt_include opt_unique_null_treatment opt_reloptions OptTableSpace where_clause
{
IndexStmt *n = makeNode(IndexStmt);
n->unique = $2;
@@ -7451,9 +7461,10 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
n->accessMethod = $11;
n->indexParams = $13;
n->indexIncludingParams = $15;
- n->options = $16;
- n->tableSpace = $17;
- n->whereClause = $18;
+ n->nulls_not_distinct = !$16;
+ n->options = $17;
+ n->tableSpace = $18;
+ n->whereClause = $19;
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
@@ -13802,7 +13813,7 @@ a_expr: c_expr { $$ = $1; }
else
$$ = (Node *) makeA_Expr(AEXPR_OP_ALL, $2, $1, $5, @2);
}
- | UNIQUE select_with_parens
+ | UNIQUE opt_unique_null_treatment select_with_parens
{
/* Not sure how to get rid of the parentheses
* but there are lots of shift/reduce errors without them.
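
Taken together, the gram.y changes accept the null-treatment clause in column constraints, table constraints, and CREATE INDEX, where it follows the column list and any INCLUDE clause. A few illustrative statements with made-up object names:

    -- column constraint
    CREATE TABLE a (x int UNIQUE NULLS NOT DISTINCT);

    -- table constraint
    CREATE TABLE b (x int, CONSTRAINT b_x_key UNIQUE NULLS NOT DISTINCT (x));

    -- standalone unique index
    CREATE TABLE c (x int);
    CREATE UNIQUE INDEX c_x_idx ON c (x) NULLS NOT DISTINCT;

NULLS DISTINCT may also be written explicitly; omitting the clause keeps the historical behavior, as the /*EMPTY*/ branch of opt_unique_null_treatment shows.
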
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 0eea214dd89..99efa26ce4a 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1581,6 +1581,7 @@ generateClonedIndexStmt(RangeVar *heapRel, Relation source_idx,
index->oldCreateSubid = InvalidSubTransactionId;
index->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
index->unique = idxrec->indisunique;
+ index->nulls_not_distinct = idxrec->indnullsnotdistinct;
index->primary = idxrec->indisprimary;
index->transformed = true; /* don't need transformIndexStmt */
index->concurrent = false;
@@ -2112,6 +2113,7 @@ transformIndexConstraints(CreateStmtContext *cxt)
equal(index->whereClause, priorindex->whereClause) &&
equal(index->excludeOpNames, priorindex->excludeOpNames) &&
strcmp(index->accessMethod, priorindex->accessMethod) == 0 &&
+ index->nulls_not_distinct == priorindex->nulls_not_distinct &&
index->deferrable == priorindex->deferrable &&
index->initdeferred == priorindex->initdeferred)
{
@@ -2178,6 +2180,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
* DefineIndex will check for it.
*/
}
+ index->nulls_not_distinct = constraint->nulls_not_distinct;
index->isconstraint = true;
index->deferrable = constraint->deferrable;
index->initdeferred = constraint->initdeferred;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 039b1d2b951..b16526e65e9 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1444,6 +1444,9 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
{
appendStringInfoChar(&buf, ')');
+ if (idxrec->indnullsnotdistinct)
+ appendStringInfo(&buf, " NULLS NOT DISTINCT");
+
/*
* If it has options, append "WITH (options)"
*/
@@ -2312,9 +2315,20 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
/* Start off the constraint definition */
if (conForm->contype == CONSTRAINT_PRIMARY)
- appendStringInfoString(&buf, "PRIMARY KEY (");
+ appendStringInfoString(&buf, "PRIMARY KEY ");
else
- appendStringInfoString(&buf, "UNIQUE (");
+ appendStringInfoString(&buf, "UNIQUE ");
+
+ indexId = conForm->conindid;
+
+ indtup = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
+ if (!HeapTupleIsValid(indtup))
+ elog(ERROR, "cache lookup failed for index %u", indexId);
+ if (conForm->contype == CONSTRAINT_UNIQUE &&
+ ((Form_pg_index) GETSTRUCT(indtup))->indnullsnotdistinct)
+ appendStringInfoString(&buf, "NULLS NOT DISTINCT ");
+
+ appendStringInfoString(&buf, "(");
/* Fetch and build target column list */
val = SysCacheGetAttr(CONSTROID, tup,
@@ -2327,12 +2341,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
appendStringInfoChar(&buf, ')');
- indexId = conForm->conindid;
-
/* Build including column list (from pg_index.indkeys) */
- indtup = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
- if (!HeapTupleIsValid(indtup))
- elog(ERROR, "cache lookup failed for index %u", indexId);
val = SysCacheGetAttr(INDEXRELID, indtup,
Anum_pg_index_indnatts, &isnull);
if (isnull)
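
With the ruleutils.c changes, the clause round-trips through deparsing. Roughly, continuing the illustrative objects above (output abridged):

    SELECT pg_get_indexdef('c_x_idx'::regclass);
    -- CREATE UNIQUE INDEX c_x_idx ON public.c USING btree (x) NULLS NOT DISTINCT

    SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'b_x_key';
    -- UNIQUE NULLS NOT DISTINCT (x)

The constraint deparsing now looks up the underlying index earlier so that the clause can be emitted between UNIQUE and the column list.
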
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 2e760e8a3bd..2707fed12f4 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2274,6 +2274,7 @@ RelationReloadIndexInfo(Relation relation)
* the array fields are allowed to change, though.
*/
relation->rd_index->indisunique = index->indisunique;
+ relation->rd_index->indnullsnotdistinct = index->indnullsnotdistinct;
relation->rd_index->indisprimary = index->indisprimary;
relation->rd_index->indisexclusion = index->indisexclusion;
relation->rd_index->indimmediate = index->indimmediate;
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index a3f22d7357f..086e948fca6 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -459,6 +459,7 @@ struct Tuplesortstate
/* These are specific to the index_btree subcase: */
bool enforceUnique; /* complain if we find duplicate tuples */
+ bool uniqueNullsNotDistinct; /* unique constraint null treatment */
/* These are specific to the index_hash subcase: */
uint32 high_mask; /* masks for sortable part of hash code */
@@ -1065,6 +1066,7 @@ Tuplesortstate *
tuplesort_begin_index_btree(Relation heapRel,
Relation indexRel,
bool enforceUnique,
+ bool uniqueNullsNotDistinct,
int workMem,
SortCoordinate coordinate,
bool randomAccess)
@@ -1103,6 +1105,7 @@ tuplesort_begin_index_btree(Relation heapRel,
state->heapRel = heapRel;
state->indexRel = indexRel;
state->enforceUnique = enforceUnique;
+ state->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
indexScanKey = _bt_mkscankey(indexRel, NULL);
@@ -4200,14 +4203,15 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
/*
* If btree has asked us to enforce uniqueness, complain if two equal
- * tuples are detected (unless there was at least one NULL field).
+ * tuples are detected (unless there was at least one NULL field and NULLS
+ * NOT DISTINCT was not set).
*
* It is sufficient to make the test here, because if two tuples are equal
* they *must* get compared at some stage of the sort --- otherwise the
* sort algorithm wouldn't have checked whether one must appear before the
* other.
*/
- if (state->enforceUnique && !equal_hasnull)
+ if (state->enforceUnique && !(!state->uniqueNullsNotDistinct && equal_hasnull))
{
Datum values[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
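
The relaxed check also applies when a unique index is built over existing data: with NULLS NOT DISTINCT, pre-existing duplicate NULLs now make the build fail. A sketch (names illustrative):

    CREATE TABLE d (x int);
    INSERT INTO d VALUES (NULL), (NULL);

    CREATE UNIQUE INDEX d_x_idx  ON d (x);                       -- succeeds: NULLs are distinct
    CREATE UNIQUE INDEX d_x_idx2 ON d (x) NULLS NOT DISTINCT;    -- fails: duplicate key value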