diff --git a/.dir-locals.el b/.dir-locals.el
index d8827a669a4..ab6208b6983 100644
--- a/.dir-locals.el
+++ b/.dir-locals.el
@@ -5,11 +5,11 @@
(fill-column . 78)
(indent-tabs-mode . t)
(tab-width . 4)))
- (dsssl-mode . ((indent-tabs-mode . nil)))
- (nxml-mode . ((indent-tabs-mode . nil)))
+ (nxml-mode . ((fill-column . 78)
+ (indent-tabs-mode . nil)))
(perl-mode . ((perl-indent-level . 4)
- (perl-continued-statement-offset . 4)
- (perl-continued-brace-offset . 4)
+ (perl-continued-statement-offset . 2)
+ (perl-continued-brace-offset . -2)
(perl-brace-offset . 0)
(perl-brace-imaginary-offset . 0)
(perl-label-offset . -2)
diff --git a/.gitattributes b/.gitattributes
index bdbcdb560af..3ac99972812 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -14,7 +14,6 @@ README.* conflict-marker-size=32
# Certain data files that contain special whitespace, and other special cases
*.data -whitespace
contrib/pgcrypto/sql/pgp-armor.sql whitespace=-blank-at-eol
-doc/bug.template whitespace=space-before-tab,-blank-at-eof,blank-at-eol
src/backend/catalog/sql_features.txt whitespace=space-before-tab,blank-at-eof,-blank-at-eol
# Test output files that contain extra whitespace
diff --git a/COPYRIGHT b/COPYRIGHT
index 33e6e4842ad..fe7c3857327 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -1,7 +1,7 @@
PostgreSQL Database Management System
(formerly known as Postgres, then as Postgres95)
-Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
Portions Copyright (c) 1994, The Regents of the University of California
diff --git a/GNUmakefile.in b/GNUmakefile.in
index dc76a5d11dd..9dc373c79cc 100644
--- a/GNUmakefile.in
+++ b/GNUmakefile.in
@@ -63,10 +63,12 @@ distclean maintainer-clean:
@rm -rf autom4te.cache/
rm -f config.cache config.log config.status GNUmakefile
-check check-tests installcheck installcheck-parallel installcheck-tests:
+check check-tests installcheck installcheck-parallel installcheck-tests: CHECKPREP_TOP=src/test/regress
+check check-tests installcheck installcheck-parallel installcheck-tests: submake-generated-headers
$(MAKE) -C src/test/regress $@
$(call recurse,check-world,src/test src/pl src/interfaces/ecpg contrib src/bin,check)
+$(call recurse,checkprep, src/test src/pl src/interfaces/ecpg contrib src/bin)
$(call recurse,installcheck-world,src/test src/pl src/interfaces/ecpg contrib src/bin,installcheck)
@@ -78,7 +80,6 @@ GNUmakefile: GNUmakefile.in $(top_builddir)/config.status
distdir = postgresql-$(VERSION)
dummy = =install=
-garbage = =* "#"* ."#"* *~* *.orig *.rej core postgresql-*
dist: $(distdir).tar.gz $(distdir).tar.bz2
rm -rf $(distdir)
@@ -127,4 +128,10 @@ distcheck: dist
rm -rf $(distdir) $(dummy)
@echo "Distribution integrity checks out."
-.PHONY: dist distdir distcheck docs install-docs world check-world install-world installcheck-world
+headerscheck: submake-generated-headers
+ $(top_srcdir)/src/tools/pginclude/headerscheck $(top_srcdir) $(abs_top_builddir)
+
+cpluspluscheck: submake-generated-headers
+ $(top_srcdir)/src/tools/pginclude/cpluspluscheck $(top_srcdir) $(abs_top_builddir)
+
+.PHONY: dist distdir distcheck docs install-docs world check-world install-world installcheck-world headerscheck cpluspluscheck
diff --git a/Makefile b/Makefile
index c400854cd3d..99dcfff654d 100644
--- a/Makefile
+++ b/Makefile
@@ -33,7 +33,7 @@ all check install installdirs installcheck installcheck-parallel uninstall clean
\
if [ x"$${GMAKE+set}" = xset ]; then \
echo "Using GNU make found at $${GMAKE}"; \
- unset MAKEFLAGS; unset MAKELEVEL; \
+ unset MAKELEVEL; \
$${GMAKE} $@ ; \
else \
echo "You must use GNU make to build PostgreSQL." ; \
diff --git a/aclocal.m4 b/aclocal.m4
index a517e949f15..bfd34ecec8c 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -4,6 +4,7 @@ m4_include([config/ax_prog_perl_modules.m4])
m4_include([config/ax_pthread.m4])
m4_include([config/c-compiler.m4])
m4_include([config/c-library.m4])
+m4_include([config/check_decls.m4])
m4_include([config/docbook.m4])
m4_include([config/general.m4])
m4_include([config/libtool.m4])
diff --git a/config/ac_func_accept_argtypes.m4 b/config/ac_func_accept_argtypes.m4
index ee28bacf4b0..178ef678183 100644
--- a/config/ac_func_accept_argtypes.m4
+++ b/config/ac_func_accept_argtypes.m4
@@ -43,8 +43,8 @@ AC_DEFUN([AC_FUNC_ACCEPT_ARGTYPES],
[AC_CACHE_VAL(ac_cv_func_accept_arg1,dnl
[AC_CACHE_VAL(ac_cv_func_accept_arg2,dnl
[AC_CACHE_VAL(ac_cv_func_accept_arg3,dnl
- [for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do
- for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do
+ [for ac_cv_func_accept_return in 'int' 'SOCKET WSAAPI' 'unsigned int PASCAL'; do
+ for ac_cv_func_accept_arg1 in 'int' 'SOCKET' 'unsigned int'; do
for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do
for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
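(A minimal sketch, not part of the patch: the probe that AC_FUNC_ACCEPT_ARGTYPES generates for each candidate tuple is essentially the declaration below, with the return and argument types substituted in turn; the reordered lists above merely try the Winsock spellings (SOCKET, WSAAPI) before the older PASCAL ones. The POSIX tuple is hard-coded here for illustration.)

/* If this extern declaration compiles against the system headers,
 * the candidate accept() signature matches and the search stops. */
#include <sys/types.h>
#include <sys/socket.h>

extern int accept(int, struct sockaddr *, socklen_t *);

int
main(void)
{
    return 0;
}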
diff --git a/config/ax_pthread.m4 b/config/ax_pthread.m4
index f6445a6ffa8..5fbf9fe0d68 100644
--- a/config/ax_pthread.m4
+++ b/config/ax_pthread.m4
@@ -1,5 +1,5 @@
# ===========================================================================
-# http://www.gnu.org/software/autoconf-archive/ax_pthread.html
+# https://www.gnu.org/software/autoconf-archive/ax_pthread.html
# ===========================================================================
#
# SYNOPSIS
@@ -19,10 +19,10 @@
# is necessary on AIX to use the special cc_r compiler alias.)
#
# NOTE: You are assumed to not only compile your program with these flags,
-# but also link it with them as well. e.g. you should link with
+# but also to link with them as well. For example, you might link with
# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
#
-# If you are only building threads programs, you may wish to use these
+# If you are only building threaded programs, you may wish to use these
# variables in your default LIBS, CFLAGS, and CC:
#
# LIBS="$PTHREAD_LIBS $LIBS"
@@ -30,8 +30,8 @@
# CC="$PTHREAD_CC"
#
# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
-# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name
-# (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+# has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to
+# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
#
# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the
# PTHREAD_PRIO_INHERIT symbol is defined when compiling with
@@ -67,7 +67,7 @@
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
+# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
@@ -82,12 +82,13 @@
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
-#serial 21
+#serial 24
AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
AC_DEFUN([AX_PTHREAD], [
AC_REQUIRE([AC_CANONICAL_HOST])
AC_REQUIRE([AC_PROG_CC])
+AC_REQUIRE([AC_PROG_SED])
AC_LANG_PUSH([C])
ax_pthread_ok=no
@@ -98,20 +99,23 @@ ax_pthread_ok=no
# First of all, check if the user has set any of the PTHREAD_LIBS,
# etcetera environment variables, and if threads linking works using
# them:
-if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
- save_CFLAGS="$CFLAGS"
+if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
+ ax_pthread_save_CC="$CC"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- save_LIBS="$LIBS"
LIBS="$PTHREAD_LIBS $LIBS"
AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
AC_LINK_IFELSE([AC_LANG_CALL([], [pthread_join])], [ax_pthread_ok=yes])
AC_MSG_RESULT([$ax_pthread_ok])
- if test x"$ax_pthread_ok" = xno; then
+ if test "x$ax_pthread_ok" = "xno"; then
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
fi
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CC="$ax_pthread_save_CC"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
fi
# We must check for the threads library under a number of different
@@ -124,7 +128,7 @@ fi
# which indicates that we try without any flags at all, and "pthread-config"
# which is a program returning the flags for the Pth emulation library.
-ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt -mthreads pthread --thread-safe pthread-config"
+ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
# The ordering *is* (sometimes) important. Some notes on the
# individual items follow:
@@ -133,14 +137,14 @@ ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt
# none: in case threads are in libc; should be tried before -Kthread and
# other compiler flags to prevent continual compiler warnings
# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
-# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
-# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads), Tru64
-# -pthreads: Solaris/gcc
+# (Note: HP C rejects this with "bad form for `-t' option")
+# -pthreads: Solaris/gcc (Note: HP C also rejects)
# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
# doesn't hurt to check since this sometimes defines pthreads and
# -D_REENTRANT too), HP C (must be checked before -lpthread, which
-# is present but should not be used directly)
+# is present but should not be used directly; and before -mthreads,
+# because the compiler interprets this as "-mt" + "-hreads")
# -mthreads: Mingw32/gcc, Lynx/gcc
# pthread: Linux, etcetera
# --thread-safe: KAI C++
@@ -148,6 +152,14 @@ ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt
case $host_os in
+ freebsd*)
+
+ # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+ # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+
+ ax_pthread_flags="-kthread lthread $ax_pthread_flags"
+ ;;
+
hpux*)
# From the cc(1) man page: "[-mt] Sets various -D flags to enable
@@ -174,42 +186,144 @@ case $host_os in
solaris*)
- # Newer versions of Solaris require the "-mt -lpthread" pair, and we
- # check that first. On some older versions, libc contains stubbed
+ # On Solaris (at least, for some versions), libc contains stubbed
# (non-functional) versions of the pthreads routines, so link-based
- # tests will erroneously succeed. (We need to link with -pthreads/-mt/
- # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
- # a function called by this macro, so we could check for that, but
- # who knows whether they'll stub that too in a future libc.) So
- # we'll look for -pthreads and -lpthread shortly thereafter.
+ # tests will erroneously succeed. (N.B.: The stubs are missing
+ # pthread_cleanup_push, or rather a function called by this macro,
+ # so we could check for that, but who knows whether they'll stub
+ # that too in a future libc.) So we'll check first for the
+ # standard Solaris way of linking pthreads (-mt -lpthread).
- ax_pthread_flags="-mt,pthread -pthreads -pthread pthread $ax_pthread_flags"
+ ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
;;
esac
-# Older versions of Clang only give a warning instead of an error for an
-# unrecognized option, unless we specify -Werror. (We throw in some extra
-# Clang warning flags for good measure.)
-
-AC_CACHE_CHECK([if compiler needs certain flags to reject unknown flags],
- [ax_cv_PTHREAD_REJECT_UNKNOWN],
- [ax_cv_PTHREAD_REJECT_UNKNOWN=unknown
- save_CFLAGS="$CFLAGS"
- ax_pthread_extra_flags="-Wunknown-warning-option -Wunused-command-line-argument"
- CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wfoobaz -foobaz"
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])],
- [ax_cv_PTHREAD_REJECT_UNKNOWN="-Werror $ax_pthread_extra_flags"],
- [ax_cv_PTHREAD_REJECT_UNKNOWN=no])
- CFLAGS="$save_CFLAGS"
+# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
+
+AS_IF([test "x$GCC" = "xyes"],
+ [ax_pthread_flags="-pthread -pthreads $ax_pthread_flags"])
+
+# The presence of a feature test macro requesting re-entrant function
+# definitions is, on some systems, a strong hint that pthreads support is
+# correctly enabled
+
+case $host_os in
+ darwin* | hpux* | linux* | osf* | solaris*)
+ ax_pthread_check_macro="_REENTRANT"
+ ;;
+
+ aix*)
+ ax_pthread_check_macro="_THREAD_SAFE"
+ ;;
+
+ *)
+ ax_pthread_check_macro="--"
+ ;;
+esac
+AS_IF([test "x$ax_pthread_check_macro" = "x--"],
+ [ax_pthread_check_cond=0],
+ [ax_pthread_check_cond="!defined($ax_pthread_check_macro)"])
+
+# Are we compiling with Clang?
+
+AC_CACHE_CHECK([whether $CC is Clang],
+ [ax_cv_PTHREAD_CLANG],
+ [ax_cv_PTHREAD_CLANG=no
+ # Note that Autoconf sets GCC=yes for Clang as well as GCC
+ if test "x$GCC" = "xyes"; then
+ AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
+ [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+# if defined(__clang__) && defined(__llvm__)
+ AX_PTHREAD_CC_IS_CLANG
+# endif
+ ],
+ [ax_cv_PTHREAD_CLANG=yes])
+ fi
])
-ax_pthread_extra_flags=
-AS_IF([test "x$ax_cv_PTHREAD_REJECT_UNKNOWN" != "xno"],
- [ax_pthread_extra_flags="$ax_cv_PTHREAD_REJECT_UNKNOWN"])
+ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
+
+ax_pthread_clang_warning=no
+
+# Clang needs special handling, because older versions handle the -pthread
+# option in a rather... idiosyncratic way
+
+if test "x$ax_pthread_clang" = "xyes"; then
+
+ # Clang takes -pthread; it has never supported any other flag
+
+ # (Note 1: This will need to be revisited if a system that Clang
+ # supports has POSIX threads in a separate library. This tends not
+ # to be the way of modern systems, but it's conceivable.)
+
+ # (Note 2: On some systems, notably Darwin, -pthread is not needed
+ # to get POSIX threads support; the API is always present and
+ # active. We could reasonably leave PTHREAD_CFLAGS empty. But
+ # -pthread does define _REENTRANT, and while the Darwin headers
+ # ignore this macro, third-party headers might not.)
+
+ PTHREAD_CFLAGS="-pthread"
+ PTHREAD_LIBS=
+
+ ax_pthread_ok=yes
+
+ # However, older versions of Clang make a point of warning the user
+ # that, in an invocation where only linking and no compilation is
+ # taking place, the -pthread option has no effect ("argument unused
+ # during compilation"). They expect -pthread to be passed in only
+ # when source code is being compiled.
+ #
+ # Problem is, this is at odds with the way Automake and most other
+ # C build frameworks function, which is that the same flags used in
+ # compilation (CFLAGS) are also used in linking. Many systems
+ # supported by AX_PTHREAD require exactly this for POSIX threads
+ # support, and in fact it is often not straightforward to specify a
+ # flag that is used only in the compilation phase and not in
+ # linking. Such a scenario is extremely rare in practice.
+ #
+ # Even though use of the -pthread flag in linking would only print
+ # a warning, this can be a nuisance for well-run software projects
+ # that build with -Werror. So if the active version of Clang has
+ # this misfeature, we search for an option to squash it.
+
+ AC_CACHE_CHECK([whether Clang needs flag to prevent "argument unused" warning when linking with -pthread],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG],
+ [ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
+ # Create an alternate version of $ac_link that compiles and
+ # links in two steps (.c -> .o, .o -> exe) instead of one
+ # (.c -> exe), because the warning occurs only in the second
+ # step
+ ax_pthread_save_ac_link="$ac_link"
+ ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
+ ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+ ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
+ AS_IF([test "x$ax_pthread_try" = "xunknown"], [break])
+ CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
+ ac_link="$ax_pthread_save_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [ac_link="$ax_pthread_2step_ac_link"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main(void){return 0;}]])],
+ [break])
+ ])
+ done
+ ac_link="$ax_pthread_save_ac_link"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ AS_IF([test "x$ax_pthread_try" = "x"], [ax_pthread_try=no])
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
+ ])
+
+ case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
+ no | unknown) ;;
+ *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
+ esac
-if test x"$ax_pthread_ok" = xno; then
-for flag in $ax_pthread_flags; do
+fi # $ax_pthread_clang = yes
- case $flag in
+if test "x$ax_pthread_ok" = "xno"; then
+for ax_pthread_try_flag in $ax_pthread_flags; do
+
+ case $ax_pthread_try_flag in
none)
AC_MSG_CHECKING([whether pthreads work without any flags])
;;
@@ -221,27 +335,27 @@ for flag in $ax_pthread_flags; do
;;
-*)
- AC_MSG_CHECKING([whether pthreads work with $flag])
- PTHREAD_CFLAGS="$flag"
+ AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
+ PTHREAD_CFLAGS="$ax_pthread_try_flag"
;;
pthread-config)
AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
- if test x"$ax_pthread_config" = xno; then continue; fi
+ AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
PTHREAD_CFLAGS="`pthread-config --cflags`"
PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
;;
*)
- AC_MSG_CHECKING([for the pthreads library -l$flag])
- PTHREAD_LIBS="-l$flag"
+ AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
+ PTHREAD_LIBS="-l$ax_pthread_try_flag"
;;
esac
- save_LIBS="$LIBS"
- save_CFLAGS="$CFLAGS"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
LIBS="$PTHREAD_LIBS $LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags"
# Check for various functions. We must include pthread.h,
# since some functions may be macros. (On the Sequent, we
@@ -252,7 +366,11 @@ for flag in $ax_pthread_flags; do
# pthread_cleanup_push because it is one of the few pthread
# functions on Solaris that doesn't have a non-functional libc stub.
# We try pthread_create on general principles.
+
AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+# if $ax_pthread_check_cond
+# error "$ax_pthread_check_macro must be defined"
+# endif
static void routine(void *a) { a = 0; }
static void *start_routine(void *a) { return a; }],
[pthread_t th; pthread_attr_t attr;
@@ -261,16 +379,14 @@ for flag in $ax_pthread_flags; do
pthread_attr_init(&attr);
pthread_cleanup_push(routine, 0);
pthread_cleanup_pop(0) /* ; */])],
- [ax_pthread_ok=yes],
- [])
+ [ax_pthread_ok=yes],
+ [])
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
AC_MSG_RESULT([$ax_pthread_ok])
- if test "x$ax_pthread_ok" = xyes; then
- break;
- fi
+ AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
@@ -278,49 +394,41 @@ done
fi
# Various other checks:
-if test "x$ax_pthread_ok" = xyes; then
- save_LIBS="$LIBS"
- LIBS="$PTHREAD_LIBS $LIBS"
- save_CFLAGS="$CFLAGS"
+if test "x$ax_pthread_ok" = "xyes"; then
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
# Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
AC_CACHE_CHECK([for joinable pthread attribute],
[ax_cv_PTHREAD_JOINABLE_ATTR],
[ax_cv_PTHREAD_JOINABLE_ATTR=unknown
- for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>],
- [int attr = $attr; return attr /* ; */])],
- [ax_cv_PTHREAD_JOINABLE_ATTR=$attr; break],
+ [int attr = $ax_pthread_attr; return attr /* ; */])],
+ [ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break],
[])
done
])
- AS_IF([test "$ax_cv_PTHREAD_JOINABLE_ATTR" != unknown && \
- test "$ax_cv_PTHREAD_JOINABLE_ATTR" != PTHREAD_CREATE_JOINABLE],
+ AS_IF([test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
+ test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
+ test "x$ax_pthread_joinable_attr_defined" != "xyes"],
[AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE],
[$ax_cv_PTHREAD_JOINABLE_ATTR],
[Define to necessary symbol if this constant
- uses a non-standard name on your system.])])
+ uses a non-standard name on your system.])
+ ax_pthread_joinable_attr_defined=yes
+ ])
- AC_CACHE_CHECK([if more special flags are required for pthreads],
+ AC_CACHE_CHECK([whether more special flags are required for pthreads],
[ax_cv_PTHREAD_SPECIAL_FLAGS],
[ax_cv_PTHREAD_SPECIAL_FLAGS=no
- ax_pthread_special_flags_added=no
- AC_EGREP_CPP([AX_PTHREAD_NEED_SPECIAL_FLAG],
- [
-# if !defined(_REENTRANT) && !defined(_THREAD_SAFE)
- AX_PTHREAD_NEED_SPECIAL_FLAG
-# endif
- ],
- [case $host_os in
- aix* | freebsd*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_THREAD_SAFE"
- ;;
- darwin* | hpux* | osf* | solaris*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_REENTRANT"
- ;;
- esac
- ])
+ case $host_os in
+ solaris*)
+ ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
+ ;;
+ esac
])
AS_IF([test "x$ax_cv_PTHREAD_SPECIAL_FLAGS" != "xno" && \
test "x$ax_pthread_special_flags_added" != "xyes"],
@@ -334,23 +442,26 @@ if test "x$ax_pthread_ok" = xyes; then
[ax_cv_PTHREAD_PRIO_INHERIT=yes],
[ax_cv_PTHREAD_PRIO_INHERIT=no])
])
- AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"],
- [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])])
+ AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
+ test "x$ax_pthread_prio_inherit_defined" != "xyes"],
+ [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])
+ ax_pthread_prio_inherit_defined=yes
+ ])
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
# More AIX lossage: compile with *_r variant
- if test "x$GCC" != xyes; then
+ if test "x$GCC" != "xyes"; then
case $host_os in
aix*)
AS_CASE(["x/$CC"],
- [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
- [#handle absolute path differently from PATH based program lookup
- AS_CASE(["x$CC"],
- [x/*],
- [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
- [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+ [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6],
+ [#handle absolute path differently from PATH based program lookup
+ AS_CASE(["x$CC"],
+ [x/*],
+ [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
+ [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
;;
esac
fi
@@ -363,7 +474,7 @@ AC_SUBST([PTHREAD_CFLAGS])
AC_SUBST([PTHREAD_CC])
# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
-if test x"$ax_pthread_ok" = xyes; then
+if test "x$ax_pthread_ok" = "xyes"; then
ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1])
:
else
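(For reference, a standalone rendering of the link test the macro runs for each candidate flag, taken from the conftest body shown in the hunk above; build it with the flag under test, e.g. "cc -pthread conftest.c", on a POSIX system.)

/* References several pthread entry points, so compiling and linking
 * succeed only if the candidate flag really enables POSIX threads.
 * pthread_cleanup_push is included because, per the macro's comment,
 * it is one of the few functions Solaris libc does not stub out. */
#include <pthread.h>

static void routine(void *a) { a = 0; }
static void *start_routine(void *a) { return a; }

int
main(void)
{
    pthread_t th;
    pthread_attr_t attr;

    pthread_create(&th, 0, start_routine, 0);
    pthread_join(th, 0);
    pthread_attr_init(&attr);
    pthread_cleanup_push(routine, 0);
    pthread_cleanup_pop(0);
    return 0;
}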
diff --git a/config/c-compiler.m4 b/config/c-compiler.m4
index ba5c40db01c..71b645839db 100644
--- a/config/c-compiler.m4
+++ b/config/c-compiler.m4
@@ -19,24 +19,38 @@ fi])# PGAC_C_SIGNED
# PGAC_C_PRINTF_ARCHETYPE
# -----------------------
-# Set the format archetype used by gcc to check printf type functions. We
-# prefer "gnu_printf", which includes what glibc uses, such as %m for error
-# strings and %lld for 64 bit long longs. GCC 4.4 introduced it. It makes a
-# dramatic difference on Windows.
+# Select the format archetype to be used by gcc to check printf-type functions.
+# We prefer "gnu_printf", as that most closely matches the features supported
+# by src/port/snprintf.c (particularly the %m conversion spec). However,
+# on some NetBSD versions, that doesn't work while "__syslog__" does.
+# If all else fails, use "printf".
AC_DEFUN([PGAC_PRINTF_ARCHETYPE],
[AC_CACHE_CHECK([for printf format archetype], pgac_cv_printf_archetype,
+[pgac_cv_printf_archetype=gnu_printf
+PGAC_TEST_PRINTF_ARCHETYPE
+if [[ "$ac_archetype_ok" = no ]]; then
+ pgac_cv_printf_archetype=__syslog__
+ PGAC_TEST_PRINTF_ARCHETYPE
+ if [[ "$ac_archetype_ok" = no ]]; then
+ pgac_cv_printf_archetype=printf
+ fi
+fi])
+AC_DEFINE_UNQUOTED([PG_PRINTF_ATTRIBUTE], [$pgac_cv_printf_archetype],
+[Define to best printf format archetype, usually gnu_printf if available.])
+])# PGAC_PRINTF_ARCHETYPE
+
+# Subroutine: test $pgac_cv_printf_archetype, set $ac_archetype_ok to yes or no
+AC_DEFUN([PGAC_TEST_PRINTF_ARCHETYPE],
[ac_save_c_werror_flag=$ac_c_werror_flag
ac_c_werror_flag=yes
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-[extern int
-pgac_write(int ignore, const char *fmt,...)
-__attribute__((format(gnu_printf, 2, 3)));], [])],
- [pgac_cv_printf_archetype=gnu_printf],
- [pgac_cv_printf_archetype=printf])
-ac_c_werror_flag=$ac_save_c_werror_flag])
-AC_DEFINE_UNQUOTED([PG_PRINTF_ATTRIBUTE], [$pgac_cv_printf_archetype],
- [Define to gnu_printf if compiler supports it, else printf.])
-])# PGAC_PRINTF_ARCHETYPE
+[extern void pgac_write(int ignore, const char *fmt,...)
+__attribute__((format($pgac_cv_printf_archetype, 2, 3)));],
+[pgac_write(0, "error %s: %m", "foo");])],
+ [ac_archetype_ok=yes],
+ [ac_archetype_ok=no])
+ac_c_werror_flag=$ac_save_c_werror_flag
+])# PGAC_TEST_PRINTF_ARCHETYPE
# PGAC_TYPE_64BIT_INT(TYPE)
@@ -259,60 +273,6 @@ AC_DEFINE(HAVE__BUILTIN_TYPES_COMPATIBLE_P, 1,
fi])# PGAC_C_TYPES_COMPATIBLE
-# PGAC_C_BUILTIN_BSWAP16
-# -------------------------
-# Check if the C compiler understands __builtin_bswap16(),
-# and define HAVE__BUILTIN_BSWAP16 if so.
-AC_DEFUN([PGAC_C_BUILTIN_BSWAP16],
-[AC_CACHE_CHECK(for __builtin_bswap16, pgac_cv__builtin_bswap16,
-[AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-[static unsigned long int x = __builtin_bswap16(0xaabb);]
-)],
-[pgac_cv__builtin_bswap16=yes],
-[pgac_cv__builtin_bswap16=no])])
-if test x"$pgac_cv__builtin_bswap16" = xyes ; then
-AC_DEFINE(HAVE__BUILTIN_BSWAP16, 1,
- [Define to 1 if your compiler understands __builtin_bswap16.])
-fi])# PGAC_C_BUILTIN_BSWAP16
-
-
-
-# PGAC_C_BUILTIN_BSWAP32
-# -------------------------
-# Check if the C compiler understands __builtin_bswap32(),
-# and define HAVE__BUILTIN_BSWAP32 if so.
-AC_DEFUN([PGAC_C_BUILTIN_BSWAP32],
-[AC_CACHE_CHECK(for __builtin_bswap32, pgac_cv__builtin_bswap32,
-[AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-[static unsigned long int x = __builtin_bswap32(0xaabbccdd);]
-)],
-[pgac_cv__builtin_bswap32=yes],
-[pgac_cv__builtin_bswap32=no])])
-if test x"$pgac_cv__builtin_bswap32" = xyes ; then
-AC_DEFINE(HAVE__BUILTIN_BSWAP32, 1,
- [Define to 1 if your compiler understands __builtin_bswap32.])
-fi])# PGAC_C_BUILTIN_BSWAP32
-
-
-
-# PGAC_C_BUILTIN_BSWAP64
-# -------------------------
-# Check if the C compiler understands __builtin_bswap64(),
-# and define HAVE__BUILTIN_BSWAP64 if so.
-AC_DEFUN([PGAC_C_BUILTIN_BSWAP64],
-[AC_CACHE_CHECK(for __builtin_bswap64, pgac_cv__builtin_bswap64,
-[AC_COMPILE_IFELSE([AC_LANG_SOURCE(
-[static unsigned long int x = __builtin_bswap64(0xaabbccddeeff0011);]
-)],
-[pgac_cv__builtin_bswap64=yes],
-[pgac_cv__builtin_bswap64=no])])
-if test x"$pgac_cv__builtin_bswap64" = xyes ; then
-AC_DEFINE(HAVE__BUILTIN_BSWAP64, 1,
- [Define to 1 if your compiler understands __builtin_bswap64.])
-fi])# PGAC_C_BUILTIN_BSWAP64
-
-
-
# PGAC_C_BUILTIN_CONSTANT_P
# -------------------------
# Check if the C compiler understands __builtin_constant_p(),
@@ -409,22 +369,30 @@ fi])# PGAC_C_COMPUTED_GOTO
-# PGAC_C_VA_ARGS
-# --------------
-# Check if the C compiler understands C99-style variadic macros,
-# and define HAVE__VA_ARGS if so.
-AC_DEFUN([PGAC_C_VA_ARGS],
-[AC_CACHE_CHECK(for __VA_ARGS__, pgac_cv__va_args,
-[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include <stdio.h>],
-[#define debug(...) fprintf(stderr, __VA_ARGS__)
-debug("%s", "blarg");
-])],
-[pgac_cv__va_args=yes],
-[pgac_cv__va_args=no])])
-if test x"$pgac_cv__va_args" = xyes ; then
-AC_DEFINE(HAVE__VA_ARGS, 1,
- [Define to 1 if your compiler understands __VA_ARGS__ in macros.])
-fi])# PGAC_C_VA_ARGS
+# PGAC_CHECK_BUILTIN_FUNC
+# -----------------------
+# This is similar to AC_CHECK_FUNCS(), except that it will work for compiler
+# builtin functions, as that usually fails to.
+# The first argument is the function name, eg [__builtin_clzl], and the
+# second is its argument list, eg [unsigned long x]. The current coding
+# works only for a single argument named x; we might generalize that later.
+# It's assumed that the function's result type is coercible to int.
+# On success, we define "HAVEfuncname" (there's usually more than enough
+# underscores already, so we don't add another one).
+AC_DEFUN([PGAC_CHECK_BUILTIN_FUNC],
+[AC_CACHE_CHECK(for $1, pgac_cv$1,
+[AC_LINK_IFELSE([AC_LANG_PROGRAM([
+int
+call$1($2)
+{
+ return $1(x);
+}], [])],
+[pgac_cv$1=yes],
+[pgac_cv$1=no])])
+if test x"${pgac_cv$1}" = xyes ; then
+AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE$1]), 1,
+ [Define to 1 if your compiler understands $1.])
+fi])# PGAC_CHECK_BUILTIN_FUNC
@@ -588,7 +556,7 @@ AC_DEFUN([PGAC_HAVE_GCC__SYNC_INT32_CAS],
[pgac_cv_gcc_sync_int32_cas="yes"],
[pgac_cv_gcc_sync_int32_cas="no"])])
if test x"$pgac_cv_gcc_sync_int32_cas" = x"yes"; then
- AC_DEFINE(HAVE_GCC__SYNC_INT32_CAS, 1, [Define to 1 if you have __sync_compare_and_swap(int *, int, int).])
+ AC_DEFINE(HAVE_GCC__SYNC_INT32_CAS, 1, [Define to 1 if you have __sync_val_compare_and_swap(int *, int, int).])
fi])# PGAC_HAVE_GCC__SYNC_INT32_CAS
# PGAC_HAVE_GCC__SYNC_INT64_CAS
@@ -603,7 +571,7 @@ AC_DEFUN([PGAC_HAVE_GCC__SYNC_INT64_CAS],
[pgac_cv_gcc_sync_int64_cas="yes"],
[pgac_cv_gcc_sync_int64_cas="no"])])
if test x"$pgac_cv_gcc_sync_int64_cas" = x"yes"; then
- AC_DEFINE(HAVE_GCC__SYNC_INT64_CAS, 1, [Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64).])
+ AC_DEFINE(HAVE_GCC__SYNC_INT64_CAS, 1, [Define to 1 if you have __sync_val_compare_and_swap(int64 *, int64, int64).])
fi])# PGAC_HAVE_GCC__SYNC_INT64_CAS
# PGAC_HAVE_GCC__ATOMIC_INT32_CAS
@@ -635,7 +603,7 @@ AC_DEFUN([PGAC_HAVE_GCC__ATOMIC_INT64_CAS],
[pgac_cv_gcc_atomic_int64_cas="yes"],
[pgac_cv_gcc_atomic_int64_cas="no"])])
if test x"$pgac_cv_gcc_atomic_int64_cas" = x"yes"; then
- AC_DEFINE(HAVE_GCC__ATOMIC_INT64_CAS, 1, [Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64).])
+ AC_DEFINE(HAVE_GCC__ATOMIC_INT64_CAS, 1, [Define to 1 if you have __atomic_compare_exchange_n(int64 *, int64 *, int64).])
fi])# PGAC_HAVE_GCC__ATOMIC_INT64_CAS
# PGAC_SSE42_CRC32_INTRINSICS
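(A sketch of the program PGAC_TEST_PRINTF_ARCHETYPE compiles, with "gnu_printf" hard-coded for illustration; the macro substitutes each candidate archetype and compiles with warnings promoted to errors, so a rejected archetype name, or a complaint about the %m conversion, sends configure on to __syslog__ and finally plain printf.)

/* Compile-only check, e.g. "cc -c -Werror": failure here means the
 * archetype under test is unusable and the next candidate is tried. */
extern void pgac_write(int ignore, const char *fmt, ...)
__attribute__((format(gnu_printf, 2, 3)));

int
main(void)
{
    pgac_write(0, "error %s: %m", "foo");
    return 0;
}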
diff --git a/config/c-library.m4 b/config/c-library.m4
index 34b25081a6e..6f2b0fbb4e6 100644
--- a/config/c-library.m4
+++ b/config/c-library.m4
@@ -82,23 +82,23 @@ AH_VERBATIM(GETTIMEOFDAY_1ARG_,
# PGAC_FUNC_STRERROR_R_INT
# ---------------------------
-# Check if strerror_r() returns an int (SUSv3) rather than a char * (GNU libc)
-# If so, define STRERROR_R_INT
+# Check if strerror_r() returns int (POSIX) rather than char * (GNU libc).
+# If so, define STRERROR_R_INT.
+# The result is uncertain if strerror_r() isn't provided,
+# but we don't much care.
AC_DEFUN([PGAC_FUNC_STRERROR_R_INT],
[AC_CACHE_CHECK(whether strerror_r returns int,
pgac_cv_func_strerror_r_int,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include <string.h>],
-[#ifndef _AIX
-int strerror_r(int, char *, size_t);
-#else
-/* Older AIX has 'int' for the third argument so we don't test the args. */
-int strerror_r();
-#endif])],
+[[char buf[100];
+ switch (strerror_r(1, buf, sizeof(buf)))
+ { case 0: break; default: break; }
+]])],
[pgac_cv_func_strerror_r_int=yes],
[pgac_cv_func_strerror_r_int=no])])
if test x"$pgac_cv_func_strerror_r_int" = xyes ; then
AC_DEFINE(STRERROR_R_INT, 1,
- [Define to 1 if strerror_r() returns a int.])
+ [Define to 1 if strerror_r() returns int.])
fi
])# PGAC_FUNC_STRERROR_R_INT
@@ -171,129 +171,6 @@ AC_DEFUN([PGAC_STRUCT_ADDRINFO],
])])# PGAC_STRUCT_ADDRINFO
-# PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER
-# ---------------------------------------
-# Determine which length modifier snprintf uses for long long int. We
-# handle ll, q, and I64. The result is in shell variable
-# LONG_LONG_INT_MODIFIER.
-#
-# MinGW uses '%I64d', though gcc throws a warning with -Wall,
-# while '%lld' doesn't generate a warning, but doesn't work.
-#
-AC_DEFUN([PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER],
-[AC_MSG_CHECKING([snprintf length modifier for long long int])
-AC_CACHE_VAL(pgac_cv_snprintf_long_long_int_modifier,
-[for pgac_modifier in 'll' 'q' 'I64'; do
-AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <stdio.h>
-#include <string.h>
-typedef long long int ac_int64;
-#define INT64_FORMAT "%${pgac_modifier}d"
-
-ac_int64 a = 20000001;
-ac_int64 b = 40000005;
-
-int does_int64_snprintf_work()
-{
- ac_int64 c;
- char buf[100];
-
- if (sizeof(ac_int64) != 8)
- return 0; /* doesn't look like the right size */
-
- c = a * b;
- snprintf(buf, 100, INT64_FORMAT, c);
- if (strcmp(buf, "800000140000005") != 0)
- return 0; /* either multiply or snprintf is busted */
- return 1;
-}
-
-int
-main() {
- return (! does_int64_snprintf_work());
-}]])],
-[pgac_cv_snprintf_long_long_int_modifier=$pgac_modifier; break],
-[],
-[pgac_cv_snprintf_long_long_int_modifier=cross; break])
-done])dnl AC_CACHE_VAL
-
-LONG_LONG_INT_MODIFIER=''
-
-case $pgac_cv_snprintf_long_long_int_modifier in
- cross) AC_MSG_RESULT([cannot test (not on host machine)]);;
- ?*) AC_MSG_RESULT([$pgac_cv_snprintf_long_long_int_modifier])
- LONG_LONG_INT_MODIFIER=$pgac_cv_snprintf_long_long_int_modifier;;
- *) AC_MSG_RESULT(none);;
-esac])# PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER
-
-
-# PGAC_FUNC_SNPRINTF_ARG_CONTROL
-# ---------------------------------------
-# Determine if snprintf supports %1$ argument selection, e.g. %5$ selects
-# the fifth argument after the printf format string.
-# This is not in the C99 standard, but in the Single Unix Specification (SUS).
-# It is used in our language translation strings.
-#
-AC_DEFUN([PGAC_FUNC_SNPRINTF_ARG_CONTROL],
-[AC_MSG_CHECKING([whether snprintf supports argument control])
-AC_CACHE_VAL(pgac_cv_snprintf_arg_control,
-[AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <stdio.h>
-#include <string.h>
-
-int main()
-{
- char buf[100];
-
- /* can it swap arguments? */
- snprintf(buf, 100, "%2\$d %1\$d", 3, 4);
- if (strcmp(buf, "4 3") != 0)
- return 1;
- return 0;
-}]])],
-[pgac_cv_snprintf_arg_control=yes],
-[pgac_cv_snprintf_arg_control=no],
-[pgac_cv_snprintf_arg_control=cross])
-])dnl AC_CACHE_VAL
-AC_MSG_RESULT([$pgac_cv_snprintf_arg_control])
-])# PGAC_FUNC_SNPRINTF_ARG_CONTROL
-
-# PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT
-# ---------------------------------------
-# Determine if snprintf supports the z length modifier for printing
-# size_t-sized variables. That's supported by C99 and POSIX but not
-# all platforms play ball, so we must test whether it's working.
-#
-AC_DEFUN([PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT],
-[AC_MSG_CHECKING([whether snprintf supports the %z modifier])
-AC_CACHE_VAL(pgac_cv_snprintf_size_t_support,
-[AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <stdio.h>
-#include <string.h>
-
-int main()
-{
- char bufz[100];
- char buf64[100];
-
- /*
- * Print the largest unsigned number fitting in a size_t using both %zu
- * and the previously-determined format for 64-bit integers. Note that
- * we don't run this code unless we know snprintf handles 64-bit ints.
- */
- bufz[0] = '\0'; /* in case snprintf fails to emit anything */
- snprintf(bufz, sizeof(bufz), "%zu", ~((size_t) 0));
- snprintf(buf64, sizeof(buf64), "%" INT64_MODIFIER "u",
- (unsigned PG_INT64_TYPE) ~((size_t) 0));
- if (strcmp(bufz, buf64) != 0)
- return 1;
- return 0;
-}]])],
-[pgac_cv_snprintf_size_t_support=yes],
-[pgac_cv_snprintf_size_t_support=no],
-[pgac_cv_snprintf_size_t_support=cross])
-])dnl AC_CACHE_VAL
-AC_MSG_RESULT([$pgac_cv_snprintf_size_t_support])
-])# PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT
-
-
# PGAC_TYPE_LOCALE_T
# ------------------
# Check for the locale_t type and find the right header file. macOS
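(A standalone sketch of the rewritten strerror_r probe shown above: instead of redeclaring the function, which is what the removed AIX special case had to work around, it switches on the return value, and switch() accepts only an integer controlling expression.)

/* Compiles only when strerror_r() has the int-returning POSIX
 * signature; with the GNU char *-returning variant the switch()
 * is a type error and the test reports "no". */
#include <string.h>

int
main(void)
{
    char buf[100];

    switch (strerror_r(1, buf, sizeof(buf)))
    {
        case 0:
            break;
        default:
            break;
    }
    return 0;
}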
diff --git a/config/check_decls.m4 b/config/check_decls.m4
new file mode 100644
index 00000000000..f1b90c54301
--- /dev/null
+++ b/config/check_decls.m4
@@ -0,0 +1,116 @@
+# config/check_decls.m4
+
+# This file redefines the standard Autoconf macro _AC_CHECK_DECL_BODY,
+# and adds a supporting function _AC_UNDECLARED_WARNING, to make
+# AC_CHECK_DECLS behave correctly when checking for built-in library
+# functions with clang.
+
+# This is based on commit 82ef7805faffa151e724aa76c245ec590d174580
+# in the Autoconf git repository. We can drop it if they ever get
+# around to releasing a new version of Autoconf. In the meantime,
+# it's distributed under Autoconf's license:
+
+# This file is part of Autoconf. This program is free
+# software; you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the Autoconf Configure Script Exception,
+# version 3.0, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License
+# and a copy of the Autoconf Configure Script Exception along with
+# this program; see the files COPYINGv3 and COPYING.EXCEPTION
+# respectively. If not, see <https://www.gnu.org/licenses/>.
+
+# Written by David MacKenzie, with help from
+# Franc,ois Pinard, Karl Berry, Richard Pixley, Ian Lance Taylor,
+# Roland McGrath, Noah Friedman, david d zuhn, and many others.
+
+
+# _AC_UNDECLARED_WARNING
+# ----------------------
+# Set ac_[]_AC_LANG_ABBREV[]_decl_warn_flag=yes if the compiler uses a warning,
+# not a more-customary error, to report some undeclared identifiers. Fail when
+# an affected compiler warns also on valid input. _AC_PROG_PREPROC_WORKS_IFELSE
+# solves a related problem.
+AC_DEFUN([_AC_UNDECLARED_WARNING],
+[# The Clang compiler raises a warning for an undeclared identifier that matches
+# a compiler builtin function. All extant Clang versions are affected, as of
+# Clang 3.6.0. Test a builtin known to every version. This problem affects the
+# C and Objective C languages, but Clang does report an error under C++ and
+# Objective C++.
+#
+# Passing -fno-builtin to the compiler would suppress this problem. That
+# strategy would have the advantage of being insensitive to stray warnings, but
+# it would make tests less realistic.
+AC_CACHE_CHECK([how $[]_AC_CC[] reports undeclared, standard C functions],
+[ac_cv_[]_AC_LANG_ABBREV[]_decl_report],
+[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [(void) strchr;])],
+ [AS_IF([test -s conftest.err], [dnl
+ # For AC_CHECK_DECL to react to warnings, the compiler must be silent on
+ # valid AC_CHECK_DECL input. No library function is consistently available
+ # on freestanding implementations, so test against a dummy declaration.
+ # Include always-available headers on the off chance that they somehow
+ # elicit warnings.
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([dnl
+#include <float.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stddef.h>
+extern void ac_decl (int, char *);],
+[@%:@ifdef __cplusplus
+ (void) ac_decl ((int) 0, (char *) 0);
+ (void) ac_decl;
+@%:@else
+ (void) ac_decl;
+@%:@endif
+])],
+ [AS_IF([test -s conftest.err],
+ [AC_MSG_FAILURE([cannot detect from compiler exit status or warnings])],
+ [ac_cv_[]_AC_LANG_ABBREV[]_decl_report=warning])],
+ [AC_MSG_FAILURE([cannot compile a simple declaration test])])],
+ [AC_MSG_FAILURE([compiler does not report undeclared identifiers])])],
+ [ac_cv_[]_AC_LANG_ABBREV[]_decl_report=error])])
+
+case $ac_cv_[]_AC_LANG_ABBREV[]_decl_report in
+ warning) ac_[]_AC_LANG_ABBREV[]_decl_warn_flag=yes ;;
+ *) ac_[]_AC_LANG_ABBREV[]_decl_warn_flag= ;;
+esac
+])# _AC_UNDECLARED_WARNING
+
+# _AC_CHECK_DECL_BODY
+# -------------------
+# Shell function body for AC_CHECK_DECL.
+m4_define([_AC_CHECK_DECL_BODY],
+[ AS_LINENO_PUSH([$[]1])
+ # Initialize each $ac_[]_AC_LANG_ABBREV[]_decl_warn_flag once.
+ AC_DEFUN([_AC_UNDECLARED_WARNING_]_AC_LANG_ABBREV,
+ [_AC_UNDECLARED_WARNING])dnl
+ AC_REQUIRE([_AC_UNDECLARED_WARNING_]_AC_LANG_ABBREV)dnl
+ [as_decl_name=`echo $][2|sed 's/ *(.*//'`]
+ [as_decl_use=`echo $][2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'`]
+ AC_CACHE_CHECK([whether $as_decl_name is declared], [$[]3],
+ [ac_save_werror_flag=$ac_[]_AC_LANG_ABBREV[]_werror_flag
+ ac_[]_AC_LANG_ABBREV[]_werror_flag="$ac_[]_AC_LANG_ABBREV[]_decl_warn_flag$ac_[]_AC_LANG_ABBREV[]_werror_flag"
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([$[]4],
+[@%:@ifndef $[]as_decl_name
+@%:@ifdef __cplusplus
+ (void) $[]as_decl_use;
+@%:@else
+ (void) $[]as_decl_name;
+@%:@endif
+@%:@endif
+])],
+ [AS_VAR_SET([$[]3], [yes])],
+ [AS_VAR_SET([$[]3], [no])])
+ ac_[]_AC_LANG_ABBREV[]_werror_flag=$ac_save_werror_flag])
+ AS_LINENO_POP
+])# _AC_CHECK_DECL_BODY
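(The clang behavior that _AC_UNDECLARED_WARNING probes for can be reproduced with the file below, matching the "(void) strchr;" test above: strchr is formally undeclared, yet it matches a compiler builtin, so affected clang versions emit only a warning where gcc reports an error; the macro compiles this and checks whether anything landed on stderr.)

/* No <string.h>, so strchr has no declaration in scope. gcc: error.
 * clang (through at least 3.6): warning only, because the name
 * matches a builtin; AC_CHECK_DECLS must count that as a failure. */
int
main(void)
{
    (void) strchr;
    return 0;
}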
diff --git a/config/config.guess b/config/config.guess
index faa63aa9429..79d1317f52b 100644
--- a/config/config.guess
+++ b/config/config.guess
@@ -1,8 +1,8 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright 1992-2017 Free Software Foundation, Inc.
+# Copyright 1992-2019 Free Software Foundation, Inc.
-timestamp='2017-05-11'
+timestamp='2019-03-04'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -15,7 +15,7 @@ timestamp='2017-05-11'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -27,7 +27,7 @@ timestamp='2017-05-11'
# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
#
# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
#
# Please send patches to <config-patches@gnu.org>.
@@ -39,7 +39,7 @@ Usage: $0 [OPTION]
Output the configuration name of the system \`$me' is run on.
-Operation modes:
+Options:
-h, --help print this help, then exit
-t, --time-stamp print date of last modification, then exit
-v, --version print version number, then exit
@@ -50,7 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright 1992-2017 Free Software Foundation, Inc.
+Copyright 1992-2019 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -84,8 +84,6 @@ if test $# != 0; then
exit 1
fi
-trap 'exit 1' 1 2 15
-
# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
# compiler to aid in system detection is discouraged as it requires
# temporary files to be created and, as you can see below, it is a
@@ -96,34 +94,38 @@ trap 'exit 1' 1 2 15
# Portable tmp directory creation inspired by the Autoconf team.
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
+tmp=
+# shellcheck disable=SC2172
+trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15
+
+set_cc_for_build() {
+ : "${TMPDIR=/tmp}"
+ # shellcheck disable=SC2039
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; }
+ dummy=$tmp/dummy
+ case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in
+ ,,) echo "int x;" > "$dummy.c"
+ for driver in cc gcc c89 c99 ; do
+ if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$driver"
+ break
+ fi
+ done
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+ esac
+}
# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
# (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+if test -f /.attbin/uname ; then
PATH=$PATH:/.attbin ; export PATH
fi
@@ -132,14 +134,14 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-case "${UNAME_SYSTEM}" in
+case "$UNAME_SYSTEM" in
Linux|GNU|GNU/*)
# If the system lacks a compiler, then just pick glibc.
# We could probably try harder.
LIBC=gnu
- eval $set_cc_for_build
- cat <<-EOF > $dummy.c
+ set_cc_for_build
+ cat <<-EOF > "$dummy.c"
#include <features.h>
#if defined(__UCLIBC__)
LIBC=uclibc
@@ -149,13 +151,20 @@ Linux|GNU|GNU/*)
LIBC=gnu
#endif
EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+ eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`"
+
+ # If ldd exists, use it to detect musl libc.
+ if command -v ldd >/dev/null && \
+ ldd --version 2>&1 | grep -q ^musl
+ then
+ LIBC=musl
+ fi
;;
esac
# Note: order is significant - the case branches are not exclusive.
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
*:NetBSD:*:*)
# NetBSD (nbsd) targets should (where applicable) match one or
# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
@@ -169,30 +178,30 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# portion of the name. We always set it to "unknown".
sysctl="sysctl -n hw.machine_arch"
UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
- /sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || \
+ "/sbin/$sysctl" 2>/dev/null || \
+ "/usr/sbin/$sysctl" 2>/dev/null || \
echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
+ case "$UNAME_MACHINE_ARCH" in
armeb) machine=armeb-unknown ;;
arm*) machine=arm-unknown ;;
sh3el) machine=shl-unknown ;;
sh3eb) machine=sh-unknown ;;
sh5el) machine=sh5le-unknown ;;
earmv*)
- arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
- endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'`
- machine=${arch}${endian}-unknown
+ arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+ endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'`
+ machine="${arch}${endian}"-unknown
;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
esac
# The Operating System including object format, if it has switched
# to ELF recently (or will in the future) and ABI.
- case "${UNAME_MACHINE_ARCH}" in
+ case "$UNAME_MACHINE_ARCH" in
earm*)
os=netbsdelf
;;
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
+ set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ELF__
then
@@ -208,10 +217,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
;;
esac
# Determine ABI tags.
- case "${UNAME_MACHINE_ARCH}" in
+ case "$UNAME_MACHINE_ARCH" in
earm*)
expr='s/^earmv[0-9]/-eabi/;s/eb$//'
- abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"`
+ abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"`
;;
esac
# The OS release
@@ -219,46 +228,55 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# thus, need a distinct triplet. However, they do not need
# kernel version information, so it can be replaced with a
# suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
+ case "$UNAME_VERSION" in
Debian*)
release='-gnu'
;;
*)
- release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2`
+ release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2`
;;
esac
# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
# contains redundant information, the shorter form:
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}${abi}"
+ echo "$machine-${os}${release}${abi-}"
exit ;;
*:Bitrig:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
exit ;;
*:OpenBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
exit ;;
*:LibertyBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE}
+ echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
+ exit ;;
+ *:MidnightBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE"
exit ;;
*:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE"
exit ;;
*:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
exit ;;
macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
exit ;;
*:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE"
exit ;;
*:Sortix:*:*)
- echo ${UNAME_MACHINE}-unknown-sortix
+ echo "$UNAME_MACHINE"-unknown-sortix
+ exit ;;
+ *:Redox:*:*)
+ echo "$UNAME_MACHINE"-unknown-redox
exit ;;
+ mips:OSF1:*.*)
+ echo mips-dec-osf1
+ exit ;;
alpha:OSF1:*:*)
case $UNAME_RELEASE in
*4.0)
@@ -310,28 +328,19 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# A Tn.n version is a released field test version.
# A Xn.n version is an unreleased experimental baselevel.
# 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`"
# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
exitcode=$?
trap '' 0
exit $exitcode ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
Amiga*:UNIX_System_V:4.0:*)
echo m68k-unknown-sysv4
exit ;;
*:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
+ echo "$UNAME_MACHINE"-unknown-amigaos
exit ;;
*:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
+ echo "$UNAME_MACHINE"-unknown-morphos
exit ;;
*:OS/390:*:*)
echo i370-ibm-openedition
@@ -343,7 +352,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
echo powerpc-ibm-os400
exit ;;
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
+ echo arm-acorn-riscix"$UNAME_RELEASE"
exit ;;
arm*:riscos:*:*|arm*:RISCOS:*:*)
echo arm-unknown-riscos
@@ -370,19 +379,19 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
sparc) echo sparc-icl-nx7; exit ;;
esac ;;
s390x:SunOS:*:*)
- echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
exit ;;
sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
exit ;;
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
exit ;;
i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
- echo i386-pc-auroraux${UNAME_RELEASE}
+ echo i386-pc-auroraux"$UNAME_RELEASE"
exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
- eval $set_cc_for_build
+ set_cc_for_build
SUN_ARCH=i386
# If there is a compiler, see if it is configured for 64-bit objects.
# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
@@ -395,13 +404,13 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
SUN_ARCH=x86_64
fi
fi
- echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
exit ;;
sun4*:SunOS:6*:*)
# According to config.sub, this is the proper way to canonicalize
# SunOS6. Hard to guess exactly what SunOS6 will be like, but
# it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
exit ;;
sun4*:SunOS:*:*)
case "`/usr/bin/arch -k`" in
@@ -410,25 +419,25 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
;;
esac
# Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`"
exit ;;
sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
+ echo m68k-sun-sunos"$UNAME_RELEASE"
exit ;;
sun*:*:4.2BSD:*)
UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3
+ test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
case "`/bin/arch`" in
sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
+ echo m68k-sun-sunos"$UNAME_RELEASE"
;;
sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
+ echo sparc-sun-sunos"$UNAME_RELEASE"
;;
esac
exit ;;
aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
+ echo sparc-auspex-sunos"$UNAME_RELEASE"
exit ;;
# The situation for MiNT is a little confusing. The machine name
# can be virtually everything (everything which is not
@@ -439,44 +448,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# MiNT. But MiNT is downward compatible to TOS, so this should
# be no problem.
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
+ echo m68k-atari-mint"$UNAME_RELEASE"
exit ;;
atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
+ echo m68k-atari-mint"$UNAME_RELEASE"
exit ;;
*falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
+ echo m68k-atari-mint"$UNAME_RELEASE"
exit ;;
milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
+ echo m68k-milan-mint"$UNAME_RELEASE"
exit ;;
hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
+ echo m68k-hades-mint"$UNAME_RELEASE"
exit ;;
*:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
+ echo m68k-unknown-mint"$UNAME_RELEASE"
exit ;;
m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
+ echo m68k-apple-machten"$UNAME_RELEASE"
exit ;;
powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
+ echo powerpc-apple-machten"$UNAME_RELEASE"
exit ;;
RISC*:Mach:*:*)
echo mips-dec-mach_bsd4.3
exit ;;
RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
+ echo mips-dec-ultrix"$UNAME_RELEASE"
exit ;;
VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
+ echo vax-dec-ultrix"$UNAME_RELEASE"
exit ;;
2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
+ echo clipper-intergraph-clix"$UNAME_RELEASE"
exit ;;
mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
#ifdef __cplusplus
#include <stdio.h>  /* for printf() prototype */
int main (int argc, char *argv[]) {
@@ -485,23 +494,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
#endif
#if defined (host_mips) && defined (MIPSEB)
#if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0);
#endif
#if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0);
#endif
#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0);
#endif
#endif
exit (-1);
}
EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
+ dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`"$dummy" "$dummyarg"` &&
{ echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
+ echo mips-mips-riscos"$UNAME_RELEASE"
exit ;;
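# This pattern recurs throughout the script: write a small C probe via
# a heredoc, compile it with $CC_FOR_BUILD, run it to print the triple,
# and fall back to a plain echo when no compiler is available.
# Illustrative: with UNAME_RELEASE=4.52, dummyarg becomes "4" and a
# SYSTYPE_SYSV kernel prints "mips-mips-riscos4sysv".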
Motorola:PowerMAX_OS:*:*)
echo powerpc-motorola-powermax
@@ -527,17 +536,17 @@ EOF
AViiON:dgux:*:*)
# DG/UX returns AViiON for all architectures
UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ]
then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
+ if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \
+ [ "$TARGET_BINARY_INTERFACE"x = x ]
then
- echo m88k-dg-dgux${UNAME_RELEASE}
+ echo m88k-dg-dgux"$UNAME_RELEASE"
else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ echo m88k-dg-dguxbcs"$UNAME_RELEASE"
fi
else
- echo i586-dg-dgux${UNAME_RELEASE}
+ echo i586-dg-dgux"$UNAME_RELEASE"
fi
exit ;;
M88*:DolphinOS:*:*) # DolphinOS (SVR3)
@@ -554,7 +563,7 @@ EOF
echo m68k-tektronix-bsd
exit ;;
*:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`"
exit ;;
????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
@@ -566,14 +575,14 @@ EOF
if [ -x /usr/bin/oslevel ] ; then
IBM_REV=`/usr/bin/oslevel`
else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
exit ;;
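# Illustrative: if /usr/bin/oslevel prints 5.1.0.0, this arm yields
# "ia64-ibm-aix5.1.0.0"; without oslevel, IBM_REV falls back to
# "`uname -v`.`uname -r`", e.g. "5.1".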
*:AIX:2:3)
if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
#include <sys/systemcfg.h>
main()
@@ -584,7 +593,7 @@ EOF
exit(0);
}
EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"`
then
echo "$SYSTEM_NAME"
else
@@ -598,7 +607,7 @@ EOF
exit ;;
*:AIX:*:[4567])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
else
IBM_ARCH=powerpc
@@ -607,18 +616,18 @@ EOF
IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
exit ;;
*:AIX:*:*)
echo rs6000-ibm-aix
exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
echo romp-ibm-bsd4.4
exit ;;
ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to
exit ;; # report: romp-ibm BSD 4.3
*:BOSX:*:*)
echo rs6000-bull-bosx
@@ -633,28 +642,28 @@ EOF
echo m68k-hp-bsd4.4
exit ;;
9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
+ HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+ case "$UNAME_MACHINE" in
+ 9000/31?) HP_ARCH=m68000 ;;
+ 9000/[34]??) HP_ARCH=m68k ;;
9000/[678][0-9][0-9])
if [ -x /usr/bin/getconf ]; then
sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
+ case "$sc_cpu_version" in
523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
+ case "$sc_kernel_bits" in
32) HP_ARCH=hppa2.0n ;;
64) HP_ARCH=hppa2.0w ;;
'') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
esac ;;
esac
fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ if [ "$HP_ARCH" = "" ]; then
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
#define _HPUX_SOURCE
#include <stdlib.h>
@@ -687,13 +696,13 @@ EOF
exit (0);
}
EOF
- (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"`
test -z "$HP_ARCH" && HP_ARCH=hppa
fi ;;
esac
- if [ ${HP_ARCH} = hppa2.0w ]
+ if [ "$HP_ARCH" = hppa2.0w ]
then
- eval $set_cc_for_build
+ set_cc_for_build
# hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
# 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
@@ -712,15 +721,15 @@ EOF
HP_ARCH=hppa64
fi
fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
exit ;;
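# Worked example (illustrative values): on an HP-UX 11.11 box where
# getconf reports SC_CPU_VERSION=532 (PA-RISC 2.0) and
# SC_KERNEL_BITS=64, HP_ARCH is hppa2.0w; UNAME_RELEASE=B.11.11 loses
# its "B." prefix:
#   $ echo B.11.11 | sed -e 's/[^.]*.[0B]*//'
#   11.11
# so the result is "hppa2.0w-hp-hpux11.11".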
ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
+ HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux"$HPUX_REV"
exit ;;
3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
#include <unistd.h>
int
main ()
@@ -745,11 +754,11 @@ EOF
exit (0);
}
EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` &&
{ echo "$SYSTEM_NAME"; exit; }
echo unknown-hitachi-hiuxwe2
exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*)
echo hppa1.1-hp-bsd
exit ;;
9000/8??:4.3bsd:*:*)
@@ -758,7 +767,7 @@ EOF
*9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
echo hppa1.0-hp-mpeix
exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*)
echo hppa1.1-hp-osf
exit ;;
hp8??:OSF1:*:*)
@@ -766,9 +775,9 @@ EOF
exit ;;
i*86:OSF1:*:*)
if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
+ echo "$UNAME_MACHINE"-unknown-osf1mk
else
- echo ${UNAME_MACHINE}-unknown-osf1
+ echo "$UNAME_MACHINE"-unknown-osf1
fi
exit ;;
parisc*:Lites*:*:*)
@@ -793,128 +802,120 @@ EOF
echo c4-convex-bsd
exit ;;
CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
exit ;;
CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \
| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
-e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
-e 's/\.[^.]*$/.X/'
exit ;;
CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
exit ;;
CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
exit ;;
CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
exit ;;
*:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
exit ;;
F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'`
echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
5000:UNIX_System_V:4.*:*)
FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+ FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE"
exit ;;
sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
+ echo sparc-unknown-bsdi"$UNAME_RELEASE"
exit ;;
*:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
+ exit ;;
+ arm:FreeBSD:*:*)
+ UNAME_PROCESSOR=`uname -p`
+ set_cc_for_build
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi
+ else
+ echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf
+ fi
exit ;;
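# The probe feeds the bare token __ARM_PCS_VFP to the preprocessor: if
# it comes back unexpanded, the compiler does not define it (soft-float
# ABI, hence -gnueabi); if it is defined it expands away, grep fails,
# and the hard-float -gnueabihf branch is taken.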
*:FreeBSD:*:*)
UNAME_PROCESSOR=`/usr/bin/uname -p`
- case ${UNAME_PROCESSOR} in
+ case "$UNAME_PROCESSOR" in
amd64)
UNAME_PROCESSOR=x86_64 ;;
i386)
UNAME_PROCESSOR=i586 ;;
esac
- echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
exit ;;
i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
+ echo "$UNAME_MACHINE"-pc-cygwin
exit ;;
*:MINGW64*:*)
- echo ${UNAME_MACHINE}-pc-mingw64
+ echo "$UNAME_MACHINE"-pc-mingw64
exit ;;
*:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
+ echo "$UNAME_MACHINE"-pc-mingw32
exit ;;
*:MSYS*:*)
- echo ${UNAME_MACHINE}-pc-msys
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
+ echo "$UNAME_MACHINE"-pc-msys
exit ;;
i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
+ echo "$UNAME_MACHINE"-pc-pw32
exit ;;
*:Interix*:*)
- case ${UNAME_MACHINE} in
+ case "$UNAME_MACHINE" in
x86)
- echo i586-pc-interix${UNAME_RELEASE}
+ echo i586-pc-interix"$UNAME_RELEASE"
exit ;;
authenticamd | genuineintel | EM64T)
- echo x86_64-unknown-interix${UNAME_RELEASE}
+ echo x86_64-unknown-interix"$UNAME_RELEASE"
exit ;;
IA64)
- echo ia64-unknown-interix${UNAME_RELEASE}
+ echo ia64-unknown-interix"$UNAME_RELEASE"
exit ;;
esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- 8664:Windows_NT:*)
- echo x86_64-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
+ echo "$UNAME_MACHINE"-pc-uwin
exit ;;
amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
+ echo x86_64-pc-cygwin
exit ;;
prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
exit ;;
*:GNU:*:*)
# the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`"
exit ;;
*:GNU/*:*:*)
# other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC"
exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
+ *:Minix:*:*)
+ echo "$UNAME_MACHINE"-unknown-minix
exit ;;
aarch64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
aarch64_be:Linux:*:*)
UNAME_MACHINE=aarch64_be
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
@@ -928,140 +929,168 @@ EOF
esac
objdump --private-headers /bin/sh | grep -q ld.so.1
if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
arc:Linux:*:* | arceb:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
arm*:Linux:*:*)
- eval $set_cc_for_build
+ set_cc_for_build
if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_EABI__
then
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
else
if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_PCS_VFP
then
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi
else
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf
fi
fi
exit ;;
avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
cris:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
exit ;;
crisv32:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
exit ;;
e2k:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
frv:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
hexagon:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
i*86:Linux:*:*)
- echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
exit ;;
ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
k1om:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
mips:Linux:*:* | mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ set_cc_for_build
+ IS_GLIBC=0
+ test x"${LIBC}" = xgnu && IS_GLIBC=1
+ sed 's/^ //' << EOF > "$dummy.c"
#undef CPU
- #undef ${UNAME_MACHINE}
- #undef ${UNAME_MACHINE}el
+ #undef mips
+ #undef mipsel
+ #undef mips64
+ #undef mips64el
+ #if ${IS_GLIBC} && defined(_ABI64)
+ LIBCABI=gnuabi64
+ #else
+ #if ${IS_GLIBC} && defined(_ABIN32)
+ LIBCABI=gnuabin32
+ #else
+ LIBCABI=${LIBC}
+ #endif
+ #endif
+
+ #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+ CPU=mipsisa64r6
+ #else
+ #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+ CPU=mipsisa32r6
+ #else
+ #if defined(__mips64)
+ CPU=mips64
+ #else
+ CPU=mips
+ #endif
+ #endif
+ #endif
+
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=${UNAME_MACHINE}el
+ MIPS_ENDIAN=el
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=${UNAME_MACHINE}
+ MIPS_ENDIAN=
#else
- CPU=
+ MIPS_ENDIAN=
#endif
#endif
EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+ eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'`"
+ test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; }
;;
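# Illustrative: on a little-endian 64-bit glibc system the grep/eval
# above reduces the preprocessed heredoc to
#   CPU=mips64
#   MIPS_ENDIAN=el
#   LIBCABI=gnuabi64
# and the reported triple is "mips64el-unknown-linux-gnuabi64".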
mips64el:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
openrisc*:Linux:*:*)
- echo or1k-unknown-linux-${LIBC}
+ echo or1k-unknown-linux-"$LIBC"
exit ;;
or32:Linux:*:* | or1k*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
padre:Linux:*:*)
- echo sparc-unknown-linux-${LIBC}
+ echo sparc-unknown-linux-"$LIBC"
exit ;;
parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-${LIBC}
+ echo hppa64-unknown-linux-"$LIBC"
exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
- PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
- *) echo hppa-unknown-linux-${LIBC} ;;
+ PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
+ PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
+ *) echo hppa-unknown-linux-"$LIBC" ;;
esac
exit ;;
ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-${LIBC}
+ echo powerpc64-unknown-linux-"$LIBC"
exit ;;
ppc:Linux:*:*)
- echo powerpc-unknown-linux-${LIBC}
+ echo powerpc-unknown-linux-"$LIBC"
exit ;;
ppc64le:Linux:*:*)
- echo powerpc64le-unknown-linux-${LIBC}
+ echo powerpc64le-unknown-linux-"$LIBC"
exit ;;
ppcle:Linux:*:*)
- echo powerpcle-unknown-linux-${LIBC}
+ echo powerpcle-unknown-linux-"$LIBC"
exit ;;
riscv32:Linux:*:* | riscv64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
exit ;;
sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
tile*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
exit ;;
x86_64:Linux:*:*)
- echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
exit ;;
xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
@@ -1075,34 +1104,34 @@ EOF
# I am not positive that other SVR4 systems won't match this,
# I just have to hope. -- rms.
# Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
exit ;;
i*86:OS/2:*:*)
# If we were able to find `uname', then EMX Unix compatibility
# is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
+ echo "$UNAME_MACHINE"-pc-os2-emx
exit ;;
i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
+ echo "$UNAME_MACHINE"-unknown-stop
exit ;;
i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
+ echo "$UNAME_MACHINE"-unknown-atheos
exit ;;
i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
+ echo "$UNAME_MACHINE"-pc-syllable
exit ;;
i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
+ echo i386-unknown-lynxos"$UNAME_RELEASE"
exit ;;
i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ echo "$UNAME_MACHINE"-pc-msdosdjgpp
exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ i*86:*:4.*:*)
+ UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'`
if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
fi
exit ;;
i*86:*:5:[678]*)
@@ -1112,12 +1141,12 @@ EOF
*Pentium) UNAME_MACHINE=i586 ;;
*Pent*|*Celeron) UNAME_MACHINE=i686 ;;
esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}"
exit ;;
i*86:*:3.2:*)
if test -f /usr/options/cb.name; then
UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
elif /bin/uname -X 2>/dev/null >/dev/null ; then
UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
@@ -1127,9 +1156,9 @@ EOF
&& UNAME_MACHINE=i686
(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
&& UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
else
- echo ${UNAME_MACHINE}-pc-sysv32
+ echo "$UNAME_MACHINE"-pc-sysv32
fi
exit ;;
pc:*:*:*)
@@ -1149,9 +1178,9 @@ EOF
exit ;;
i860:*:4.*:*) # i860-SVR4
if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4
else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4
fi
exit ;;
mini*:CTIX:SYS*5:*)
@@ -1171,9 +1200,9 @@ EOF
test -r /etc/.relid \
&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4; exit; } ;;
@@ -1182,28 +1211,28 @@ EOF
test -r /etc/.relid \
&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; }
/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
+ echo m68k-unknown-lynxos"$UNAME_RELEASE"
exit ;;
mc68030:UNIX_System_V:4.*:*)
echo m68k-atari-sysv4
exit ;;
TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
+ echo sparc-unknown-lynxos"$UNAME_RELEASE"
exit ;;
rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ echo rs6000-unknown-lynxos"$UNAME_RELEASE"
exit ;;
PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ echo powerpc-unknown-lynxos"$UNAME_RELEASE"
exit ;;
SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
+ echo mips-dde-sysv"$UNAME_RELEASE"
exit ;;
RM*:ReliantUNIX-*:*:*)
echo mips-sni-sysv4
@@ -1214,7 +1243,7 @@ EOF
*:SINIX-*:*:*)
if uname -p 2>/dev/null >/dev/null ; then
UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
+ echo "$UNAME_MACHINE"-sni-sysv4
else
echo ns32k-sni-sysv
fi
@@ -1234,23 +1263,23 @@ EOF
exit ;;
i*86:VOS:*:*)
# From Paul.Green@stratus.com.
- echo ${UNAME_MACHINE}-stratus-vos
+ echo "$UNAME_MACHINE"-stratus-vos
exit ;;
*:VOS:*:*)
# From Paul.Green@stratus.com.
echo hppa1.1-stratus-vos
exit ;;
mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
+ echo m68k-apple-aux"$UNAME_RELEASE"
exit ;;
news*:NEWS-OS:6*:*)
echo mips-sony-newsos6
exit ;;
R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
+ echo mips-nec-sysv"$UNAME_RELEASE"
else
- echo mips-unknown-sysv${UNAME_RELEASE}
+ echo mips-unknown-sysv"$UNAME_RELEASE"
fi
exit ;;
BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
@@ -1269,49 +1298,56 @@ EOF
echo x86_64-unknown-haiku
exit ;;
SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
+ echo sx4-nec-superux"$UNAME_RELEASE"
exit ;;
SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
+ echo sx5-nec-superux"$UNAME_RELEASE"
exit ;;
SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
+ echo sx6-nec-superux"$UNAME_RELEASE"
exit ;;
SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
+ echo sx7-nec-superux"$UNAME_RELEASE"
exit ;;
SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
+ echo sx8-nec-superux"$UNAME_RELEASE"
exit ;;
SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
+ echo sx8r-nec-superux"$UNAME_RELEASE"
exit ;;
SX-ACE:SUPER-UX:*:*)
- echo sxace-nec-superux${UNAME_RELEASE}
+ echo sxace-nec-superux"$UNAME_RELEASE"
exit ;;
Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ echo powerpc-apple-rhapsody"$UNAME_RELEASE"
exit ;;
*:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
exit ;;
*:Darwin:*:*)
UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- eval $set_cc_for_build
+ set_cc_for_build
if test "$UNAME_PROCESSOR" = unknown ; then
UNAME_PROCESSOR=powerpc
fi
- if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then
if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
then
case $UNAME_PROCESSOR in
i386) UNAME_PROCESSOR=x86_64 ;;
powerpc) UNAME_PROCESSOR=powerpc64 ;;
esac
fi
+ # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc
+ if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_PPC >/dev/null
+ then
+ UNAME_PROCESSOR=powerpc
+ fi
fi
elif test "$UNAME_PROCESSOR" = i386 ; then
# Avoid executing cc on OS X 10.9, as it ships with a stub
@@ -1322,7 +1358,7 @@ EOF
# that Apple uses in portable devices.
UNAME_PROCESSOR=x86_64
fi
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
exit ;;
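# The Darwin logic mirrors the ARM probes: a marker wrapped in
# "#ifdef __LP64__" survives preprocessing only on a 64-bit userland,
# promoting i386 to x86_64 (and powerpc to powerpc64); the added
# __POWERPC__ check catches "gcc -arch ppc" builds on 10.4-10.6 where
# uname -p still reports i386. Illustrative result on 64-bit Snow
# Leopard: "x86_64-apple-darwin10.8.0".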
*:procnto*:*:* | *:QNX:[0123456789]*:*)
UNAME_PROCESSOR=`uname -p`
@@ -1330,22 +1366,25 @@ EOF
UNAME_PROCESSOR=i386
UNAME_MACHINE=pc
fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE"
exit ;;
*:QNX:*:4*)
echo i386-pc-qnx
exit ;;
NEO-*:NONSTOP_KERNEL:*:*)
- echo neo-tandem-nsk${UNAME_RELEASE}
+ echo neo-tandem-nsk"$UNAME_RELEASE"
exit ;;
NSE-*:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
+ echo nse-tandem-nsk"$UNAME_RELEASE"
exit ;;
NSR-*:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
+ echo nsr-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSV-*:NONSTOP_KERNEL:*:*)
+ echo nsv-tandem-nsk"$UNAME_RELEASE"
exit ;;
NSX-*:NONSTOP_KERNEL:*:*)
- echo nsx-tandem-nsk${UNAME_RELEASE}
+ echo nsx-tandem-nsk"$UNAME_RELEASE"
exit ;;
*:NonStop-UX:*:*)
echo mips-compaq-nonstopux
@@ -1354,18 +1393,19 @@ EOF
echo bs2000-siemens-sysv
exit ;;
DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE"
exit ;;
*:Plan9:*:*)
# "uname -m" is not consistent, so use $cputype instead. 386
# is converted to i386 for consistency with other x86
# operating systems.
+ # shellcheck disable=SC2154
if test "$cputype" = 386; then
UNAME_MACHINE=i386
else
UNAME_MACHINE="$cputype"
fi
- echo ${UNAME_MACHINE}-unknown-plan9
+ echo "$UNAME_MACHINE"-unknown-plan9
exit ;;
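# SC2154 is shellcheck's "variable referenced but not assigned"
# warning; it is silenced here because Plan 9 sets $cputype in the
# environment, so the assignment happens outside this script.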
*:TOPS-10:*:*)
echo pdp10-unknown-tops10
@@ -1386,14 +1426,14 @@ EOF
echo pdp10-unknown-its
exit ;;
SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
+ echo mips-sei-seiux"$UNAME_RELEASE"
exit ;;
*:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
exit ;;
*:*VMS:*:*)
UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
+ case "$UNAME_MACHINE" in
A*) echo alpha-dec-vms ; exit ;;
I*) echo ia64-dec-vms ; exit ;;
V*) echo vax-dec-vms ; exit ;;
@@ -1402,32 +1442,171 @@ EOF
echo i386-pc-xenix
exit ;;
i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'`
+ echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`"
exit ;;
i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
+ echo "$UNAME_MACHINE"-pc-rdos
exit ;;
i*86:AROS:*:*)
- echo ${UNAME_MACHINE}-pc-aros
+ echo "$UNAME_MACHINE"-pc-aros
exit ;;
x86_64:VMkernel:*:*)
- echo ${UNAME_MACHINE}-unknown-esx
+ echo "$UNAME_MACHINE"-unknown-esx
exit ;;
amd64:Isilon\ OneFS:*:*)
echo x86_64-unknown-onefs
exit ;;
+ *:Unleashed:*:*)
+ echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE"
+ exit ;;
+esac
+
+# No uname command or uname output not recognized.
+set_cc_for_build
+cat > "$dummy.c" <
+#include
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+#endif
+
+#if defined (vax)
+#if !defined (ultrix)
+#include <sys/param.h>
+#if defined (BSD)
+#if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+#else
+#if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#endif
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#else
+ printf ("vax-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#include <signal.h>
+#if defined(_SIZE_T_) /* >= ULTRIX4 */
+ printf ("mips-dec-ultrix4\n"); exit (0);
+#else
+#if defined(ULTRIX3) || defined(ultrix3) || defined(SIGLOST)
+ printf ("mips-dec-ultrix3\n"); exit (0);
+#endif
+#endif
+#endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; }
+
+echo "$0: unable to guess system type" >&2
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM" in
+ mips:Linux | mips64:Linux)
+ # If we got here on MIPS GNU/Linux, output extra information.
+ cat >&2 <<EOF
+
+NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
+the system type. Please install a C compiler and try again.
+EOF
+ ;;
+esac
+
+cat >&2 <<EOF
+
+This script (version $timestamp), has failed to recognize the
+operating system you are using. If your script is old, overwrite *all*
+copies of config.guess and config.sub with the latest versions from:
+
+  https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+and
+  https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+If $0 has already exited with 'Bad configuration' or 'missing argument',
+please send the following data and any information you think might be
+pertinent to config-patches@gnu.org to provide the necessary
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
uname -m = `(uname -m) 2>/dev/null || echo unknown`
uname -r = `(uname -r) 2>/dev/null || echo unknown`
uname -s = `(uname -s) 2>/dev/null || echo unknown`
uname -v = `(uname -v) 2>/dev/null || echo unknown`

/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
/bin/uname -X = `(/bin/uname -X) 2>/dev/null`

hostinfo = `(hostinfo) 2>/dev/null`
/bin/universe = `(/bin/universe) 2>/dev/null`
/usr/bin/arch -k = `(arch -k) 2>/dev/null`
/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
+UNAME_MACHINE = "$UNAME_MACHINE"
+UNAME_RELEASE = "$UNAME_RELEASE"
+UNAME_SYSTEM = "$UNAME_SYSTEM"
+UNAME_VERSION = "$UNAME_VERSION"
EOF
exit 1
# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
+# eval: (add-hook 'before-save-hook 'time-stamp)
# time-stamp-start: "timestamp='"
# time-stamp-format: "%:y-%02m-%02d"
# time-stamp-end: "'"
diff --git a/config/config.sub b/config/config.sub
index 40ea5dfe115..f53af5a2da7 100644
--- a/config/config.sub
+++ b/config/config.sub
@@ -1,8 +1,8 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright 1992-2017 Free Software Foundation, Inc.
+# Copyright 1992-2019 Free Software Foundation, Inc.
-timestamp='2017-04-02'
+timestamp='2019-01-05'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -15,7 +15,7 @@ timestamp='2017-04-02'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -33,7 +33,7 @@ timestamp='2017-04-02'
# Otherwise, we print the canonical config type on stdout and succeed.
# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
@@ -57,7 +57,7 @@ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
Canonicalize a configuration name.
-Operation modes:
+Options:
-h, --help print this help, then exit
-t, --time-stamp print date of last modification, then exit
-v, --version print version number, then exit
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright 1992-2017 Free Software Foundation, Inc.
+Copyright 1992-2019 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -89,12 +89,12 @@ while test $# -gt 0 ; do
- ) # Use stdin as input.
break ;;
-* )
- echo "$me: invalid option $1$help"
+ echo "$me: invalid option $1$help" >&2
exit 1 ;;
*local*)
# First pass through any local machine types.
- echo $1
+ echo "$1"
exit ;;
* )
@@ -110,1252 +110,1167 @@ case $# in
exit 1;;
esac
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
- linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
- knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
- kopensolaris*-gnu* | cloudabi*-eabi* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- android-linux)
- os=-linux-android
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
+# Split fields of configuration type
+# shellcheck disable=SC2162
+IFS="-" read field1 field2 field3 field4 <&2
+ exit 1
;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ *-*-*-*)
+ basic_machine=$field1-$field2
+ os=$field3-$field4
;;
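# Worked example: "x86_64-pc-linux-gnu" reads into field1..field4 as
# x86_64 / pc / linux / gnu, so this arm sets basic_machine=x86_64-pc
# and os=linux-gnu.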
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ *-*-*)
+ # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two
+ # parts
+ maybe_os=$field2-$field3
+ case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \
+ | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \
+ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \
+ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \
+ | storm-chaos* | os2-emx* | rtmk-nova*)
+ basic_machine=$field1
+ os=$maybe_os
+ ;;
+ android-linux)
+ basic_machine=$field1-unknown
+ os=linux-android
+ ;;
+ *)
+ basic_machine=$field1-$field2
+ os=$field3
+ ;;
+ esac
;;
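# Worked example: "i686-linux-gnu" has maybe_os=linux-gnu, which the
# list above recognizes, giving basic_machine=i686 and os=linux-gnu;
# "i686-pc-linux" matches nothing there, so the default arm yields
# basic_machine=i686-pc and os=linux.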
- -psos*)
- os=-psos
+ *-*)
+ # A lone config we happen to match not fitting any pattern
+ case $field1-$field2 in
+ decstation-3100)
+ basic_machine=mips-dec
+ os=
+ ;;
+ *-*)
+ # Second component is usually, but not always the OS
+ case $field2 in
+ # Prevent following clause from handling this valid os
+ sun*os*)
+ basic_machine=$field1
+ os=$field2
+ ;;
+ # Manufacturers
+ dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \
+ | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \
+ | unicom* | ibm* | next | hp | isi* | apollo | altos* \
+ | convergent* | ncr* | news | 32* | 3600* | 3100* \
+ | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \
+ | ultra | tti* | harris | dolphin | highlevel | gould \
+ | cbm | ns | masscomp | apple | axis | knuth | cray \
+ | microblaze* | sim | cisco \
+ | oki | wec | wrs | winbond)
+ basic_machine=$field1-$field2
+ os=
+ ;;
+ *)
+ basic_machine=$field1
+ os=$field2
+ ;;
+ esac
+ ;;
+ esac
;;
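# Worked example: "vax-ultrix" treats the second field as the OS
# (basic_machine=vax, os=ultrix), while "mips-dec" matches the
# manufacturer list above (basic_machine=mips-dec, os left empty).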
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
+ *)
+ # Convert single-component short-hands not valid as part of
+ # multi-component configurations.
+ case $field1 in
+ 386bsd)
+ basic_machine=i386-pc
+ os=bsd
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=scout
+ ;;
+ alliant)
+ basic_machine=fx80-alliant
+ os=
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ os=
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=sysv
+ ;;
+ amiga)
+ basic_machine=m68k-unknown
+ os=
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=linux
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=bsd
+ ;;
+ cray)
+ basic_machine=j90-cray
+ os=unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ os=
+ ;;
+ da30)
+ basic_machine=m68k-da30
+ os=
+ ;;
+ decstation | pmax | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ os=
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=msdosdjgpp
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=ebmon
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=ose
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=go32
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=sysv3
+ ;;
+ hp300)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=hpux
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=proelf
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=mach
+ ;;
+ vsta)
+ basic_machine=i386-pc
+ os=vsta
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=linux
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=sysv
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ os=mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=mingw32ce
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=morphos
+ ;;
+ moxiebox)
+ basic_machine=moxie-unknown
+ os=moxiebox
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=msdos
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-pc
+ os=netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=sysv
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=nonstopux
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=os68k
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=linux
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=coff
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=udi
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ os=
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=sysv2
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ os=
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ os=
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=sunos4
+ ;;
+ sun3)
+ basic_machine=m68k-sun
+ os=
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=sunos4
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ os=
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=solaris2
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ os=
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=unicos
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=tops20
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=vms
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=vxworks
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=mingw32
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=unicos
+ ;;
+ *)
+ basic_machine=$1
+ os=
+ ;;
+ esac
;;
esac
-# Decode aliases for certain CPU-COMPANY combinations.
+# Decode 1-component or ad-hoc basic machines
case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | aarch64 | aarch64_be \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arceb \
- | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
- | avr | avr32 \
- | ba \
- | be32 | be64 \
- | bfin \
- | c4x | c8051 | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | e2k | epiphany \
- | fido | fr30 | frv | ft32 \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | hexagon \
- | i370 | i860 | i960 | ia16 | ia64 \
- | ip2k | iq2000 \
- | k1om \
- | le32 | le64 \
- | lm32 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64octeon | mips64octeonel \
- | mips64orion | mips64orionel \
- | mips64r5900 | mips64r5900el \
- | mips64vr | mips64vrel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa32r6 | mipsisa32r6el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64r6 | mipsisa64r6el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipsr5900 | mipsr5900el \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | moxie \
- | mt \
- | msp430 \
- | nds32 | nds32le | nds32be \
- | nios | nios2 | nios2eb | nios2el \
- | ns16k | ns32k \
- | open8 | or1k | or1knd | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle \
- | pru \
- | pyramid \
- | riscv32 | riscv64 \
- | rl78 | rx \
- | score \
- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu \
- | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
- | ubicom32 \
- | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
- | visium \
- | wasm32 \
- | we32k \
- | x86 | xc16x | xstormy16 | xtensa \
- | z8k | z80)
- basic_machine=$basic_machine-unknown
- ;;
- c54x)
- basic_machine=tic54x-unknown
- ;;
- c55x)
- basic_machine=tic55x-unknown
- ;;
- c6x)
- basic_machine=tic6x-unknown
- ;;
- leon|leon[3-9])
- basic_machine=sparc-$basic_machine
- ;;
- m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
- basic_machine=$basic_machine-unknown
- os=-none
+ # Here we handle the default manufacturer of certain CPU types. It is in
+ # some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ cpu=hppa1.1
+ vendor=winbond
;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ op50n)
+ cpu=hppa1.1
+ vendor=oki
;;
- ms1)
- basic_machine=mt-unknown
+ op60c)
+ cpu=hppa1.1
+ vendor=oki
;;
-
- strongarm | thumb | xscale)
- basic_machine=arm-unknown
+ ibm*)
+ cpu=i370
+ vendor=ibm
;;
- xgate)
- basic_machine=$basic_machine-unknown
- os=-none
+ orion105)
+ cpu=clipper
+ vendor=highlevel
;;
- xscaleeb)
- basic_machine=armeb-unknown
+ mac | mpw | mac-mpw)
+ cpu=m68k
+ vendor=apple
;;
-
- xscaleel)
- basic_machine=armel-unknown
+ pmac | pmac-mpw)
+ cpu=powerpc
+ vendor=apple
;;
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | aarch64-* | aarch64_be-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | ba-* \
- | be32-* | be64-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* \
- | c8051-* | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | e2k-* | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | hexagon-* \
- | i*86-* | i860-* | i960-* | ia16-* | ia64-* \
- | ip2k-* | iq2000-* \
- | k1om-* \
- | le32-* | le64-* \
- | lm32-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
- | microblaze-* | microblazeel-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64octeon-* | mips64octeonel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64r5900-* | mips64r5900el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa32r6-* | mipsisa32r6el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64r6-* | mipsisa64r6el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipsr5900-* | mipsr5900el-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nds32-* | nds32le-* | nds32be-* \
- | nios-* | nios2-* | nios2eb-* | nios2el-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | open8-* \
- | or1k*-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
- | pru-* \
- | pyramid-* \
- | riscv32-* | riscv64-* \
- | rl78-* | romp-* | rs6000-* | rx-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
- | tahoe-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
- | tile*-* \
- | tron-* \
- | ubicom32-* \
- | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
- | vax-* \
- | visium-* \
- | wasm32-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* \
- | xstormy16-* | xtensa*-* \
- | ymp-* \
- | z8k-* | z80-*)
- ;;
- # Recognize the basic CPU types without company name, with glob match.
- xtensa*)
- basic_machine=$basic_machine-unknown
- ;;
# Recognize the various machine names and aliases which stand
# for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
+ cpu=m68000
+ vendor=att
;;
3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aros)
- basic_machine=i386-pc
- os=-aros
- ;;
- asmjs)
- basic_machine=asmjs-unknown
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- blackfin)
- basic_machine=bfin-unknown
- os=-linux
- ;;
- blackfin-*)
- basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
+ cpu=we32k
+ vendor=att
;;
bluegene*)
- basic_machine=powerpc-ibm
- os=-cnk
- ;;
- c54x-*)
- basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- c55x-*)
- basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- c6x-*)
- basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- cegcc)
- basic_machine=arm-unknown
- os=-cegcc
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16 | cr16-*)
- basic_machine=cr16-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
+ cpu=powerpc
+ vendor=ibm
+ os=cnk
;;
decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
+ cpu=pdp10
+ vendor=dec
+ os=tops10
;;
decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
+ cpu=pdp10
+ vendor=dec
+ os=tops20
;;
delta | 3300 | motorola-3300 | motorola-delta \
| 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
+ cpu=m68k
+ vendor=motorola
;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- dicos)
- basic_machine=i686-pc
- os=-dicos
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- e500v[12])
- basic_machine=powerpc-unknown
- os=$os"spe"
- ;;
- e500v[12]-*)
- basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=$os"spe"
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
+ dpx2*)
+ cpu=m68k
+ vendor=bull
+ os=sysv3
;;
encore | umax | mmax)
- basic_machine=ns32k-encore
+ cpu=ns32k
+ vendor=encore
;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
+ elxsi)
+ cpu=elxsi
+ vendor=elxsi
+ os=${os:-bsd}
;;
fx2800)
- basic_machine=i860-alliant
+ cpu=i860
+ vendor=alliant
;;
genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
+ cpu=ns32k
+ vendor=ns
;;
h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
+ cpu=hppa1.1
+ vendor=hitachi
+ os=hiuxwe2
;;
hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
+ cpu=hppa1.0
+ vendor=hp
;;
hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
+ cpu=m68000
+ vendor=hp
;;
hp9k3[2-9][0-9])
- basic_machine=m68k-hp
+ cpu=m68k
+ vendor=hp
;;
hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
+ cpu=hppa1.0
+ vendor=hp
;;
hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
+ cpu=hppa1.1
+ vendor=hp
;;
hp9k78[0-9] | hp78[0-9])
# FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
+ cpu=hppa1.1
+ vendor=hp
;;
hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
# FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
+ cpu=hppa1.1
+ vendor=hp
;;
hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
+ cpu=hppa1.1
+ vendor=hp
;;
hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
+ cpu=hppa1.0
+ vendor=hp
;;
i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
+ cpu=`echo "$1" | sed -e 's/86.*/86/'`
+ vendor=pc
+ os=sysv32
;;
i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
+ cpu=`echo "$1" | sed -e 's/86.*/86/'`
+ vendor=pc
+ os=sysv4
;;
i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
+ cpu=`echo "$1" | sed -e 's/86.*/86/'`
+ vendor=pc
+ os=sysv
;;
i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
+ cpu=`echo "$1" | sed -e 's/86.*/86/'`
+ vendor=pc
+ os=solaris2
;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
+ j90 | j90-cray)
+ cpu=j90
+ vendor=cray
+ os=${os:-unicos}
;;
iris | iris4d)
- basic_machine=mips-sgi
+ cpu=mips
+ vendor=sgi
case $os in
- -irix*)
+ irix*)
;;
*)
- os=-irix4
+ os=irix4
;;
esac
;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- leon-*|leon[3-9]-*)
- basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
- ;;
- m68knommu)
- basic_machine=m68k-unknown
- os=-linux
- ;;
- m68knommu-*)
- basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- microblaze*)
- basic_machine=microblaze-xilinx
- ;;
- mingw64)
- basic_machine=x86_64-pc
- os=-mingw64
- ;;
- mingw32)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- moxiebox)
- basic_machine=moxie-unknown
- os=-moxiebox
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- msys)
- basic_machine=i686-pc
- os=-msys
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- nacl)
- basic_machine=le32-unknown
- os=-nacl
+ cpu=m68000
+ vendor=convergent
;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
+ *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ cpu=m68k
+ vendor=atari
+ os=mint
;;
news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
+ cpu=mips
+ vendor=sony
+ os=newsos
;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
+ next | m*-next)
+ cpu=m68k
+ vendor=next
case $os in
- -nextstep* )
+ openstep*)
+ ;;
+ nextstep*)
;;
- -ns2*)
- os=-nextstep2
+ ns2*)
+ os=nextstep2
;;
*)
- os=-nextstep3
+ os=nextstep3
;;
esac
;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
np1)
- basic_machine=np1-gould
- ;;
- neo-tandem)
- basic_machine=neo-tandem
- ;;
- nse-tandem)
- basic_machine=nse-tandem
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- nsx-tandem)
- basic_machine=nsx-tandem
+ cpu=np1
+ vendor=gould
;;
op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
+ cpu=hppa1.1
+ vendor=oki
+ os=proelf
;;
pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- parisc)
- basic_machine=hppa-unknown
- os=-linux
- ;;
- parisc-*)
- basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
+ cpu=hppa1.1
+ vendor=hitachi
+ os=hiuxwe2
;;
pbd)
- basic_machine=sparc-tti
+ cpu=sparc
+ vendor=tti
;;
pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
+ cpu=m68k
+ vendor=tti
;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ pc532)
+ cpu=ns32k
+ vendor=pc532
;;
pn)
- basic_machine=pn-gould
+ cpu=pn
+ vendor=gould
;;
- power) basic_machine=power-ibm
- ;;
- ppc | ppcbe) basic_machine=powerpc-unknown
- ;;
- ppc-* | ppcbe-*)
- basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ power)
+ cpu=power
+ vendor=ibm
;;
ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos | rdos64)
- basic_machine=x86_64-pc
- os=-rdos
- ;;
- rdos32)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
+ cpu=i386
+ vendor=ibm
;;
rm[46]00)
- basic_machine=mips-siemens
+ cpu=mips
+ vendor=siemens
;;
rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
+ cpu=romp
+ vendor=ibm
;;
sde)
- basic_machine=mipsisa32-sde
- os=-elf
+ cpu=mipsisa32
+ vendor=sde
+ os=${os:-elf}
;;
- sei)
- basic_machine=mips-sei
- os=-seiux
+ simso-wrs)
+ cpu=sparclite
+ vendor=wrs
+ os=vxworks
;;
- sequent)
- basic_machine=i386-sequent
+ tower | tower-32)
+ cpu=m68k
+ vendor=ncr
;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
+ vpp*|vx|vx-*)
+ cpu=f301
+ vendor=fujitsu
;;
- sh5el)
- basic_machine=sh5le-unknown
+ w65)
+ cpu=w65
+ vendor=wdc
;;
- sh64)
- basic_machine=sh64-unknown
+ w89k-*)
+ cpu=hppa1.1
+ vendor=winbond
+ os=proelf
;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
+ none)
+ cpu=none
+ vendor=none
;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
+ leon|leon[3-9])
+ cpu=sparc
+ vendor=$basic_machine
;;
- spur)
- basic_machine=spur-unknown
+ leon-*|leon[3-9]-*)
+ cpu=sparc
+ vendor=`echo "$basic_machine" | sed 's/-.*//'`
;;
- st2000)
- basic_machine=m68k-tandem
+
+ *-*)
+ # shellcheck disable=SC2162
+ IFS="-" read cpu vendor <&2
- exit 1
+ # Recognize the canonical CPU types that are allowed with any
+ # company name.
+ case $cpu in
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | abacus \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \
+ | alphapca5[67] | alpha64pca5[67] \
+ | am33_2.0 \
+ | amdgcn \
+ | arc | arceb \
+ | arm | arm[lb]e | arme[lb] | armv* \
+ | avr | avr32 \
+ | asmjs \
+ | ba \
+ | be32 | be64 \
+ | bfin | bs2000 \
+ | c[123]* | c30 | [cjt]90 | c4x \
+ | c8051 | clipper | craynv | csky | cydra \
+ | d10v | d30v | dlx | dsp16xx \
+ | e2k | elxsi | epiphany \
+ | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \
+ | h8300 | h8500 \
+ | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i*86 | i860 | i960 | ia16 | ia64 \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle \
+ | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
+ | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
+ | m88110 | m88k | maxq | mb | mcore | mep | metag \
+ | microblaze | microblazeel \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64eb | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mmix \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nfp \
+ | nios | nios2 | nios2eb | nios2el \
+ | none | np1 | ns16k | ns32k | nvptx \
+ | open8 \
+ | or1k* \
+ | or32 \
+ | orion \
+ | picochip \
+ | pdp10 | pdp11 | pj | pjl | pn | power \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
+ | pru \
+ | pyramid \
+ | riscv | riscv32 | riscv64 \
+ | rl78 | romp | rs6000 | rx \
+ | score \
+ | sh | shl \
+ | sh[1234] | sh[24]a | sh[24]ae[lb] | sh[23]e | she[lb] | sh[lb]e \
+ | sh[1234]e[lb] | sh[12345][lb]e | sh[23]ele | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet \
+ | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \
+ | spu \
+ | tahoe \
+ | tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \
+ | tron \
+ | ubicom32 \
+ | v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \
+ | vax \
+ | visium \
+ | w65 \
+ | wasm32 | wasm64 \
+ | we32k \
+ | x86 | x86_64 | xc16x | xgate | xps100 \
+ | xstormy16 | xtensa* \
+ | ymp \
+ | z8k | z80)
+ ;;
+
+ *)
+ echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2
+ exit 1
+ ;;
+ esac
;;
esac
# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+case $vendor in
+ digital*)
+ vendor=dec
;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ commodore*)
+ vendor=cbm
;;
*)
;;
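
[Editor's note] The rewritten entries above lean on POSIX default-value expansion, e.g. os=${os:-bsd} for elxsi and os=${os:-unicos} for j90, so that an OS the user gave explicitly survives canonicalization instead of being overwritten. A minimal illustration of the idiom:

    os=
    os=${os:-unicos}
    echo "$os"    # prints "unicos": empty, so the default applies

    os=irix6
    os=${os:-unicos}
    echo "$os"    # prints "irix6": an explicit value is preserved
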
@@ -1363,200 +1278,246 @@ esac
# Decode manufacturer-specific aliases for certain operating systems.
-if [ x"$os" != x"" ]
+if [ x$os != x ]
then
case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -auroraux)
- os=-auroraux
+ # First match some system type aliases that might get confused
+ # with valid system types.
+ # solaris* is a basic system type, with this one exception.
+ auroraux)
+ os=auroraux
;;
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ bluegene*)
+ os=cnk
;;
- -solaris)
- os=-solaris2
+ solaris1 | solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
;;
- -svr4*)
- os=-sysv4
+ solaris)
+ os=solaris2
;;
- -unixware*)
- os=-sysv4.2uw
+ unixware*)
+ os=sysv4.2uw
;;
- -gnu/linux*)
+ gnu/linux*)
os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
;;
- # First accept the basic system types.
+ # es1800 is here to avoid being matched by es* (a different OS)
+ es1800*)
+ os=ose
+ ;;
+ # Some version numbers need modification
+ chorusos*)
+ os=chorusos
+ ;;
+ isc)
+ os=isc2.2
+ ;;
+ sco6)
+ os=sco5v6
+ ;;
+ sco5)
+ os=sco3.2v5
+ ;;
+ sco4)
+ os=sco3.2v4
+ ;;
+ sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ ;;
+ sco3.2v[4-9]* | sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ ;;
+ scout)
+ # Don't match below
+ ;;
+ sco*)
+ os=sco3.2v2
+ ;;
+ psos*)
+ os=psos
+ ;;
+ # Now accept the basic system types.
# The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
- | -sym* | -kopensolaris* | -plan9* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* | -aros* | -cloudabi* | -sortix* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \
- | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
- | -linux-newlib* | -linux-musl* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
- | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*)
+ # Each alternative MUST end in a * to match a version number.
+ # sysv* is not here because it comes later, after sysvr4.
+ gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \
+ | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\
+ | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \
+ | sym* | kopensolaris* | plan9* \
+ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \
+ | aos* | aros* | cloudabi* | sortix* \
+ | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \
+ | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \
+ | knetbsd* | mirbsd* | netbsd* \
+ | bitrig* | openbsd* | solidbsd* | libertybsd* \
+ | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \
+ | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \
+ | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \
+ | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \
+ | chorusrdb* | cegcc* | glidix* \
+ | cygwin* | msys* | pe* | moss* | proelf* | rtems* \
+ | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \
+ | linux-newlib* | linux-musl* | linux-uclibc* \
+ | uxpv* | beos* | mpeix* | udk* | moxiebox* \
+ | interix* | uwin* | mks* | rhapsody* | darwin* \
+ | openstep* | oskit* | conix* | pw32* | nonstopux* \
+ | storm-chaos* | tops10* | tenex* | tops20* | its* \
+ | os2* | vos* | palmos* | uclinux* | nucleus* \
+ | morphos* | superux* | rtmk* | windiss* \
+ | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \
+ | skyos* | haiku* | rdos* | toppers* | drops* | es* \
+ | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \
+ | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
+ qnx*)
+ case $cpu in
+ x86 | i*86)
;;
*)
- os=-nto$os
+ os=nto-$os
;;
esac
;;
- -nto-qnx*)
+ hiux*)
+ os=hiuxwe2
;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ nto-qnx*)
;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
+ sim | xray | os68k* | v88r* \
+ | windows* | osx | abug | netware* | os9* \
+ | macos* | mpw* | magic* | mmixware* | mon960* | lnews*)
;;
- -linux-dietlibc)
- os=-linux-dietlibc
+ linux-dietlibc)
+ os=linux-dietlibc
;;
- -linux*)
+ linux*)
os=`echo $os | sed -e 's|linux|linux-gnu|'`
;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ lynx*178)
+ os=lynxos178
;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ lynx*5)
+ os=lynxos5
;;
- -opened*)
- os=-openedition
+ lynx*)
+ os=lynxos
;;
- -os400*)
- os=-os400
+ mac*)
+ os=`echo "$os" | sed -e 's|mac|macos|'`
;;
- -wince*)
- os=-wince
+ opened*)
+ os=openedition
;;
- -osfrose*)
- os=-osfrose
+ os400*)
+ os=os400
;;
- -osf*)
- os=-osf
+ sunos5*)
+ os=`echo "$os" | sed -e 's|sunos5|solaris2|'`
;;
- -utek*)
- os=-bsd
+ sunos6*)
+ os=`echo "$os" | sed -e 's|sunos6|solaris3|'`
;;
- -dynix*)
- os=-bsd
+ wince*)
+ os=wince
;;
- -acis*)
- os=-aos
+ utek*)
+ os=bsd
;;
- -atheos*)
- os=-atheos
+ dynix*)
+ os=bsd
;;
- -syllable*)
- os=-syllable
+ acis*)
+ os=aos
;;
- -386bsd)
- os=-bsd
+ atheos*)
+ os=atheos
;;
- -ctix* | -uts*)
- os=-sysv
+ syllable*)
+ os=syllable
+ ;;
+ 386bsd)
+ os=bsd
;;
- -nova*)
- os=-rtmk-nova
+ ctix* | uts*)
+ os=sysv
;;
- -ns2 )
- os=-nextstep2
+ nova*)
+ os=rtmk-nova
;;
- -nsk*)
- os=-nsk
+ ns2)
+ os=nextstep2
+ ;;
+ nsk*)
+ os=nsk
;;
# Preserve the version number of sinix5.
- -sinix5.*)
+ sinix5.*)
os=`echo $os | sed -e 's|sinix|sysv|'`
;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
+ sinix*)
+ os=sysv4
;;
- -triton*)
- os=-sysv3
+ tpf*)
+ os=tpf
;;
- -oss*)
- os=-sysv3
+ triton*)
+ os=sysv3
;;
- -svr4)
- os=-sysv4
+ oss*)
+ os=sysv3
;;
- -svr3)
- os=-sysv3
+ svr4*)
+ os=sysv4
;;
- -sysvr4)
- os=-sysv4
+ svr3)
+ os=sysv3
;;
- # This must come after -sysvr4.
- -sysv*)
+ sysvr4)
+ os=sysv4
;;
- -ose*)
- os=-ose
+ # This must come after sysvr4.
+ sysv*)
;;
- -es1800*)
- os=-ose
+ ose*)
+ os=ose
;;
- -xenix)
- os=-xenix
+ *mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
+ os=mint
;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
+ zvmoe)
+ os=zvmoe
;;
- -aros*)
- os=-aros
+ dicos*)
+ os=dicos
;;
- -zvmoe)
- os=-zvmoe
+ pikeos*)
+ # Until real need of OS specific support for
+ # particular features comes up, bare metal
+ # configurations are quite functional.
+ case $cpu in
+ arm*)
+ os=eabi
+ ;;
+ *)
+ os=elf
+ ;;
+ esac
;;
- -dicos*)
- os=-dicos
+ nacl*)
;;
- -nacl*)
+ ios)
;;
- -ios)
+ none)
;;
- -none)
+ *-eabi)
;;
*)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2
exit 1
;;
esac
@@ -1572,264 +1533,265 @@ else
# will signal an error saying that MANUFACTURER isn't an operating
# system, and we'll never get to this point.
-case $basic_machine in
+case $cpu-$vendor in
score-*)
- os=-elf
+ os=elf
;;
spu-*)
- os=-elf
+ os=elf
;;
*-acorn)
- os=-riscix1.2
+ os=riscix1.2
;;
arm*-rebel)
- os=-linux
+ os=linux
;;
arm*-semi)
- os=-aout
+ os=aout
;;
c4x-* | tic4x-*)
- os=-coff
+ os=coff
;;
c8051-*)
- os=-elf
+ os=elf
+ ;;
+ clipper-intergraph)
+ os=clix
;;
hexagon-*)
- os=-elf
+ os=elf
;;
tic54x-*)
- os=-coff
+ os=coff
;;
tic55x-*)
- os=-coff
+ os=coff
;;
tic6x-*)
- os=-coff
+ os=coff
;;
# This must come before the *-dec entry.
pdp10-*)
- os=-tops20
+ os=tops20
;;
pdp11-*)
- os=-none
+ os=none
;;
*-dec | vax-*)
- os=-ultrix4.2
+ os=ultrix4.2
;;
m68*-apollo)
- os=-domain
+ os=domain
;;
i386-sun)
- os=-sunos4.0.2
+ os=sunos4.0.2
;;
m68000-sun)
- os=-sunos3
+ os=sunos3
;;
m68*-cisco)
- os=-aout
+ os=aout
;;
mep-*)
- os=-elf
+ os=elf
;;
mips*-cisco)
- os=-elf
+ os=elf
;;
mips*-*)
- os=-elf
+ os=elf
;;
or32-*)
- os=-coff
+ os=coff
;;
*-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
+ os=sysv3
;;
sparc-* | *-sun)
- os=-sunos4.1.1
+ os=sunos4.1.1
;;
pru-*)
- os=-elf
+ os=elf
;;
*-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
+ os=beos
;;
*-ibm)
- os=-aix
+ os=aix
;;
*-knuth)
- os=-mmixware
+ os=mmixware
;;
*-wec)
- os=-proelf
+ os=proelf
;;
*-winbond)
- os=-proelf
+ os=proelf
;;
*-oki)
- os=-proelf
+ os=proelf
;;
*-hp)
- os=-hpux
+ os=hpux
;;
*-hitachi)
- os=-hiux
+ os=hiux
;;
i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
+ os=sysv
;;
*-cbm)
- os=-amigaos
+ os=amigaos
;;
*-dg)
- os=-dgux
+ os=dgux
;;
*-dolphin)
- os=-sysv3
+ os=sysv3
;;
m68k-ccur)
- os=-rtu
+ os=rtu
;;
m88k-omron*)
- os=-luna
+ os=luna
;;
- *-next )
- os=-nextstep
+ *-next)
+ os=nextstep
;;
*-sequent)
- os=-ptx
+ os=ptx
;;
*-crds)
- os=-unos
+ os=unos
;;
*-ns)
- os=-genix
+ os=genix
;;
i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
+ os=mvs
;;
*-gould)
- os=-sysv
+ os=sysv
;;
*-highlevel)
- os=-bsd
+ os=bsd
;;
*-encore)
- os=-bsd
+ os=bsd
;;
*-sgi)
- os=-irix
+ os=irix
;;
*-siemens)
- os=-sysv4
+ os=sysv4
;;
*-masscomp)
- os=-rtu
+ os=rtu
;;
f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
+ os=uxpv
;;
*-rom68k)
- os=-coff
+ os=coff
;;
*-*bug)
- os=-coff
+ os=coff
;;
*-apple)
- os=-macos
+ os=macos
;;
*-atari*)
- os=-mint
+ os=mint
+ ;;
+ *-wrs)
+ os=vxworks
;;
*)
- os=-none
+ os=none
;;
esac
fi
# Here we handle the case where we know the os, and the CPU type, but not the
# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
+case $vendor in
+ unknown)
case $os in
- -riscix*)
+ riscix*)
vendor=acorn
;;
- -sunos*)
+ sunos*)
vendor=sun
;;
- -cnk*|-aix*)
+ cnk*|-aix*)
vendor=ibm
;;
- -beos*)
+ beos*)
vendor=be
;;
- -hpux*)
+ hpux*)
vendor=hp
;;
- -mpeix*)
+ mpeix*)
vendor=hp
;;
- -hiux*)
+ hiux*)
vendor=hitachi
;;
- -unos*)
+ unos*)
vendor=crds
;;
- -dgux*)
+ dgux*)
vendor=dg
;;
- -luna*)
+ luna*)
vendor=omron
;;
- -genix*)
+ genix*)
vendor=ns
;;
- -mvs* | -opened*)
+ clix*)
+ vendor=intergraph
+ ;;
+ mvs* | opened*)
vendor=ibm
;;
- -os400*)
+ os400*)
vendor=ibm
;;
- -ptx*)
+ ptx*)
vendor=sequent
;;
- -tpf*)
+ tpf*)
vendor=ibm
;;
- -vxsim* | -vxworks* | -windiss*)
+ vxsim* | vxworks* | windiss*)
vendor=wrs
;;
- -aux*)
+ aux*)
vendor=apple
;;
- -hms*)
+ hms*)
vendor=hitachi
;;
- -mpw* | -macos*)
+ mpw* | macos*)
vendor=apple
;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ *mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
vendor=atari
;;
- -vos*)
+ vos*)
vendor=stratus
;;
esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
;;
esac
-echo $basic_machine$os
+echo "$cpu-$vendor-$os"
exit
# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
+# eval: (add-hook 'before-save-hook 'time-stamp)
# time-stamp-start: "timestamp='"
# time-stamp-format: "%:y-%02m-%02d"
# time-stamp-end: "'"
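
[Editor's note] The theme of this whole config.sub rewrite is visible in the *-*) arm above: rather than carrying a combined basic_machine string and patching it with sed subshells, the script now splits the argument once into separate cpu, vendor, and os variables and reassembles them only at the very end (echo "$cpu-$vendor-$os"). A stand-alone sketch of that split, assuming a two-part CPU-COMPANY input:

    basic_machine=sparc-sun
    # config.sub performs the same split with a <<EOF heredoc feeding read.
    echo "$basic_machine" | {
      IFS="-" read cpu vendor
      echo "cpu=$cpu vendor=$vendor"    # cpu=sparc vendor=sun
    }
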
diff --git a/config/llvm.m4 b/config/llvm.m4
index e25ffec661b..a5f4a9af448 100644
--- a/config/llvm.m4
+++ b/config/llvm.m4
@@ -1,11 +1,11 @@
# config/llvm.m4
# PGAC_LLVM_SUPPORT
-# ---------------
+# -----------------
#
# Look for the LLVM installation, check that it's new enough, set the
# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS
-# variables. Also verifies that CLANG is available, to transform C
+# variables. Also verify that CLANG is available, to transform C
# into bitcode.
#
AC_DEFUN([PGAC_LLVM_SUPPORT],
@@ -13,7 +13,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
AC_REQUIRE([AC_PROG_AWK])
AC_ARG_VAR(LLVM_CONFIG, [path to llvm-config command])
- PGAC_PATH_PROGS(LLVM_CONFIG, llvm-config llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9)
+ PGAC_PATH_PROGS(LLVM_CONFIG, llvm-config llvm-config-7 llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9)
# no point continuing if llvm wasn't found
if test -z "$LLVM_CONFIG"; then
@@ -31,7 +31,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
# need clang to create some bitcode files
AC_ARG_VAR(CLANG, [path to clang compiler to generate bitcode])
- PGAC_PATH_PROGS(CLANG, clang clang-6.0 clang-5.0 clang-4.0 clang-3.9)
+ PGAC_PATH_PROGS(CLANG, clang clang-7 clang-6.0 clang-5.0 clang-4.0 clang-3.9)
if test -z "$CLANG"; then
AC_MSG_ERROR([clang not found, but required when compiling --with-llvm, specify with CLANG=])
fi
@@ -91,14 +91,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
LLVM_BINPATH=`$LLVM_CONFIG --bindir`
- # Check which functionality is present
- SAVE_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS"
- AC_CHECK_DECLS([LLVMOrcGetSymbolAddressIn, LLVMOrcRegisterGDB, LLVMOrcRegisterPerf], [], [], [[#include <llvm-c/OrcBindings.h>]])
- AC_CHECK_DECLS([LLVMGetHostCPUName], [], [], [[#include <llvm-c/TargetMachine.h>]])
- CPPFLAGS="$SAVE_CPPFLAGS"
-
- # LLVM_CONFIG, CLANG are already output via AC_ARG_VAR
+dnl LLVM_CONFIG, CLANG are already output via AC_ARG_VAR
AC_SUBST(LLVM_LIBS)
AC_SUBST(LLVM_CPPFLAGS)
AC_SUBST(LLVM_CFLAGS)
@@ -106,3 +99,22 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
AC_SUBST(LLVM_BINPATH)
])# PGAC_LLVM_SUPPORT
+
+
+# PGAC_CHECK_LLVM_FUNCTIONS
+# -------------------------
+#
+# Check presence of some optional LLVM functions.
+# (This shouldn't happen until we're ready to run AC_CHECK_DECLS tests;
+# because PGAC_LLVM_SUPPORT runs very early, it's not an appropriate place.)
+#
+AC_DEFUN([PGAC_CHECK_LLVM_FUNCTIONS],
+[
+ # Check which functionality is present
+ SAVE_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS"
+ AC_CHECK_DECLS([LLVMOrcGetSymbolAddressIn], [], [], [[#include <llvm-c/OrcBindings.h>]])
+ AC_CHECK_DECLS([LLVMGetHostCPUName, LLVMGetHostCPUFeatures], [], [], [[#include <llvm-c/TargetMachine.h>]])
+ AC_CHECK_DECLS([LLVMCreateGDBRegistrationListener, LLVMCreatePerfJITEventListener], [], [], [[#include <llvm-c/ExecutionEngine.h>]])
+ CPPFLAGS="$SAVE_CPPFLAGS"
+])# PGAC_CHECK_LLVM_FUNCTIONS
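
[Editor's note] Each AC_CHECK_DECLS above expands into a compile probe whose result ends up as a HAVE_DECL_* define. Roughly what one generated probe boils down to, reduced to plain shell (cc, the LLVM headers, and $LLVM_CPPFLAGS are assumed available; this is a sketch, not the generated code):

    printf '%s\n' '#include <llvm-c/TargetMachine.h>' \
        'int main(void) { (void) LLVMGetHostCPUName; return 0; }' >conftest.c
    if cc $LLVM_CPPFLAGS -c conftest.c -o conftest.o 2>/dev/null; then
      echo '#define HAVE_DECL_LLVMGETHOSTCPUNAME 1'
    else
      echo '#define HAVE_DECL_LLVMGETHOSTCPUNAME 0'
    fi
    rm -f conftest.c conftest.o
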
diff --git a/config/perl.m4 b/config/perl.m4
index caefb0705e7..059e31c4766 100644
--- a/config/perl.m4
+++ b/config/perl.m4
@@ -5,6 +5,7 @@
# --------------
AC_DEFUN([PGAC_PATH_PERL],
[PGAC_PATH_PROGS(PERL, perl)
+AC_ARG_VAR(PERL, [Perl program])dnl
if test "$PERL"; then
pgac_perl_version=`$PERL -v 2>/dev/null | sed -n ['s/This is perl.*v[a-z ]*\([0-9]\.[0-9][0-9.]*\).*$/\1/p']`
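
[Editor's note] AC_ARG_VAR makes PERL a "precious" variable: it is listed in ./configure --help, recorded by config.status, and can be pinned on the configure command line. The same treatment is given to PYTHON, MSGFMT, and TCLSH in the hunks below, so all of them become overridable, for example (paths illustrative):

    ./configure PERL=/usr/local/bin/perl PYTHON=/usr/bin/python3 TCLSH=tclsh8.6
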
diff --git a/config/prep_buildtree b/config/prep_buildtree
index 5b72c392f68..a0eabd3dee2 100644
--- a/config/prep_buildtree
+++ b/config/prep_buildtree
@@ -33,7 +33,7 @@ for item in `find "$sourcetree" -type d \( \( -name CVS -prune \) -o \( -name .g
fi
done
-for item in `find "$sourcetree" -name Makefile -print -o -name GNUmakefile -print`; do
+for item in `find "$sourcetree" -name Makefile -print -o -name GNUmakefile -print | grep -v "$sourcetree/doc/src/sgml/images/"`; do
filename=`expr "$item" : "$sourcetree\(.*\)"`
if test ! -f "${item}.in"; then
if cmp "$item" "$buildtree/$filename" >/dev/null 2>&1; then : ; else
diff --git a/config/programs.m4 b/config/programs.m4
index aa84bfdb9e4..90ff9447bdd 100644
--- a/config/programs.m4
+++ b/config/programs.m4
@@ -56,7 +56,7 @@ if test -z "$BISON"; then
*** PostgreSQL then you do not need to worry about this, because the Bison
*** output is pre-generated.)])
fi
-# We don't need AC_SUBST(BISON) because PGAC_PATH_PROGS did it
+dnl We don't need AC_SUBST(BISON) because PGAC_PATH_PROGS did it
AC_SUBST(BISONFLAGS)
])# PGAC_PATH_BISON
@@ -179,11 +179,11 @@ for pgac_rllib in $READLINE_ORDER ; do
for pgac_lib in "" " -ltermcap" " -lncurses" " -lcurses" ; do
LIBS="${pgac_rllib}${pgac_lib} $pgac_save_LIBS"
AC_TRY_LINK_FUNC([readline], [[
- # Older NetBSD, OpenBSD, and Irix have a broken linker that does not
+ # Older NetBSD and OpenBSD have a broken linker that does not
# recognize dependent libraries; assume curses is needed if we didn't
# find any dependency.
case $host_os in
- netbsd* | openbsd* | irix*)
+ netbsd* | openbsd*)
if test x"$pgac_lib" = x"" ; then
pgac_lib=" -lcurses"
fi ;;
@@ -245,6 +245,7 @@ AC_DEFUN([PGAC_CHECK_GETTEXT],
AC_CHECK_HEADER([libintl.h], [],
[AC_MSG_ERROR([header file <libintl.h> is required for NLS])])
PGAC_PATH_PROGS(MSGFMT, msgfmt)
+ AC_ARG_VAR(MSGFMT, [msgfmt program for NLS])dnl
if test -z "$MSGFMT"; then
AC_MSG_ERROR([msgfmt is required for NLS])
fi
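
[Editor's note] The loop being adjusted in the hunk above links a dummy readline reference first with no helper library and then with each candidate terminal library, keeping the first combination that resolves. A reduced sketch of that strategy (cc and libreadline assumed; the dummy declaration mirrors what autoconf's link test emits):

    printf '%s\n' 'char readline();' 'int main(void) { return readline != 0; }' >conftest.c
    for extra in "" "-ltermcap" "-lncurses" "-lcurses"; do
      if cc conftest.c -lreadline $extra -o conftest 2>/dev/null; then
        echo "readline links with: -lreadline $extra"
        break
      fi
    done
    rm -f conftest.c conftest
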
diff --git a/config/python.m4 b/config/python.m4
index 587bca99d52..c51aa4e332e 100644
--- a/config/python.m4
+++ b/config/python.m4
@@ -8,8 +8,16 @@
# ----------------
# Look for Python and set the output variable 'PYTHON' if found,
# fail otherwise.
+#
+# As the Python 3 transition happens and PEP 394 isn't updated, we
+# need to cater to systems that don't have unversioned "python" by
+# default. Some systems ship with "python3" by default and perhaps
+# have "python" in an optional package. Some systems only have
+# "python2" and "python3", in which case it's reasonable to prefer the
+# newer version.
AC_DEFUN([PGAC_PATH_PYTHON],
-[PGAC_PATH_PROGS(PYTHON, python)
+[PGAC_PATH_PROGS(PYTHON, [python python3 python2])
+AC_ARG_VAR(PYTHON, [Python program])dnl
if test x"$PYTHON" = x""; then
AC_MSG_ERROR([Python not found])
fi
diff --git a/config/tcl.m4 b/config/tcl.m4
index a4bf231947f..9de31a57156 100644
--- a/config/tcl.m4
+++ b/config/tcl.m4
@@ -5,6 +5,7 @@
AC_DEFUN([PGAC_PATH_TCLSH],
[PGAC_PATH_PROGS(TCLSH, [tclsh tcl tclsh8.6 tclsh86 tclsh8.5 tclsh85 tclsh8.4 tclsh84])
+AC_ARG_VAR(TCLSH, [Tcl interpreter program (tclsh)])dnl
if test x"$TCLSH" = x""; then
AC_MSG_ERROR([Tcl shell not found])
fi
@@ -13,6 +14,10 @@ fi
# PGAC_PATH_TCLCONFIGSH([SEARCH-PATH])
# ------------------------------------
+# If the user doesn't specify $TCL_CONFIG_SH directly, search for it in
+# the list of directories passed as parameter (from --with-tclconfig).
+# If no list is given, try the Tcl shell's $auto_path.
+
AC_DEFUN([PGAC_PATH_TCLCONFIGSH],
[AC_REQUIRE([PGAC_PATH_TCLSH])[]dnl
AC_BEFORE([$0], [PGAC_PATH_TKCONFIGSH])[]dnl
@@ -24,7 +29,14 @@ if test -z "$TCL_CONFIG_SH"; then
set X $pgac_test_dirs; shift
if test $[#] -eq 0; then
test -z "$TCLSH" && AC_MSG_ERROR([unable to locate tclConfig.sh because no Tcl shell was found])
- set X `echo 'puts $auto_path' | $TCLSH`; shift
+ pgac_test_dirs=`echo 'puts $auto_path' | $TCLSH`
+ # On newer macOS, $auto_path frequently doesn't include the place
+ # where tclConfig.sh actually lives. Append that to the end, so as not
+ # to break cases where a non-default Tcl installation is being used.
+ if test -d "$PG_SYSROOT/System/Library/Frameworks/Tcl.framework" ; then
+ pgac_test_dirs="$pgac_test_dirs $PG_SYSROOT/System/Library/Frameworks/Tcl.framework"
+ fi
+ set X $pgac_test_dirs; shift
fi
for pgac_dir do
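
[Editor's note] The new fallback asks the Tcl shell itself where it loads packages from and then appends the macOS framework directory, since recent macOS keeps tclConfig.sh there without listing it in $auto_path. The probe is easy to reproduce by hand (tclsh assumed on PATH; output illustrative):

    echo 'puts $auto_path' | tclsh
    # e.g.: /usr/share/tcltk/tcl8.6 /usr/share/tcltk /usr/lib
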
diff --git a/configure b/configure
index 56f18dfbc26..b3c92764be8 100755
--- a/configure
+++ b/configure
@@ -1,8 +1,8 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for PostgreSQL 11devel.
+# Generated by GNU Autoconf 2.69 for PostgreSQL 13devel.
#
-# Report bugs to <pgsql-bugs@postgresql.org>.
+# Report bugs to <pgsql-bugs@lists.postgresql.org>.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -11,7 +11,7 @@
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
#
-# Copyright (c) 1996-2018, PostgreSQL Global Development Group
+# Copyright (c) 1996-2019, PostgreSQL Global Development Group
## -------------------- ##
## M4sh Initialization. ##
## -------------------- ##
@@ -269,10 +269,10 @@ fi
$as_echo "$0: be upgraded to zsh 4.3.4 or later."
else
$as_echo "$0: Please tell bug-autoconf@gnu.org and
-$0: pgsql-bugs@postgresql.org about your system, including
-$0: any error possibly output before this message. Then
-$0: install a modern shell, or manually run the script
-$0: under such a shell if you do have one."
+$0: pgsql-bugs@lists.postgresql.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
fi
exit 1
fi
@@ -582,9 +582,9 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='PostgreSQL'
PACKAGE_TARNAME='postgresql'
-PACKAGE_VERSION='11devel'
-PACKAGE_STRING='PostgreSQL 11devel'
-PACKAGE_BUGREPORT='pgsql-bugs@postgresql.org'
+PACKAGE_VERSION='13devel'
+PACKAGE_STRING='PostgreSQL 13devel'
+PACKAGE_BUGREPORT='pgsql-bugs@lists.postgresql.org'
PACKAGE_URL=''
ac_unique_file="src/backend/access/common/heaptuple.c"
@@ -627,6 +627,7 @@ ac_includes_default="\
ac_subst_vars='LTLIBOBJS
vpath_build
+PG_SYSROOT
PG_VERSION_NUM
PROVE
FOP
@@ -649,7 +650,6 @@ PG_CRC32C_OBJS
CFLAGS_ARMV8_CRC32C
CFLAGS_SSE42
have_win32_dbghelp
-HAVE_IPV6
LIBOBJS
UUID_LIBS
LDAP_LIBS_BE
@@ -658,6 +658,9 @@ PTHREAD_CFLAGS
PTHREAD_LIBS
PTHREAD_CC
ax_pthread_config
+EGREP
+GREP
+SED
ZIC
python_additional_libs
python_libspec
@@ -668,6 +671,7 @@ python_majorversion
PYTHON
perl_embed_ldflags
perl_embed_ccflags
+perl_includespec
perl_useshrplib
perl_privlibexp
perl_archlibexp
@@ -695,9 +699,6 @@ with_gnu_ld
LD
LDFLAGS_SL
LDFLAGS_EX
-ELF_SYS
-EGREP
-GREP
with_zlib
with_system_tzdata
with_libxslt
@@ -730,6 +731,7 @@ CPP
BITCODE_CXXFLAGS
BITCODE_CFLAGS
CFLAGS_VECTOR
+PERMIT_DECLARATION_AFTER_STATEMENT
LLVM_BINPATH
LLVM_CXXFLAGS
LLVM_CFLAGS
@@ -759,7 +761,6 @@ GENHTML
LCOV
GCOV
enable_debug
-enable_strong_random
enable_rpath
default_port
WANTED_LANGUAGES
@@ -827,7 +828,6 @@ with_pgport
enable_rpath
enable_spinlocks
enable_atomics
-enable_strong_random
enable_debug
enable_profiling
enable_coverage
@@ -887,8 +887,13 @@ PKG_CONFIG_PATH
PKG_CONFIG_LIBDIR
ICU_CFLAGS
ICU_LIBS
+XML2_CONFIG
LDFLAGS_EX
-LDFLAGS_SL'
+LDFLAGS_SL
+PERL
+PYTHON
+MSGFMT
+TCLSH'
# Initialize some variables set by options.
@@ -1429,7 +1434,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures PostgreSQL 11devel to adapt to many kinds of systems.
+\`configure' configures PostgreSQL 13devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1494,7 +1499,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of PostgreSQL 11devel:";;
+ short | recursive ) echo "Configuration of PostgreSQL 13devel:";;
esac
cat <<\_ACEOF
@@ -1510,7 +1515,6 @@ Optional Features:
executables
--disable-spinlocks do not use spinlocks
--disable-atomics do not use atomic operations
- --disable-strong-random do not use a strong random number source
--enable-debug build with debugging symbols (-g)
--enable-profiling build with profiling enabled
--enable-coverage build with coverage testing instrumentation
@@ -1587,13 +1591,18 @@ Some influential environment variables:
path overriding pkg-config's built-in search path
ICU_CFLAGS C compiler flags for ICU, overriding pkg-config
ICU_LIBS linker flags for ICU, overriding pkg-config
+ XML2_CONFIG path to xml2-config utility
LDFLAGS_EX extra linker flags for linking executables only
LDFLAGS_SL extra linker flags for linking shared libraries only
+ PERL Perl program
+ PYTHON Python program
+ MSGFMT msgfmt program for NLS
+ TCLSH Tcl interpreter program (tclsh)
Use these variables to override the choices made by `configure' or to help
it to find libraries and programs with nonstandard names/locations.
-Report bugs to <pgsql-bugs@postgresql.org>.
+Report bugs to <pgsql-bugs@lists.postgresql.org>.
_ACEOF
ac_status=$?
fi
@@ -1656,14 +1665,14 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-PostgreSQL configure 11devel
+PostgreSQL configure 13devel
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
-Copyright (c) 1996-2018, PostgreSQL Global Development Group
+Copyright (c) 1996-2019, PostgreSQL Global Development Group
_ACEOF
exit
fi
@@ -1748,52 +1757,6 @@ fi
} # ac_fn_cxx_try_compile
-# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES
-# ---------------------------------------------
-# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR
-# accordingly.
-ac_fn_c_check_decl ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- as_decl_name=`echo $2|sed 's/ *(.*//'`
- as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'`
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5
-$as_echo_n "checking whether $as_decl_name is declared... " >&6; }
-if eval \${$3+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-$4
-int
-main ()
-{
-#ifndef $as_decl_name
-#ifdef __cplusplus
- (void) $as_decl_use;
-#else
- (void) $as_decl_name;
-#endif
-#endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- eval "$3=yes"
-else
- eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-
-} # ac_fn_c_check_decl
-
# ac_fn_c_try_link LINENO
# -----------------------
# Try to link conftest.$ac_ext, and return whether this succeeded.
@@ -1947,9 +1910,9 @@ $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-( $as_echo "## ---------------------------------------- ##
-## Report this to pgsql-bugs@postgresql.org ##
-## ---------------------------------------- ##"
+( $as_echo "## ---------------------------------------------- ##
+## Report this to pgsql-bugs@lists.postgresql.org ##
+## ---------------------------------------------- ##"
) | sed "s/^/$as_me: WARNING: /" >&2
;;
esac
@@ -2401,11 +2364,61 @@ rm -f conftest.val
as_fn_set_status $ac_retval
} # ac_fn_c_compute_int
+
+# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES
+# ---------------------------------------------
+# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR
+# accordingly.
+ac_fn_c_check_decl ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ # Initialize each $ac_[]_AC_LANG_ABBREV[]_decl_warn_flag once.
+ as_decl_name=`echo $2|sed 's/ *(.*//'`
+ as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'`
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5
+$as_echo_n "checking whether $as_decl_name is declared... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag="$ac_c_decl_warn_flag$ac_c_werror_flag"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+#ifndef $as_decl_name
+#ifdef __cplusplus
+ (void) $as_decl_use;
+#else
+ (void) $as_decl_name;
+#endif
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_werror_flag
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_decl
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by PostgreSQL $as_me 11devel, which was
+It was created by PostgreSQL $as_me 13devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2956,7 +2969,7 @@ PostgreSQL has apparently not been ported to your platform yet.
To try a manual configuration, look into the src/template directory
for a similar platform and use the '--with-template=' option.
-Please also contact <pgsql-bugs@postgresql.org> to see about
+Please also contact <pgsql-bugs@lists.postgresql.org> to see about
rectifying this. Include the above 'checking host system type...'
line.
*******************************************************************
@@ -3266,34 +3279,6 @@ fi
-#
-# Random number generation
-#
-
-
-# Check whether --enable-strong-random was given.
-if test "${enable_strong_random+set}" = set; then :
- enableval=$enable_strong_random;
- case $enableval in
- yes)
- :
- ;;
- no)
- :
- ;;
- *)
- as_fn_error $? "no argument expected for --enable-strong-random option" "$LINENO" 5
- ;;
- esac
-
-else
- enable_strong_random=yes
-
-fi
-
-
-
-
#
# --enable-debug adds -g to compiler flags
#
@@ -4425,6 +4410,190 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C99" >&5
+$as_echo_n "checking for $CC option to accept ISO C99... " >&6; }
+if ${ac_cv_prog_cc_c99+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c99=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <stdio.h>
+
+// Check varargs macros. These examples are taken from C99 6.10.3.5.
+#define debug(...) fprintf (stderr, __VA_ARGS__)
+#define showlist(...) puts (#__VA_ARGS__)
+#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
+static void
+test_varargs_macros (void)
+{
+ int x = 1234;
+ int y = 5678;
+ debug ("Flag");
+ debug ("X = %d\n", x);
+ showlist (The first, second, and third items.);
+ report (x>y, "x is %d but y is %d", x, y);
+}
+
+// Check long long types.
+#define BIG64 18446744073709551615ull
+#define BIG32 4294967295ul
+#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
+#if !BIG_OK
+ your preprocessor is broken;
+#endif
+#if BIG_OK
+#else
+ your preprocessor is broken;
+#endif
+static long long int bignum = -9223372036854775807LL;
+static unsigned long long int ubignum = BIG64;
+
+struct incomplete_array
+{
+ int datasize;
+ double data[];
+};
+
+struct named_init {
+ int number;
+ const wchar_t *name;
+ double average;
+};
+
+typedef const char *ccp;
+
+static inline int
+test_restrict (ccp restrict text)
+{
+ // See if C++-style comments work.
+ // Iterate through items via the restricted pointer.
+ // Also check for declarations in for loops.
+ for (unsigned int i = 0; *(text+i) != '\0'; ++i)
+ continue;
+ return 0;
+}
+
+// Check varargs and va_copy.
+static void
+test_varargs (const char *format, ...)
+{
+ va_list args;
+ va_start (args, format);
+ va_list args_copy;
+ va_copy (args_copy, args);
+
+ const char *str;
+ int number;
+ float fnumber;
+
+ while (*format)
+ {
+ switch (*format++)
+ {
+ case 's': // string
+ str = va_arg (args_copy, const char *);
+ break;
+ case 'd': // int
+ number = va_arg (args_copy, int);
+ break;
+ case 'f': // float
+ fnumber = va_arg (args_copy, double);
+ break;
+ default:
+ break;
+ }
+ }
+ va_end (args_copy);
+ va_end (args);
+}
+
+int
+main ()
+{
+
+ // Check bool.
+ _Bool success = false;
+
+ // Check restrict.
+ if (test_restrict ("String literal") == 0)
+ success = true;
+ char *restrict newvar = "Another string";
+
+ // Check varargs.
+ test_varargs ("s, d' f .", "string", 65, 34.234);
+ test_varargs_macros ();
+
+ // Check flexible array members.
+ struct incomplete_array *ia =
+ malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10));
+ ia->datasize = 10;
+ for (int i = 0; i < ia->datasize; ++i)
+ ia->data[i] = i * 1.234;
+
+ // Check named initializers.
+ struct named_init ni = {
+ .number = 34,
+ .name = L"Test wide string",
+ .average = 543.34343,
+ };
+
+ ni.number = 58;
+
+ int dynamic_array[ni.number];
+ dynamic_array[ni.number - 1] = 543;
+
+ // work around unused variable warnings
+ return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x'
+ || dynamic_array[ni.number - 1] != 543);
+
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c99=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c99" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c99" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c99"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
+$as_echo "$ac_cv_prog_cc_c99" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c99" != xno; then :
+
+fi
+
+
+
+# Error out if the compiler does not support C99, as the codebase
+# relies on that.
+if test "$ac_cv_prog_cc_c99" = no; then
+ as_fn_error $? "C compiler \"$CC\" does not support C99" "$LINENO" 5
+fi
+
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
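
[Editor's note] The large added block above is autoconf's stock AC_PROG_CC_C99 test: a single C file exercising the C99 features PostgreSQL now requires (declarations inside for loops, designated initializers, va_copy, flexible array members), compiled with each candidate flag until one succeeds. Condensed to its control flow (cc assumed; this toy test file checks only one C99 feature):

    printf '%s\n' 'int main(void) { for (int i = 0; i < 3; i++) ; return 0; }' >c99test.c
    for flag in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99; do
      if cc $flag -c c99test.c 2>/dev/null; then
        echo "C99 accepted with: ${flag:-no extra flag}"
        break
      fi
    done
    rm -f c99test.c c99test.o
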
@@ -4763,8 +4932,7 @@ fi
-if test "$with_llvm" = yes ; then
- for ac_prog in gawk mawk nawk awk
+for ac_prog in gawk mawk nawk awk
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -4806,12 +4974,14 @@ fi
test -n "$AWK" && break
done
+if test "$with_llvm" = yes; then :
+
if test -z "$LLVM_CONFIG"; then
- for ac_prog in llvm-config llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9
+ for ac_prog in llvm-config llvm-config-7 llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -4882,7 +5052,7 @@ fi
# need clang to create some bitcode files
if test -z "$CLANG"; then
- for ac_prog in clang clang-6.0 clang-5.0 clang-4.0 clang-3.9
+ for ac_prog in clang clang-7 clang-6.0 clang-5.0 clang-4.0 clang-3.9
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -4994,58 +5164,7 @@ fi
LLVM_BINPATH=`$LLVM_CONFIG --bindir`
- # Check which functionality is present
- SAVE_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS"
- ac_fn_c_check_decl "$LINENO" "LLVMOrcGetSymbolAddressIn" "ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" "#include
-"
-if test "x$ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN $ac_have_decl
-_ACEOF
-ac_fn_c_check_decl "$LINENO" "LLVMOrcRegisterGDB" "ac_cv_have_decl_LLVMOrcRegisterGDB" "#include
-"
-if test "x$ac_cv_have_decl_LLVMOrcRegisterGDB" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_LLVMORCREGISTERGDB $ac_have_decl
-_ACEOF
-ac_fn_c_check_decl "$LINENO" "LLVMOrcRegisterPerf" "ac_cv_have_decl_LLVMOrcRegisterPerf" "#include
-"
-if test "x$ac_cv_have_decl_LLVMOrcRegisterPerf" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_LLVMORCREGISTERPERF $ac_have_decl
-_ACEOF
-
- ac_fn_c_check_decl "$LINENO" "LLVMGetHostCPUName" "ac_cv_have_decl_LLVMGetHostCPUName" "#include
-"
-if test "x$ac_cv_have_decl_LLVMGetHostCPUName" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_LLVMGETHOSTCPUNAME $ac_have_decl
-_ACEOF
-
- CPPFLAGS="$SAVE_CPPFLAGS"
- # LLVM_CONFIG, CLANG are already output via AC_ARG_VAR
@@ -5053,7 +5172,7 @@ _ACEOF
-fi
+fi # fi
unset CFLAGS
@@ -5142,6 +5261,7 @@ if test "$GCC" = yes -a "$ICC" = no; then
CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith"
CXXFLAGS="-Wall -Wpointer-arith"
# These work in some but not all gcc versions
+ save_CFLAGS=$CFLAGS
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wdeclaration-after-statement, for CFLAGS" >&5
$as_echo_n "checking whether ${CC} supports -Wdeclaration-after-statement, for CFLAGS... " >&6; }
@@ -5182,17 +5302,24 @@ if test x"$pgac_cv_prog_CC_cflags__Wdeclaration_after_statement" = x"yes"; then
fi
- # -Wdeclaration-after-statement isn't applicable for C++
+ # -Wdeclaration-after-statement isn't applicable for C++. Specific C files
+ # disable it, so AC_SUBST the negative form.
+ PERMIT_DECLARATION_AFTER_STATEMENT=
+ if test x"$save_CFLAGS" != x"$CFLAGS"; then
+ PERMIT_DECLARATION_AFTER_STATEMENT=-Wno-declaration-after-statement
+ fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wendif-labels, for CFLAGS" >&5
-$as_echo_n "checking whether ${CC} supports -Wendif-labels, for CFLAGS... " >&6; }
-if ${pgac_cv_prog_CC_cflags__Wendif_labels+:} false; then :
+ # Really don't want VLAs to be used in our dialect of C
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Werror=vla, for CFLAGS" >&5
+$as_echo_n "checking whether ${CC} supports -Werror=vla, for CFLAGS... " >&6; }
+if ${pgac_cv_prog_CC_cflags__Werror_vla+:} false; then :
$as_echo_n "(cached) " >&6
else
pgac_save_CFLAGS=$CFLAGS
pgac_save_CC=$CC
CC=${CC}
-CFLAGS="${CFLAGS} -Wendif-labels"
+CFLAGS="${CFLAGS} -Werror=vla"
ac_save_c_werror_flag=$ac_c_werror_flag
ac_c_werror_flag=yes
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -5207,19 +5334,60 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv_prog_CC_cflags__Wendif_labels=yes
+ pgac_cv_prog_CC_cflags__Werror_vla=yes
else
- pgac_cv_prog_CC_cflags__Wendif_labels=no
+ pgac_cv_prog_CC_cflags__Werror_vla=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_c_werror_flag=$ac_save_c_werror_flag
CFLAGS="$pgac_save_CFLAGS"
CC="$pgac_save_CC"
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wendif_labels" >&5
-$as_echo "$pgac_cv_prog_CC_cflags__Wendif_labels" >&6; }
-if test x"$pgac_cv_prog_CC_cflags__Wendif_labels" = x"yes"; then
- CFLAGS="${CFLAGS} -Wendif-labels"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Werror_vla" >&5
+$as_echo "$pgac_cv_prog_CC_cflags__Werror_vla" >&6; }
+if test x"$pgac_cv_prog_CC_cflags__Werror_vla" = x"yes"; then
+ CFLAGS="${CFLAGS} -Werror=vla"
+fi
+
+
+ # -Wvla is not applicable for C++
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wendif-labels, for CFLAGS" >&5
+$as_echo_n "checking whether ${CC} supports -Wendif-labels, for CFLAGS... " >&6; }
+if ${pgac_cv_prog_CC_cflags__Wendif_labels+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ pgac_save_CFLAGS=$CFLAGS
+pgac_save_CC=$CC
+CC=${CC}
+CFLAGS="${CFLAGS} -Wendif-labels"
+ac_save_c_werror_flag=$ac_c_werror_flag
+ac_c_werror_flag=yes
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ pgac_cv_prog_CC_cflags__Wendif_labels=yes
+else
+ pgac_cv_prog_CC_cflags__Wendif_labels=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_c_werror_flag=$ac_save_c_werror_flag
+CFLAGS="$pgac_save_CFLAGS"
+CC="$pgac_save_CC"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wendif_labels" >&5
+$as_echo "$pgac_cv_prog_CC_cflags__Wendif_labels" >&6; }
+if test x"$pgac_cv_prog_CC_cflags__Wendif_labels" = x"yes"; then
+ CFLAGS="${CFLAGS} -Wendif-labels"
fi
@@ -5815,6 +5983,7 @@ fi
# We want to suppress clang's unhelpful unused-command-line-argument warnings
# but gcc won't complain about unrecognized -Wno-foo switches, so we have to
# test for the positive form and if that works, add the negative form
+ NOT_THE_CFLAGS=""
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wunused-command-line-argument, for NOT_THE_CFLAGS" >&5
$as_echo_n "checking whether ${CC} supports -Wunused-command-line-argument, for NOT_THE_CFLAGS... " >&6; }
if ${pgac_cv_prog_CC_cflags__Wunused_command_line_argument+:} false; then :
@@ -5857,6 +6026,93 @@ fi
if test -n "$NOT_THE_CFLAGS"; then
CFLAGS="$CFLAGS -Wno-unused-command-line-argument"
fi
+ # Similarly disable useless truncation warnings from gcc 8+
+ NOT_THE_CFLAGS=""
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wformat-truncation, for NOT_THE_CFLAGS" >&5
+$as_echo_n "checking whether ${CC} supports -Wformat-truncation, for NOT_THE_CFLAGS... " >&6; }
+if ${pgac_cv_prog_CC_cflags__Wformat_truncation+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ pgac_save_CFLAGS=$CFLAGS
+pgac_save_CC=$CC
+CC=${CC}
+CFLAGS="${NOT_THE_CFLAGS} -Wformat-truncation"
+ac_save_c_werror_flag=$ac_c_werror_flag
+ac_c_werror_flag=yes
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ pgac_cv_prog_CC_cflags__Wformat_truncation=yes
+else
+ pgac_cv_prog_CC_cflags__Wformat_truncation=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_c_werror_flag=$ac_save_c_werror_flag
+CFLAGS="$pgac_save_CFLAGS"
+CC="$pgac_save_CC"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wformat_truncation" >&5
+$as_echo "$pgac_cv_prog_CC_cflags__Wformat_truncation" >&6; }
+if test x"$pgac_cv_prog_CC_cflags__Wformat_truncation" = x"yes"; then
+ NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wformat-truncation"
+fi
+
+
+ if test -n "$NOT_THE_CFLAGS"; then
+ CFLAGS="$CFLAGS -Wno-format-truncation"
+ fi
+ NOT_THE_CFLAGS=""
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wstringop-truncation, for NOT_THE_CFLAGS" >&5
+$as_echo_n "checking whether ${CC} supports -Wstringop-truncation, for NOT_THE_CFLAGS... " >&6; }
+if ${pgac_cv_prog_CC_cflags__Wstringop_truncation+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ pgac_save_CFLAGS=$CFLAGS
+pgac_save_CC=$CC
+CC=${CC}
+CFLAGS="${NOT_THE_CFLAGS} -Wstringop-truncation"
+ac_save_c_werror_flag=$ac_c_werror_flag
+ac_c_werror_flag=yes
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ pgac_cv_prog_CC_cflags__Wstringop_truncation=yes
+else
+ pgac_cv_prog_CC_cflags__Wstringop_truncation=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_c_werror_flag=$ac_save_c_werror_flag
+CFLAGS="$pgac_save_CFLAGS"
+CC="$pgac_save_CC"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wstringop_truncation" >&5
+$as_echo "$pgac_cv_prog_CC_cflags__Wstringop_truncation" >&6; }
+if test x"$pgac_cv_prog_CC_cflags__Wstringop_truncation" = x"yes"; then
+ NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wstringop-truncation"
+fi
+
+
+ if test -n "$NOT_THE_CFLAGS"; then
+ CFLAGS="$CFLAGS -Wno-stringop-truncation"
+ fi
elif test "$ICC" = yes; then
# Intel's compiler has a bug/misoptimization in checking for
# division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS.
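
[Editor's note] Both new truncation checks above use the same trick: gcc accepts unknown -Wno-* switches silently, so configure probes the positive -W form, which does fail for unknown options, and only on success appends the negative form to CFLAGS. A reduced sketch (using -Werror as a stand-in for configure's internal werror flag):

    probe_no_flag() {
      printf '%s\n' 'int main(void) { return 0; }' >flagtest.c
      if cc -Werror "-W$1" -c flagtest.c 2>/dev/null; then
        CFLAGS="$CFLAGS -Wno-$1"
      fi
      rm -f flagtest.c flagtest.o
    }
    probe_no_flag format-truncation
    probe_no_flag stringop-truncation
    echo "CFLAGS now:$CFLAGS"
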
@@ -6697,6 +6953,39 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
+# Defend against clang being used on x86-32 without SSE2 enabled. As current
+# versions of clang do not understand -fexcess-precision=standard, the use of
+# x87 floating point operations leads to problems like isinf possibly returning
+# false for a value that is infinite when converted from the 80bit register to
+# the 8byte memory representation.
+#
+# Only perform the test if the compiler doesn't understand
+# -fexcess-precision=standard, that way a potentially fixed compiler will work
+# automatically.
+if test "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = no; then
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+#if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__)
+choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ as_fn_error $? "Compiling PostgreSQL with clang, on 32bit x86, requires SSE2 support. Use -msse2 or use gcc." "$LINENO" 5
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
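
[Editor's note] The guard added above works by making compilation fail on exactly the bad combination: the preprocessor keeps the deliberately invalid line "choke me" only when clang targets 32-bit x86 without SSE2 math. The same test, stand-alone (cc assumed):

    printf '%s\n' \
      '#if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__)' \
      'choke me' \
      '#endif' \
      'int main(void) { return 0; }' >sse2check.c
    cc -c sse2check.c 2>/dev/null || echo "32-bit clang without SSE2: add -msse2"
    rm -f sse2check.c sse2check.o
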
@@ -7912,7 +8201,7 @@ $as_echo_n "checking for XML2_CONFIG... " >&6; }
$as_echo "$XML2_CONFIG" >&6; }
fi
- if test -n "$XML2_CONFIG"; then
+ if test -n "$XML2_CONFIG"; then
for pgac_option in `$XML2_CONFIG --cflags`; do
case $pgac_option in
-I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
@@ -8017,247 +8306,84 @@ fi
#
-# Elf
+# Assignments
#
-# Assume system is ELF if it predefines __ELF__ as 1,
-# otherwise believe host_os based default.
-case $host_os in
- freebsd1*|freebsd2*) elf=no;;
- freebsd3*|freebsd4*) elf=yes;;
-esac
+CPPFLAGS="$CPPFLAGS $INCLUDES"
+LDFLAGS="$LDFLAGS $LIBDIRS"
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if ${ac_cv_path_GREP+:} false; then :
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5
+$as_echo_n "checking for ld used by GCC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case "$ac_prog" in
+ # Accept absolute paths.
+ [\\/]* | [A-Za-z]:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${ac_cv_path_LD+:} false; then :
$as_echo_n "(cached) " >&6
else
- if test -z "$GREP"; then
- ac_path_GREP_found=false
- # Loop through the user's path and test for each of PROGNAME-LIST
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_prog in grep ggrep; do
- for ac_exec_ext in '' $ac_executable_extensions; do
- ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
- as_fn_executable_p "$ac_path_GREP" || continue
-# Check for GNU ac_path_GREP and select it if it is found.
- # Check for GNU $ac_path_GREP
-case `"$ac_path_GREP" --version 2>&1` in
-*GNU*)
- ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
-*)
- ac_count=0
- $as_echo_n 0123456789 >"conftest.in"
- while :
- do
- cat "conftest.in" "conftest.in" >"conftest.tmp"
- mv "conftest.tmp" "conftest.in"
- cp "conftest.in" "conftest.nl"
- $as_echo 'GREP' >> "conftest.nl"
- "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
- diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
- as_fn_arith $ac_count + 1 && ac_count=$as_val
- if test $ac_count -gt ${ac_path_GREP_max-0}; then
- # Best one so far, save it but keep looking for a better one
- ac_cv_path_GREP="$ac_path_GREP"
- ac_path_GREP_max=$ac_count
+ if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ ac_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$ac_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
fi
- # 10*(2^10) chars as input seems more than enough
- test $ac_count -gt 10 && break
- done
- rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
- $ac_path_GREP_found && break 3
- done
- done
done
-IFS=$as_save_IFS
- if test -z "$ac_cv_path_GREP"; then
- as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
- fi
+ IFS="$ac_save_ifs"
else
- ac_cv_path_GREP=$GREP
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
-$as_echo "$ac_cv_path_GREP" >&6; }
- GREP="$ac_cv_path_GREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
-$as_echo_n "checking for egrep... " >&6; }
-if ${ac_cv_path_EGREP+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
- then ac_cv_path_EGREP="$GREP -E"
- else
- if test -z "$EGREP"; then
- ac_path_EGREP_found=false
- # Loop through the user's path and test for each of PROGNAME-LIST
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_prog in egrep; do
- for ac_exec_ext in '' $ac_executable_extensions; do
- ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
- as_fn_executable_p "$ac_path_EGREP" || continue
-# Check for GNU ac_path_EGREP and select it if it is found.
- # Check for GNU $ac_path_EGREP
-case `"$ac_path_EGREP" --version 2>&1` in
-*GNU*)
- ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
-*)
- ac_count=0
- $as_echo_n 0123456789 >"conftest.in"
- while :
- do
- cat "conftest.in" "conftest.in" >"conftest.tmp"
- mv "conftest.tmp" "conftest.in"
- cp "conftest.in" "conftest.nl"
- $as_echo 'EGREP' >> "conftest.nl"
- "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
- diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
- as_fn_arith $ac_count + 1 && ac_count=$as_val
- if test $ac_count -gt ${ac_path_EGREP_max-0}; then
- # Best one so far, save it but keep looking for a better one
- ac_cv_path_EGREP="$ac_path_EGREP"
- ac_path_EGREP_max=$ac_count
- fi
- # 10*(2^10) chars as input seems more than enough
- test $ac_count -gt 10 && break
- done
- rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
- $ac_path_EGREP_found && break 3
- done
- done
- done
-IFS=$as_save_IFS
- if test -z "$ac_cv_path_EGREP"; then
- as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
- fi
-else
- ac_cv_path_EGREP=$EGREP
-fi
-
- fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
-$as_echo "$ac_cv_path_EGREP" >&6; }
- EGREP="$ac_cv_path_EGREP"
-
-
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#if __ELF__
- yes
-#endif
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
- $EGREP "yes" >/dev/null 2>&1; then :
- ELF_SYS=true
-else
- if test "X$elf" = "Xyes" ; then
- ELF_SYS=true
-else
- ELF_SYS=
-fi
-fi
-rm -f conftest*
-
-
-
-#
-# Assignments
-#
-
-CPPFLAGS="$CPPFLAGS $INCLUDES"
-LDFLAGS="$LDFLAGS $LIBDIRS"
-
-
-
-
-
-# Check whether --with-gnu-ld was given.
-if test "${with_gnu_ld+set}" = set; then :
- withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
-else
- with_gnu_ld=no
-fi
-
-ac_prog=ld
-if test "$GCC" = yes; then
- # Check if gcc -print-prog-name=ld gives a path.
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5
-$as_echo_n "checking for ld used by GCC... " >&6; }
- case $host in
- *-*-mingw*)
- # gcc leaves a trailing carriage return which upsets mingw
- ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
- *)
- ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
- esac
- case "$ac_prog" in
- # Accept absolute paths.
- [\\/]* | [A-Za-z]:[\\/]*)
- re_direlt='/[^/][^/]*/\.\./'
- # Canonicalize the path of ld
- ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
- while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
- ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
- done
- test -z "$LD" && LD="$ac_prog"
- ;;
- "")
- # If it fails, then pretend we aren't using GCC.
- ac_prog=ld
- ;;
- *)
- # If it is relative, then search for the first ld in PATH.
- with_gnu_ld=unknown
- ;;
- esac
-elif test "$with_gnu_ld" = yes; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
-$as_echo_n "checking for GNU ld... " >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
-$as_echo_n "checking for non-GNU ld... " >&6; }
-fi
-if ${ac_cv_path_LD+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if test -z "$LD"; then
- IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
- for ac_dir in $PATH; do
- test -z "$ac_dir" && ac_dir=.
- if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
- ac_cv_path_LD="$ac_dir/$ac_prog"
- # Check to see if the program is GNU ld. I'd rather use --version,
- # but apparently some GNU ld's only accept -v.
- # Break only if it was the GNU/non-GNU ld that we prefer.
- if "$ac_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
- test "$with_gnu_ld" != no && break
- else
- test "$with_gnu_ld" != yes && break
- fi
- fi
- done
- IFS="$ac_save_ifs"
-else
- ac_cv_path_LD="$LD" # Let the user override the test with a path.
+ ac_cv_path_LD="$LD" # Let the user override the test with a path.
fi
fi
@@ -9040,48 +9166,6 @@ else
$as_echo "no, using $LN_S" >&6; }
fi
-for ac_prog in gawk mawk nawk awk
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_prog_AWK+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if test -n "$AWK"; then
- ac_cv_prog_AWK="$AWK" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_prog_AWK="$ac_prog"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
-fi
-fi
-AWK=$ac_cv_prog_AWK
-if test -n "$AWK"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
-$as_echo "$AWK" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
- test -n "$AWK" && break
-done
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
if test -z "$MKDIR_P"; then
@@ -9224,7 +9308,6 @@ $as_echo "$as_me: WARNING:
*** PostgreSQL then you do not need to worry about this, because the Bison
*** output is pre-generated.)" >&2;}
fi
-# We don't need AC_SUBST(BISON) because PGAC_PATH_PROGS did it
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for flex" >&5
@@ -9414,6 +9497,15 @@ You might have to rebuild your Perl installation. Refer to the
documentation for details. Use --without-perl to disable building
PL/Perl." "$LINENO" 5
fi
+ # On most platforms, archlibexp is also where the Perl include files live ...
+ perl_includespec="-I$perl_archlibexp/CORE"
+ # ... but on newer macOS versions, we must use -iwithsysroot to look
+ # under $PG_SYSROOT
+ if test \! -f "$perl_archlibexp/CORE/perl.h" ; then
+ if test -f "$PG_SYSROOT$perl_archlibexp/CORE/perl.h" ; then
+ perl_includespec="-iwithsysroot $perl_archlibexp/CORE"
+ fi
+ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLAGS recommended by Perl" >&5
$as_echo_n "checking for CFLAGS recommended by Perl... " >&6; }
@@ -9459,7 +9551,7 @@ fi
if test "$with_python" = yes; then
if test -z "$PYTHON"; then
- for ac_prog in python
+ for ac_prog in python python3 python2
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -9729,39 +9821,235 @@ fi
# other libraries can pull in the pthread functions as a side-effect. We
# want to use the -pthread or similar flags directly, and not rely on
# the side-effects of linking with some other library.
-#
-# note: We have to use AS_IF here rather than plain if. The AC_CHECK_HEADER
-# invocation below is the first one in the script, and autoconf generates
-# additional code for that, which must not be inside the if-block. AS_IF
-# knows how to do that.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
-$as_echo_n "checking for ANSI C header files... " >&6; }
-if ${ac_cv_header_stdc+:} false; then :
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
+$as_echo_n "checking for a sed that does not truncate output... " >&6; }
+if ${ac_cv_path_SED+:} false; then :
$as_echo_n "(cached) " >&6
else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-
-int
-main ()
-{
+ ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+ for ac_i in 1 2 3 4 5 6 7; do
+ ac_script="$ac_script$as_nl$ac_script"
+ done
+ echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
+ { ac_script=; unset ac_script;}
+ if test -z "$SED"; then
+ ac_path_SED_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in sed gsed; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_SED" || continue
+# Check for GNU ac_path_SED and select it if it is found.
+ # Check for GNU $ac_path_SED
+case `"$ac_path_SED" --version 2>&1` in
+*GNU*)
+ ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo '' >> "conftest.nl"
+ "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_SED_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_SED="$ac_path_SED"
+ ac_path_SED_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- ac_cv_header_stdc=yes
+ $ac_path_SED_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_SED"; then
+ as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
+ fi
else
- ac_cv_header_stdc=no
+ ac_cv_path_SED=$SED
fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-if test $ac_cv_header_stdc = yes; then
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
+$as_echo "$ac_cv_path_SED" >&6; }
+ SED="$ac_cv_path_SED"
+ rm -f conftest.sed
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
# SunOS 4.x string.h does not declare mem*, contrary to ANSI.
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -9868,6 +10156,7 @@ if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then :
+
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -9883,10 +10172,14 @@ ax_pthread_ok=no
# First of all, check if the user has set any of the PTHREAD_LIBS,
# etcetera environment variables, and if threads linking works using
# them:
-if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
- save_CFLAGS="$CFLAGS"
+if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
+ ax_pthread_save_CC="$CC"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ if test "x$PTHREAD_CC" != "x"; then :
+ CC="$PTHREAD_CC"
+fi
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
- save_LIBS="$LIBS"
LIBS="$PTHREAD_LIBS $LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS" >&5
$as_echo_n "checking for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS... " >&6; }
@@ -9915,12 +10208,13 @@ rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5
$as_echo "$ax_pthread_ok" >&6; }
- if test x"$ax_pthread_ok" = xno; then
+ if test "x$ax_pthread_ok" = "xno"; then
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
fi
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CC="$ax_pthread_save_CC"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
fi
# We must check for the threads library under a number of different
@@ -9933,7 +10227,7 @@ fi
# which indicates that we try without any flags at all, and "pthread-config"
# which is a program returning the flags for the Pth emulation library.
-ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt -mthreads pthread --thread-safe pthread-config"
+ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
# The ordering *is* (sometimes) important. Some notes on the
# individual items follow:
@@ -9942,14 +10236,14 @@ ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt
# none: in case threads are in libc; should be tried before -Kthread and
# other compiler flags to prevent continual compiler warnings
# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
-# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
-# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads), Tru64
-# -pthreads: Solaris/gcc
+# (Note: HP C rejects this with "bad form for `-t' option")
+# -pthreads: Solaris/gcc (Note: HP C also rejects)
# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
# doesn't hurt to check since this sometimes defines pthreads and
# -D_REENTRANT too), HP C (must be checked before -lpthread, which
-# is present but should not be used directly)
+# is present but should not be used directly; and before -mthreads,
+# because the compiler interprets this as "-mt" + "-hreads")
# -mthreads: Mingw32/gcc, Lynx/gcc
# pthread: Linux, etcetera
# --thread-safe: KAI C++
@@ -9957,6 +10251,14 @@ ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mt
case $host_os in
+ freebsd*)
+
+ # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+ # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+
+ ax_pthread_flags="-kthread lthread $ax_pthread_flags"
+ ;;
+
hpux*)
# From the cc(1) man page: "[-mt] Sets various -D flags to enable
@@ -9991,63 +10293,184 @@ rm -f conftest*
solaris*)
- # Newer versions of Solaris require the "-mt -lpthread" pair, and we
- # check that first. On some older versions, libc contains stubbed
+ # On Solaris (at least, for some versions), libc contains stubbed
# (non-functional) versions of the pthreads routines, so link-based
- # tests will erroneously succeed. (We need to link with -pthreads/-mt/
- # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
- # a function called by this macro, so we could check for that, but
- # who knows whether they'll stub that too in a future libc.) So
- # we'll look for -pthreads and -lpthread shortly thereafter.
+ # tests will erroneously succeed. (N.B.: The stubs are missing
+ # pthread_cleanup_push, or rather a function called by this macro,
+ # so we could check for that, but who knows whether they'll stub
+ # that too in a future libc.) So we'll check first for the
+ # standard Solaris way of linking pthreads (-mt -lpthread).
+
+ ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
+ ;;
+esac
+
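On such systems a compile test proves nothing; the probe must actually link, and must reference a function the stub library does not provide. A standalone sketch in that spirit, assuming a POSIX threads environment:

    #include <pthread.h>

    static void cleanup(void *a) { (void) a; }
    static void *start(void *a) { return a; }

    int main(void)
    {
        pthread_t th;

        /* Old Solaris libc stubs lack pthread_cleanup_push (or a
         * function it expands to), so linking this fails unless the
         * real threads library is pulled in. */
        pthread_cleanup_push(cleanup, 0);
        pthread_cleanup_pop(0);
        if (pthread_create(&th, 0, start, 0) == 0)
            pthread_join(th, 0);
        return 0;
    }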
+# GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
+
+if test "x$GCC" = "xyes"; then :
+ ax_pthread_flags="-pthread -pthreads $ax_pthread_flags"
+fi
+
+# The presence of a feature test macro requesting re-entrant function
+# definitions is, on some systems, a strong hint that pthreads support is
+# correctly enabled
+
+case $host_os in
+ darwin* | hpux* | linux* | osf* | solaris*)
+ ax_pthread_check_macro="_REENTRANT"
+ ;;
+
+ aix*)
+ ax_pthread_check_macro="_THREAD_SAFE"
+ ;;
- ax_pthread_flags="-mt,pthread -pthreads -pthread pthread $ax_pthread_flags"
+ *)
+ ax_pthread_check_macro="--"
;;
esac
+if test "x$ax_pthread_check_macro" = "x--"; then :
+ ax_pthread_check_cond=0
+else
+ ax_pthread_check_cond="!defined($ax_pthread_check_macro)"
+fi
-# Older versions of Clang only give a warning instead of an error for an
-# unrecognized option, unless we specify -Werror. (We throw in some extra
-# Clang warning flags for good measure.)
+# Are we compiling with Clang?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs certain flags to reject unknown flags" >&5
-$as_echo_n "checking if compiler needs certain flags to reject unknown flags... " >&6; }
-if ${ax_cv_PTHREAD_REJECT_UNKNOWN+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC is Clang" >&5
+$as_echo_n "checking whether $CC is Clang... " >&6; }
+if ${ax_cv_PTHREAD_CLANG+:} false; then :
$as_echo_n "(cached) " >&6
else
- ax_cv_PTHREAD_REJECT_UNKNOWN=unknown
- save_CFLAGS="$CFLAGS"
- ax_pthread_extra_flags="-Wunknown-warning-option -Wunused-command-line-argument"
- CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wfoobaz -foobaz"
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ ax_cv_PTHREAD_CLANG=no
+ # Note that Autoconf sets GCC=yes for Clang as well as GCC
+ if test "x$GCC" = "xyes"; then
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-int foo(void);
-int
-main ()
-{
-foo()
- ;
- return 0;
-}
+/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+# if defined(__clang__) && defined(__llvm__)
+ AX_PTHREAD_CC_IS_CLANG
+# endif
+
_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- ax_cv_PTHREAD_REJECT_UNKNOWN="-Werror $ax_pthread_extra_flags"
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "AX_PTHREAD_CC_IS_CLANG" >/dev/null 2>&1; then :
+ ax_cv_PTHREAD_CLANG=yes
+fi
+rm -f conftest*
+
+ fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_CLANG" >&5
+$as_echo "$ax_cv_PTHREAD_CLANG" >&6; }
+ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
+
+ax_pthread_clang_warning=no
+
+# Clang needs special handling, because older versions handle the -pthread
+# option in a rather... idiosyncratic way
+
+if test "x$ax_pthread_clang" = "xyes"; then
+
+ # Clang takes -pthread; it has never supported any other flag
+
+ # (Note 1: This will need to be revisited if a system that Clang
+ # supports has POSIX threads in a separate library. This tends not
+ # to be the way of modern systems, but it's conceivable.)
+
+ # (Note 2: On some systems, notably Darwin, -pthread is not needed
+ # to get POSIX threads support; the API is always present and
+ # active. We could reasonably leave PTHREAD_CFLAGS empty. But
+ # -pthread does define _REENTRANT, and while the Darwin headers
+ # ignore this macro, third-party headers might not.)
+
+ PTHREAD_CFLAGS="-pthread"
+ PTHREAD_LIBS=
+
+ ax_pthread_ok=yes
+
+ # However, older versions of Clang make a point of warning the user
+ # that, in an invocation where only linking and no compilation is
+ # taking place, the -pthread option has no effect ("argument unused
+ # during compilation"). They expect -pthread to be passed in only
+ # when source code is being compiled.
+ #
+ # Problem is, this is at odds with the way Automake and most other
+ # C build frameworks function, which is that the same flags used in
+ # compilation (CFLAGS) are also used in linking. Many systems
+ # supported by AX_PTHREAD require exactly this for POSIX threads
+ # support, and in fact it is often not straightforward to specify a
+ # flag that is used only in the compilation phase and not in
+ # linking. Such a scenario is extremely rare in practice.
+ #
+ # Even though use of the -pthread flag in linking would only print
+ # a warning, this can be a nuisance for well-run software projects
+ # that build with -Werror. So if the active version of Clang has
+ # this misfeature, we search for an option to squash it.
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Clang needs flag to prevent \"argument unused\" warning when linking with -pthread" >&5
+$as_echo_n "checking whether Clang needs flag to prevent \"argument unused\" warning when linking with -pthread... " >&6; }
+if ${ax_cv_PTHREAD_CLANG_NO_WARN_FLAG+:} false; then :
+ $as_echo_n "(cached) " >&6
else
- ax_cv_PTHREAD_REJECT_UNKNOWN=no
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG=unknown
+ # Create an alternate version of $ac_link that compiles and
+ # links in two steps (.c -> .o, .o -> exe) instead of one
+ # (.c -> exe), because the warning occurs only in the second
+ # step
+ ax_pthread_save_ac_link="$ac_link"
+ ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
+ ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+ ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
+ if test "x$ax_pthread_try" = "xunknown"; then :
+ break
fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
- CFLAGS="$save_CFLAGS"
+ CFLAGS="-Werror -Wunknown-warning-option $ax_pthread_try -pthread $ax_pthread_save_CFLAGS"
+ ac_link="$ax_pthread_save_ac_link"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int main(void){return 0;}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_link="$ax_pthread_2step_ac_link"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int main(void){return 0;}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ break
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_REJECT_UNKNOWN" >&5
-$as_echo "$ax_cv_PTHREAD_REJECT_UNKNOWN" >&6; }
-ax_pthread_extra_flags=
-if test "x$ax_cv_PTHREAD_REJECT_UNKNOWN" != "xno"; then :
- ax_pthread_extra_flags="$ax_cv_PTHREAD_REJECT_UNKNOWN"
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ done
+ ac_link="$ax_pthread_save_ac_link"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ if test "x$ax_pthread_try" = "x"; then :
+ ax_pthread_try=no
+fi
+ ax_cv_PTHREAD_CLANG_NO_WARN_FLAG="$ax_pthread_try"
+
fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" >&5
+$as_echo "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" >&6; }
+
+ case "$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG" in
+ no | unknown) ;;
+ *) PTHREAD_CFLAGS="$ax_cv_PTHREAD_CLANG_NO_WARN_FLAG $PTHREAD_CFLAGS" ;;
+ esac
-if test x"$ax_pthread_ok" = xno; then
-for flag in $ax_pthread_flags; do
+fi # $ax_pthread_clang = yes
- case $flag in
+if test "x$ax_pthread_ok" = "xno"; then
+for ax_pthread_try_flag in $ax_pthread_flags; do
+
+ case $ax_pthread_try_flag in
none)
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5
$as_echo_n "checking whether pthreads work without any flags... " >&6; }
@@ -10061,9 +10484,9 @@ $as_echo_n "checking whether pthreads work with -mt -lpthread... " >&6; }
;;
-*)
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5
-$as_echo_n "checking whether pthreads work with $flag... " >&6; }
- PTHREAD_CFLAGS="$flag"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $ax_pthread_try_flag" >&5
+$as_echo_n "checking whether pthreads work with $ax_pthread_try_flag... " >&6; }
+ PTHREAD_CFLAGS="$ax_pthread_try_flag"
;;
pthread-config)
@@ -10105,22 +10528,24 @@ $as_echo "no" >&6; }
fi
- if test x"$ax_pthread_config" = xno; then continue; fi
+ if test "x$ax_pthread_config" = "xno"; then :
+ continue
+fi
PTHREAD_CFLAGS="`pthread-config --cflags`"
PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
;;
*)
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5
-$as_echo_n "checking for the pthreads library -l$flag... " >&6; }
- PTHREAD_LIBS="-l$flag"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$ax_pthread_try_flag" >&5
+$as_echo_n "checking for the pthreads library -l$ax_pthread_try_flag... " >&6; }
+ PTHREAD_LIBS="-l$ax_pthread_try_flag"
;;
esac
- save_LIBS="$LIBS"
- save_CFLAGS="$CFLAGS"
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
LIBS="$PTHREAD_LIBS $LIBS"
- CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags"
# Check for various functions. We must include pthread.h,
# since some functions may be macros. (On the Sequent, we
@@ -10131,9 +10556,13 @@ $as_echo_n "checking for the pthreads library -l$flag... " >&6; }
# pthread_cleanup_push because it is one of the few pthread
# functions on Solaris that doesn't have a non-functional libc stub.
# We try pthread_create on general principles.
+
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <pthread.h>
+# if $ax_pthread_check_cond
+# error "$ax_pthread_check_macro must be defined"
+# endif
static void routine(void *a) { a = 0; }
static void *start_routine(void *a) { return a; }
int
@@ -10155,14 +10584,14 @@ fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5
$as_echo "$ax_pthread_ok" >&6; }
- if test "x$ax_pthread_ok" = xyes; then
- break;
- fi
+ if test "x$ax_pthread_ok" = "xyes"; then :
+ break
+fi
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
@@ -10170,11 +10599,11 @@ done
fi
# Various other checks:
-if test "x$ax_pthread_ok" = xyes; then
- save_LIBS="$LIBS"
- LIBS="$PTHREAD_LIBS $LIBS"
- save_CFLAGS="$CFLAGS"
+if test "x$ax_pthread_ok" = "xyes"; then
+ ax_pthread_save_CFLAGS="$CFLAGS"
+ ax_pthread_save_LIBS="$LIBS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
# Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5
@@ -10183,20 +10612,20 @@ if ${ax_cv_PTHREAD_JOINABLE_ATTR+:} false; then :
$as_echo_n "(cached) " >&6
else
ax_cv_PTHREAD_JOINABLE_ATTR=unknown
- for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ for ax_pthread_attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <pthread.h>
int
main ()
{
-int attr = $attr; return attr /* ; */
+int attr = $ax_pthread_attr; return attr /* ; */
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- ax_cv_PTHREAD_JOINABLE_ATTR=$attr; break
+ ax_cv_PTHREAD_JOINABLE_ATTR=$ax_pthread_attr; break
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
@@ -10205,44 +10634,29 @@ rm -f core conftest.err conftest.$ac_objext \
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_JOINABLE_ATTR" >&5
$as_echo "$ax_cv_PTHREAD_JOINABLE_ATTR" >&6; }
- if test "$ax_cv_PTHREAD_JOINABLE_ATTR" != unknown && \
- test "$ax_cv_PTHREAD_JOINABLE_ATTR" != PTHREAD_CREATE_JOINABLE; then :
+ if test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xunknown" && \
+ test "x$ax_cv_PTHREAD_JOINABLE_ATTR" != "xPTHREAD_CREATE_JOINABLE" && \
+ test "x$ax_pthread_joinable_attr_defined" != "xyes"; then :
cat >>confdefs.h <<_ACEOF
#define PTHREAD_CREATE_JOINABLE $ax_cv_PTHREAD_JOINABLE_ATTR
_ACEOF
+ ax_pthread_joinable_attr_defined=yes
+
fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5
-$as_echo_n "checking if more special flags are required for pthreads... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether more special flags are required for pthreads" >&5
+$as_echo_n "checking whether more special flags are required for pthreads... " >&6; }
if ${ax_cv_PTHREAD_SPECIAL_FLAGS+:} false; then :
$as_echo_n "(cached) " >&6
else
ax_cv_PTHREAD_SPECIAL_FLAGS=no
- ax_pthread_special_flags_added=no
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-# if !defined(_REENTRANT) && !defined(_THREAD_SAFE)
- AX_PTHREAD_NEED_SPECIAL_FLAG
-# endif
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
- $EGREP "AX_PTHREAD_NEED_SPECIAL_FLAG" >/dev/null 2>&1; then :
- case $host_os in
- aix* | freebsd*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_THREAD_SAFE"
- ;;
- darwin* | hpux* | osf* | solaris*)
- ax_cv_PTHREAD_SPECIAL_FLAGS="-D_REENTRANT"
- ;;
- esac
-
-fi
-rm -f conftest*
-
+ case $host_os in
+ solaris*)
+ ax_cv_PTHREAD_SPECIAL_FLAGS="-D_POSIX_PTHREAD_SEMANTICS"
+ ;;
+ esac
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_SPECIAL_FLAGS" >&5
@@ -10280,23 +10694,26 @@ rm -f core conftest.err conftest.$ac_objext \
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5
$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; }
- if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then :
+ if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes" && \
+ test "x$ax_pthread_prio_inherit_defined" != "xyes"; then :
$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h
+ ax_pthread_prio_inherit_defined=yes
+
fi
- LIBS="$save_LIBS"
- CFLAGS="$save_CFLAGS"
+ CFLAGS="$ax_pthread_save_CFLAGS"
+ LIBS="$ax_pthread_save_LIBS"
# More AIX lossage: compile with *_r variant
- if test "x$GCC" != xyes; then
+ if test "x$GCC" != "xyes"; then
case $host_os in
aix*)
case "x/$CC" in #(
x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) :
#handle absolute path differently from PATH based program lookup
- case "x$CC" in #(
+ case "x$CC" in #(
x/*) :
if as_fn_executable_p ${CC}_r; then :
PTHREAD_CC="${CC}_r"
@@ -10361,7 +10778,7 @@ test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
-if test x"$ax_pthread_ok" = xyes; then
+if test "x$ax_pthread_ok" = "xyes"; then
$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
@@ -10424,12 +10841,10 @@ else
int
main ()
{
-#ifndef _AIX
-int strerror_r(int, char *, size_t);
-#else
-/* Older AIX has 'int' for the third argument so we don't test the args. */
-int strerror_r();
-#endif
+char buf[100];
+ switch (strerror_r(1, buf, sizeof(buf)))
+ { case 0: break; default: break; }
+
;
return 0;
}
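The rewritten strerror_r probe above relies on a C language rule rather than on prototypes: a switch statement requires an integer controlling expression, so the fragment compiles only against the XSI (int-returning) strerror_r and fails against the GNU (char *-returning) variant. A standalone sketch of the same trick:

    #include <string.h>

    int main(void)
    {
        char buf[100];

        /* Compiles only if strerror_r returns an integer type; the GNU
         * variant returns char *, which switch() rejects. */
        switch (strerror_r(1, buf, sizeof buf))
        { case 0: break; default: break; }
        return 0;
    }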
@@ -10738,12 +11153,9 @@ if test "$ac_res" != no; then :
fi
-# We only use libld in port/dynloader/aix.c
-case $host_os in
- aix*)
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5
-$as_echo_n "checking for library containing ldopen... " >&6; }
-if ${ac_cv_search_ldopen+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5
+$as_echo_n "checking for library containing getopt_long... " >&6; }
+if ${ac_cv_search_getopt_long+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_func_search_save_LIBS=$LIBS
@@ -10756,16 +11168,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
#ifdef __cplusplus
extern "C"
#endif
-char ldopen ();
+char getopt_long ();
int
main ()
{
-return ldopen ();
+return getopt_long ();
;
return 0;
}
_ACEOF
-for ac_lib in '' ld; do
+for ac_lib in '' getopt gnugetopt; do
if test -z "$ac_lib"; then
ac_res="none required"
else
@@ -10773,76 +11185,18 @@ for ac_lib in '' ld; do
LIBS="-l$ac_lib $ac_func_search_save_LIBS"
fi
if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_search_ldopen=$ac_res
+ ac_cv_search_getopt_long=$ac_res
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
- if ${ac_cv_search_ldopen+:} false; then :
+ if ${ac_cv_search_getopt_long+:} false; then :
break
fi
done
-if ${ac_cv_search_ldopen+:} false; then :
+if ${ac_cv_search_getopt_long+:} false; then :
else
- ac_cv_search_ldopen=no
-fi
-rm conftest.$ac_ext
-LIBS=$ac_func_search_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5
-$as_echo "$ac_cv_search_ldopen" >&6; }
-ac_res=$ac_cv_search_ldopen
-if test "$ac_res" != no; then :
- test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
-
-fi
-
- ;;
-esac
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5
-$as_echo_n "checking for library containing getopt_long... " >&6; }
-if ${ac_cv_search_getopt_long+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- ac_func_search_save_LIBS=$LIBS
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char getopt_long ();
-int
-main ()
-{
-return getopt_long ();
- ;
- return 0;
-}
-_ACEOF
-for ac_lib in '' getopt gnugetopt; do
- if test -z "$ac_lib"; then
- ac_res="none required"
- else
- ac_res=-l$ac_lib
- LIBS="-l$ac_lib $ac_func_search_save_LIBS"
- fi
- if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_search_getopt_long=$ac_res
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext
- if ${ac_cv_search_getopt_long+:} false; then :
- break
-fi
-done
-if ${ac_cv_search_getopt_long+:} false; then :
-
-else
- ac_cv_search_getopt_long=no
+ ac_cv_search_getopt_long=no
fi
rm conftest.$ac_ext
LIBS=$ac_func_search_save_LIBS
@@ -10855,62 +11209,6 @@ if test "$ac_res" != no; then :
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5
-$as_echo_n "checking for library containing crypt... " >&6; }
-if ${ac_cv_search_crypt+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- ac_func_search_save_LIBS=$LIBS
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char crypt ();
-int
-main ()
-{
-return crypt ();
- ;
- return 0;
-}
-_ACEOF
-for ac_lib in '' crypt; do
- if test -z "$ac_lib"; then
- ac_res="none required"
- else
- ac_res=-l$ac_lib
- LIBS="-l$ac_lib $ac_func_search_save_LIBS"
- fi
- if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_search_crypt=$ac_res
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext
- if ${ac_cv_search_crypt+:} false; then :
- break
-fi
-done
-if ${ac_cv_search_crypt+:} false; then :
-
-else
- ac_cv_search_crypt=no
-fi
-rm conftest.$ac_ext
-LIBS=$ac_func_search_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5
-$as_echo "$ac_cv_search_crypt" >&6; }
-ac_res=$ac_cv_search_crypt
-if test "$ac_res" != no; then :
- test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
-
-fi
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_open" >&5
$as_echo_n "checking for library containing shm_open... " >&6; }
if ${ac_cv_search_shm_open+:} false; then :
@@ -11346,11 +11644,11 @@ return readline ();
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- # Older NetBSD, OpenBSD, and Irix have a broken linker that does not
+ # Older NetBSD and OpenBSD have a broken linker that does not
# recognize dependent libraries; assume curses is needed if we didn't
# find any dependency.
case $host_os in
- netbsd* | openbsd* | irix*)
+ netbsd* | openbsd*)
if test x"$pgac_lib" = x"" ; then
pgac_lib=" -lcurses"
fi ;;
@@ -11759,7 +12057,7 @@ done
# defines OPENSSL_VERSION_NUMBER to claim version 2.0.0, even though it
# doesn't have these OpenSSL 1.1.0 functions. So check for individual
# functions.
- for ac_func in OPENSSL_init_ssl BIO_get_data BIO_meth_new ASN1_STRING_get0_data RAND_OpenSSL
+ for ac_func in OPENSSL_init_ssl BIO_get_data BIO_meth_new ASN1_STRING_get0_data
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -12406,7 +12704,7 @@ $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
fi
-for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h
+for ac_header in atomic.h copyfile.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/prctl.h sys/procctl.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
@@ -13278,28 +13576,57 @@ $as_echo_n "checking for printf format archetype... " >&6; }
if ${pgac_cv_printf_archetype+:} false; then :
$as_echo_n "(cached) " >&6
else
- ac_save_c_werror_flag=$ac_c_werror_flag
+ pgac_cv_printf_archetype=gnu_printf
+ac_save_c_werror_flag=$ac_c_werror_flag
ac_c_werror_flag=yes
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-extern int
-pgac_write(int ignore, const char *fmt,...)
-__attribute__((format(gnu_printf, 2, 3)));
+extern void pgac_write(int ignore, const char *fmt,...)
+__attribute__((format($pgac_cv_printf_archetype, 2, 3)));
int
main ()
{
+pgac_write(0, "error %s: %m", "foo");
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_archetype_ok=yes
+else
+ ac_archetype_ok=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_c_werror_flag=$ac_save_c_werror_flag
+if [ "$ac_archetype_ok" = no ]; then
+ pgac_cv_printf_archetype=__syslog__
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ac_c_werror_flag=yes
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+extern void pgac_write(int ignore, const char *fmt,...)
+__attribute__((format($pgac_cv_printf_archetype, 2, 3)));
+int
+main ()
+{
+pgac_write(0, "error %s: %m", "foo");
;
return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv_printf_archetype=gnu_printf
+ ac_archetype_ok=yes
else
- pgac_cv_printf_archetype=printf
+ ac_archetype_ok=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_c_werror_flag=$ac_save_c_werror_flag
+
+ if [ "$ac_archetype_ok" = no ]; then
+ pgac_cv_printf_archetype=printf
+ fi
+fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_printf_archetype" >&5
$as_echo "$pgac_cv_printf_archetype" >&6; }
@@ -13544,78 +13871,6 @@ if test x"$pgac_cv__types_compatible" = xyes ; then
$as_echo "#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1" >>confdefs.h
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap16" >&5
-$as_echo_n "checking for __builtin_bswap16... " >&6; }
-if ${pgac_cv__builtin_bswap16+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-static unsigned long int x = __builtin_bswap16(0xaabb);
-
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv__builtin_bswap16=yes
-else
- pgac_cv__builtin_bswap16=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap16" >&5
-$as_echo "$pgac_cv__builtin_bswap16" >&6; }
-if test x"$pgac_cv__builtin_bswap16" = xyes ; then
-
-$as_echo "#define HAVE__BUILTIN_BSWAP16 1" >>confdefs.h
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap32" >&5
-$as_echo_n "checking for __builtin_bswap32... " >&6; }
-if ${pgac_cv__builtin_bswap32+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-static unsigned long int x = __builtin_bswap32(0xaabbccdd);
-
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv__builtin_bswap32=yes
-else
- pgac_cv__builtin_bswap32=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap32" >&5
-$as_echo "$pgac_cv__builtin_bswap32" >&6; }
-if test x"$pgac_cv__builtin_bswap32" = xyes ; then
-
-$as_echo "#define HAVE__BUILTIN_BSWAP32 1" >>confdefs.h
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap64" >&5
-$as_echo_n "checking for __builtin_bswap64... " >&6; }
-if ${pgac_cv__builtin_bswap64+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-static unsigned long int x = __builtin_bswap64(0xaabbccddeeff0011);
-
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv__builtin_bswap64=yes
-else
- pgac_cv__builtin_bswap64=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap64" >&5
-$as_echo "$pgac_cv__builtin_bswap64" >&6; }
-if test x"$pgac_cv__builtin_bswap64" = xyes ; then
-
-$as_echo "#define HAVE__BUILTIN_BSWAP64 1" >>confdefs.h
-
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_constant_p" >&5
$as_echo_n "checking for __builtin_constant_p... " >&6; }
@@ -13708,38 +13963,6 @@ if test x"$pgac_cv_computed_goto" = xyes ; then
$as_echo "#define HAVE_COMPUTED_GOTO 1" >>confdefs.h
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __VA_ARGS__" >&5
-$as_echo_n "checking for __VA_ARGS__... " >&6; }
-if ${pgac_cv__va_args+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <stdio.h>
-int
-main ()
-{
-#define debug(...) fprintf(stderr, __VA_ARGS__)
-debug("%s", "blarg");
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- pgac_cv__va_args=yes
-else
- pgac_cv__va_args=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__va_args" >&5
-$as_echo "$pgac_cv__va_args" >&6; }
-if test x"$pgac_cv__va_args" = xyes ; then
-
-$as_echo "#define HAVE__VA_ARGS 1" >>confdefs.h
-
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5
$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; }
@@ -14303,12 +14526,49 @@ fi
fi
-# On PPC, check if assembler supports LWARX instruction's mutex hint bit
case $host_cpu in
+ x86_64)
+ # On x86_64, check if we can compile a popcntq instruction
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether assembler supports x86_64 popcntq" >&5
+$as_echo_n "checking whether assembler supports x86_64 popcntq... " >&6; }
+if ${pgac_cv_have_x86_64_popcntq+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+long long x = 1; long long r;
+ __asm__ __volatile__ (" popcntq %1,%0\n" : "=q"(r) : "rm"(x));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ pgac_cv_have_x86_64_popcntq=yes
+else
+ pgac_cv_have_x86_64_popcntq=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_x86_64_popcntq" >&5
+$as_echo "$pgac_cv_have_x86_64_popcntq" >&6; }
+ if test x"$pgac_cv_have_x86_64_popcntq" = xyes ; then
+
+$as_echo "#define HAVE_X86_64_POPCNTQ 1" >>confdefs.h
+
+ fi
+ ;;
ppc*|powerpc*)
+ # On PPC, check if assembler supports LWARX instruction's mutex hint bit
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether assembler supports lwarx hint bit" >&5
$as_echo_n "checking whether assembler supports lwarx hint bit... " >&6; }
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+if ${pgac_cv_have_ppc_mutex_hint+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int
@@ -14326,7 +14586,8 @@ else
pgac_cv_have_ppc_mutex_hint=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_ppc_mutex_hint" >&5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_ppc_mutex_hint" >&5
$as_echo "$pgac_cv_have_ppc_mutex_hint" >&6; }
if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
@@ -14544,7 +14805,6 @@ fi
fi
-# Check for largefile support (must be after AC_SYS_LARGEFILE)
# The cast to long int works around a bug in the HP C Compiler
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
@@ -14676,8 +14936,8 @@ else
if ${ac_cv_func_accept_arg3+:} false; then :
$as_echo_n "(cached) " >&6
else
- for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do
- for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do
+ for ac_cv_func_accept_return in 'int' 'SOCKET WSAAPI' 'unsigned int PASCAL'; do
+ for ac_cv_func_accept_arg1 in 'int' 'SOCKET' 'unsigned int'; do
for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do
for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -14827,7 +15087,7 @@ fi
LIBS_including_readline="$LIBS"
LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'`
-for ac_func in cbrt clock_gettime dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll posix_fallocate pstat pthread_is_threaded_np readlink setproctitle setsid shm_open symlink sync_file_range utime utimes wcstombs_l
+for ac_func in cbrt clock_gettime copyfile fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memset_s memmove poll posix_fallocate ppoll pstat pthread_is_threaded_np readlink setproctitle setproctitle_fast setsid shm_open strchrnul strsignal symlink sync_file_range uselocale utime utimes wcstombs_l
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -14840,24 +15100,255 @@ fi
done
-ac_fn_c_check_func "$LINENO" "fseeko" "ac_cv_func_fseeko"
-if test "x$ac_cv_func_fseeko" = xyes; then :
- $as_echo "#define HAVE_FSEEKO 1" >>confdefs.h
-
+# These typically are compiler builtins, for which AC_CHECK_FUNCS fails.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap16" >&5
+$as_echo_n "checking for __builtin_bswap16... " >&6; }
+if ${pgac_cv__builtin_bswap16+:} false; then :
+ $as_echo_n "(cached) " >&6
else
- case " $LIBOBJS " in
- *" fseeko.$ac_objext "* ) ;;
- *) LIBOBJS="$LIBOBJS fseeko.$ac_objext"
- ;;
-esac
-
-fi
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int
+call__builtin_bswap16(int x)
+{
+ return __builtin_bswap16(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_bswap16=yes
+else
+ pgac_cv__builtin_bswap16=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap16" >&5
+$as_echo "$pgac_cv__builtin_bswap16" >&6; }
+if test x"${pgac_cv__builtin_bswap16}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_BSWAP16 1
+_ACEOF
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap32" >&5
+$as_echo_n "checking for __builtin_bswap32... " >&6; }
+if ${pgac_cv__builtin_bswap32+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+call__builtin_bswap32(int x)
+{
+ return __builtin_bswap32(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_bswap32=yes
+else
+ pgac_cv__builtin_bswap32=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap32" >&5
+$as_echo "$pgac_cv__builtin_bswap32" >&6; }
+if test x"${pgac_cv__builtin_bswap32}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_BSWAP32 1
+_ACEOF
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap64" >&5
+$as_echo_n "checking for __builtin_bswap64... " >&6; }
+if ${pgac_cv__builtin_bswap64+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+call__builtin_bswap64(long int x)
+{
+ return __builtin_bswap64(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_bswap64=yes
+else
+ pgac_cv__builtin_bswap64=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap64" >&5
+$as_echo "$pgac_cv__builtin_bswap64" >&6; }
+if test x"${pgac_cv__builtin_bswap64}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_BSWAP64 1
+_ACEOF
+
+fi
+# We assume that we needn't test all widths of these explicitly:
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5
+$as_echo_n "checking for __builtin_clz... " >&6; }
+if ${pgac_cv__builtin_clz+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+call__builtin_clz(unsigned int x)
+{
+ return __builtin_clz(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_clz=yes
+else
+ pgac_cv__builtin_clz=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_clz" >&5
+$as_echo "$pgac_cv__builtin_clz" >&6; }
+if test x"${pgac_cv__builtin_clz}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_CLZ 1
+_ACEOF
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_ctz" >&5
+$as_echo_n "checking for __builtin_ctz... " >&6; }
+if ${pgac_cv__builtin_ctz+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+call__builtin_ctz(unsigned int x)
+{
+ return __builtin_ctz(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_ctz=yes
+else
+ pgac_cv__builtin_ctz=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_ctz" >&5
+$as_echo "$pgac_cv__builtin_ctz" >&6; }
+if test x"${pgac_cv__builtin_ctz}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_CTZ 1
+_ACEOF
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_popcount" >&5
+$as_echo_n "checking for __builtin_popcount... " >&6; }
+if ${pgac_cv__builtin_popcount+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+call__builtin_popcount(unsigned int x)
+{
+ return __builtin_popcount(x);
+}
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ pgac_cv__builtin_popcount=yes
+else
+ pgac_cv__builtin_popcount=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_popcount" >&5
+$as_echo "$pgac_cv__builtin_popcount" >&6; }
+if test x"${pgac_cv__builtin_popcount}" = xyes ; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BUILTIN_POPCOUNT 1
+_ACEOF
+
+fi
+
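Illustrative sketch (not part of the patch): a standalone C program exercising the builtins these link probes detect, assuming a GCC- or Clang-compatible compiler. Expected outputs are noted in the comments.

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int x = 0x12345678;

        printf("bswap32:  %#x\n", __builtin_bswap32(x));  /* 0x78563412 */
        printf("clz:      %d\n", __builtin_clz(x));       /* 3 leading zero bits */
        printf("ctz:      %d\n", __builtin_ctz(x));       /* 3 trailing zero bits */
        printf("popcount: %d\n", __builtin_popcount(x));  /* 13 set bits */
        return 0;
    }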
+ac_fn_c_check_func "$LINENO" "fseeko" "ac_cv_func_fseeko"
+if test "x$ac_cv_func_fseeko" = xyes; then :
+ $as_echo "#define HAVE_FSEEKO 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" fseeko.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS fseeko.$ac_objext"
+ ;;
+esac
+
+fi
-case $host_os in
- # NetBSD uses a custom fseeko/ftello built on fsetpos/fgetpos
- # Mingw uses macros to access Win32 API calls
- netbsd*|mingw*)
+
+case $host_os in
+ # NetBSD uses a custom fseeko/ftello built on fsetpos/fgetpos
+ # Mingw uses macros to access Win32 API calls
+ netbsd*|mingw*)
$as_echo "#define HAVE_FSEEKO 1" >>confdefs.h
@@ -14936,7 +15427,96 @@ esac
# posix_fadvise() is a no-op on Solaris, so don't incur function overhead
# by calling it, 2009-04-02
# http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/posix_fadvise.c
-if test "$PORTNAME" != "solaris"; then
+# The Clang compiler raises a warning for an undeclared identifier that matches
+# a compiler builtin function. All extant Clang versions are affected, as of
+# Clang 3.6.0. Test a builtin known to every version. This problem affects the
+# C and Objective C languages, but Clang does report an error under C++ and
+# Objective C++.
+#
+# Passing -fno-builtin to the compiler would suppress this problem. That
+# strategy would have the advantage of being insensitive to stray warnings, but
+# it would make tests less realistic.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how $CC reports undeclared, standard C functions" >&5
+$as_echo_n "checking how $CC reports undeclared, standard C functions... " >&6; }
+if ${ac_cv_c_decl_report+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+(void) strchr;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ if test -s conftest.err; then :
+ # For AC_CHECK_DECL to react to warnings, the compiler must be silent on
+ # valid AC_CHECK_DECL input. No library function is consistently available
+ # on freestanding implementations, so test against a dummy declaration.
+ # Include always-available headers on the off chance that they somehow
+ # elicit warnings.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <float.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stddef.h>
+extern void ac_decl (int, char *);
+int
+main ()
+{
+#ifdef __cplusplus
+ (void) ac_decl ((int) 0, (char *) 0);
+ (void) ac_decl;
+#else
+ (void) ac_decl;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ if test -s conftest.err; then :
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot detect from compiler exit status or warnings
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ ac_cv_c_decl_report=warning
+fi
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compile a simple declaration test
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "compiler does not report undeclared identifiers
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+else
+ ac_cv_c_decl_report=error
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_decl_report" >&5
+$as_echo "$ac_cv_c_decl_report" >&6; }
+
+case $ac_cv_c_decl_report in
+ warning) ac_c_decl_warn_flag=yes ;;
+ *) ac_c_decl_warn_flag= ;;
+esac
+
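Illustrative sketch (not part of the patch): the first conftest above reduces to this program. With no declaration in scope, gcc rejects the reference to strchr outright (report mode "error"), while the Clang versions discussed above only warn, because the name matches a compiler builtin (report mode "warning").

    /* deliberately does not include <string.h> */
    int
    main(void)
    {
        (void) strchr;  /* gcc: error; clang in C mode: warning only */
        return 0;
    }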
+if test "$PORTNAME" != "solaris"; then :
+
for ac_func in posix_fadvise
do :
ac_fn_c_check_func "$LINENO" "posix_fadvise" "ac_cv_func_posix_fadvise"
@@ -14960,7 +15540,8 @@ cat >>confdefs.h <<_ACEOF
#define HAVE_DECL_POSIX_FADVISE $ac_have_decl
_ACEOF
-fi
+
+fi # fi
ac_fn_c_check_decl "$LINENO" "fdatasync" "ac_cv_have_decl_fdatasync" "#include <unistd.h>
"
@@ -15019,7 +15600,30 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
-HAVE_IPV6=no
+ac_fn_c_check_decl "$LINENO" "RTLD_GLOBAL" "ac_cv_have_decl_RTLD_GLOBAL" "#include <dlfcn.h>
+"
+if test "x$ac_cv_have_decl_RTLD_GLOBAL" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_RTLD_GLOBAL $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "RTLD_NOW" "ac_cv_have_decl_RTLD_NOW" "#include <dlfcn.h>
+"
+if test "x$ac_cv_have_decl_RTLD_NOW" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_RTLD_NOW $ac_have_decl
+_ACEOF
+
+
ac_fn_c_check_type "$LINENO" "struct sockaddr_in6" "ac_cv_type_struct_sockaddr_in6" "$ac_includes_default
#include <netinet/in.h>
"
@@ -15027,11 +15631,9 @@ if test "x$ac_cv_type_struct_sockaddr_in6" = xyes; then :
$as_echo "#define HAVE_IPV6 1" >>confdefs.h
- HAVE_IPV6=yes
fi
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PS_STRINGS" >&5
$as_echo_n "checking for PS_STRINGS... " >&6; }
if ${pgac_cv_var_PS_STRINGS+:} false; then :
@@ -15068,97 +15670,6 @@ $as_echo "#define HAVE_PS_STRINGS 1" >>confdefs.h
fi
-# We use our snprintf.c emulation if either snprintf() or vsnprintf()
-# is missing. Yes, there are machines that have only one. We may
-# also decide to use snprintf.c if snprintf() is present but does not
-# have all the features we need --- see below.
-
-if test "$PORTNAME" = "win32"; then
- # Win32 gets snprintf.c built unconditionally.
- #
- # To properly translate all NLS languages strings, we must support the
- # *printf() %$ format, which allows *printf() arguments to be selected
- # by position in the translated string.
- #
- # libintl versions < 0.13 use the native *printf() functions, and Win32
- # *printf() doesn't understand %$, so we must use our /port versions,
- # which do understand %$. libintl versions >= 0.13 include their own
- # *printf versions on Win32. The libintl 0.13 release note text is:
- #
- # C format strings with positions, as they arise when a translator
- # needs to reorder a sentence, are now supported on all platforms.
- # On those few platforms (NetBSD and Woe32) for which the native
- # printf()/fprintf()/... functions don't support such format
- # strings, replacements are provided through <libintl.h>.
- #
- # We could use libintl >= 0.13's *printf() if we were sure that we had
- # a libintl >= 0.13 at runtime, but seeing that there is no clean way
- # to guarantee that, it is best to just use our own, so we are sure to
- # get %$ support. In include/port.h we disable the *printf() macros
- # that might have been defined by libintl.
- #
- # We do this unconditionally whether NLS is used or not so we are sure
- # that all Win32 libraries and binaries behave the same.
- pgac_need_repl_snprintf=yes
-else
- pgac_need_repl_snprintf=no
- for ac_func in snprintf
-do :
- ac_fn_c_check_func "$LINENO" "snprintf" "ac_cv_func_snprintf"
-if test "x$ac_cv_func_snprintf" = xyes; then :
- cat >>confdefs.h <<_ACEOF
-#define HAVE_SNPRINTF 1
-_ACEOF
-
-else
- pgac_need_repl_snprintf=yes
-fi
-done
-
- for ac_func in vsnprintf
-do :
- ac_fn_c_check_func "$LINENO" "vsnprintf" "ac_cv_func_vsnprintf"
-if test "x$ac_cv_func_vsnprintf" = xyes; then :
- cat >>confdefs.h <<_ACEOF
-#define HAVE_VSNPRINTF 1
-_ACEOF
-
-else
- pgac_need_repl_snprintf=yes
-fi
-done
-
-fi
-
-
-# Check whether <stdio.h> declares snprintf() and vsnprintf(); if not,
-# include/c.h will provide declarations. Note this is a separate test
-# from whether the functions exist in the C library --- there are
-# systems that have the functions but don't bother to declare them :-(
-
-ac_fn_c_check_decl "$LINENO" "snprintf" "ac_cv_have_decl_snprintf" "$ac_includes_default"
-if test "x$ac_cv_have_decl_snprintf" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_SNPRINTF $ac_have_decl
-_ACEOF
-ac_fn_c_check_decl "$LINENO" "vsnprintf" "ac_cv_have_decl_vsnprintf" "$ac_includes_default"
-if test "x$ac_cv_have_decl_vsnprintf" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_VSNPRINTF $ac_have_decl
-_ACEOF
-
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for isinf" >&5
$as_echo_n "checking for isinf... " >&6; }
if ${ac_cv_func_isinf+:} false; then :
@@ -15215,14 +15726,27 @@ done
fi
-ac_fn_c_check_func "$LINENO" "crypt" "ac_cv_func_crypt"
-if test "x$ac_cv_func_crypt" = xyes; then :
- $as_echo "#define HAVE_CRYPT 1" >>confdefs.h
+ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
+if test "x$ac_cv_func_dlopen" = xyes; then :
+ $as_echo "#define HAVE_DLOPEN 1" >>confdefs.h
else
case " $LIBOBJS " in
- *" crypt.$ac_objext "* ) ;;
- *) LIBOBJS="$LIBOBJS crypt.$ac_objext"
+ *" dlopen.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS dlopen.$ac_objext"
+ ;;
+esac
+
+fi
+
+ac_fn_c_check_func "$LINENO" "explicit_bzero" "ac_cv_func_explicit_bzero"
+if test "x$ac_cv_func_explicit_bzero" = xyes; then :
+ $as_echo "#define HAVE_EXPLICIT_BZERO 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" explicit_bzero.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS explicit_bzero.$ac_objext"
;;
esac
@@ -15293,6 +15817,32 @@ esac
fi
+ac_fn_c_check_func "$LINENO" "pread" "ac_cv_func_pread"
+if test "x$ac_cv_func_pread" = xyes; then :
+ $as_echo "#define HAVE_PREAD 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" pread.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS pread.$ac_objext"
+ ;;
+esac
+
+fi
+
+ac_fn_c_check_func "$LINENO" "pwrite" "ac_cv_func_pwrite"
+if test "x$ac_cv_func_pwrite" = xyes; then :
+ $as_echo "#define HAVE_PWRITE 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" pwrite.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS pwrite.$ac_objext"
+ ;;
+esac
+
+fi
+
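Illustrative sketch (not part of the patch; the function name is hypothetical and this is not src/port/pread.c): a missing pread() can be emulated with lseek() plus read(), at the cost of moving the file position, which the real pread() leaves untouched.

    #include <unistd.h>

    ssize_t
    pg_pread_sketch(int fd, void *buf, size_t nbyte, off_t offset)
    {
        if (lseek(fd, offset, SEEK_SET) < 0)
            return -1;              /* errno already set by lseek */
        return read(fd, buf, nbyte);
    }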
ac_fn_c_check_func "$LINENO" "random" "ac_cv_func_random"
if test "x$ac_cv_func_random" = xyes; then :
$as_echo "#define HAVE_RANDOM 1" >>confdefs.h
@@ -15326,20 +15876,7 @@ if test "x$ac_cv_func_srandom" = xyes; then :
else
case " $LIBOBJS " in
*" srandom.$ac_objext "* ) ;;
- *) LIBOBJS="$LIBOBJS srandom.$ac_objext"
- ;;
-esac
-
-fi
-
-ac_fn_c_check_func "$LINENO" "strerror" "ac_cv_func_strerror"
-if test "x$ac_cv_func_strerror" = xyes; then :
- $as_echo "#define HAVE_STRERROR 1" >>confdefs.h
-
-else
- case " $LIBOBJS " in
- *" strerror.$ac_objext "* ) ;;
- *) LIBOBJS="$LIBOBJS strerror.$ac_objext"
+ *) LIBOBJS="$LIBOBJS srandom.$ac_objext"
;;
esac
@@ -15384,7 +15921,39 @@ esac
fi
+ac_fn_c_check_func "$LINENO" "strtof" "ac_cv_func_strtof"
+if test "x$ac_cv_func_strtof" = xyes; then :
+ $as_echo "#define HAVE_STRTOF 1" >>confdefs.h
+
+else
+ case " $LIBOBJS " in
+ *" strtof.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS strtof.$ac_objext"
+ ;;
+esac
+
+fi
+
+
+
+case $host_os in
+ # Cygwin and (apparently, based on test results) Mingw both
+ # have a broken strtof(), so substitute the same replacement
+ # code we use with VS2013. That's not a perfect fix, since
+ # (unlike with VS2013) it doesn't avoid double-rounding, but
+ # we have no better options. To get that, though, we have to
+ # force the file to be compiled despite HAVE_STRTOF.
+ mingw*|cygwin*)
+ case " $LIBOBJS " in
+ *" strtof.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS strtof.$ac_objext"
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: On $host_os we will use our strtof wrapper." >&5
+$as_echo "$as_me: On $host_os we will use our strtof wrapper." >&6;}
+ ;;
+esac
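Illustrative sketch (not part of the patch, not the actual src/port/strtof.c, and with a hypothetical function name): a replacement strtof() of the kind being substituted can be built on strtod(), which is exactly why it cannot avoid the double-rounding mentioned above.

    #include <errno.h>
    #include <math.h>
    #include <stdlib.h>

    float
    strtof_sketch(const char *nptr, char **endptr)
    {
        double  d = strtod(nptr, endptr);
        float   f = (float) d;      /* possible second rounding here */

        /* flag overflow/underflow that appears only at float width */
        if (d != 0 && (f == 0 || isinf(f)) && !isinf(d))
            errno = ERANGE;
        return f;
    }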
case $host_os in
@@ -15484,9 +16053,9 @@ esac
fi
-# Solaris' getopt() doesn't do what we want for long options, so always use
-# our version on that platform.
-if test "$PORTNAME" = "solaris"; then
+# On OpenBSD and Solaris, getopt() doesn't do what we want for long options
+# (i.e., allow '-' as a flag character), so use our version on those platforms.
+if test "$PORTNAME" = "openbsd" -o "$PORTNAME" = "solaris"; then
case " $LIBOBJS " in
*" getopt.$ac_objext "* ) ;;
*) LIBOBJS="$LIBOBJS getopt.$ac_objext"
@@ -15514,6 +16083,17 @@ fi
# Win32 (really MinGW) support
if test "$PORTNAME" = "win32"; then
+ for ac_func in _configthreadlocale
+do :
+ ac_fn_c_check_func "$LINENO" "_configthreadlocale" "ac_cv_func__configthreadlocale"
+if test "x$ac_cv_func__configthreadlocale" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE__CONFIGTHREADLOCALE 1
+_ACEOF
+
+fi
+done
+
ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"
if test "x$ac_cv_func_gettimeofday" = xyes; then :
$as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h
@@ -15615,24 +16195,6 @@ esac
fi
-ac_fn_c_check_decl "$LINENO" "sys_siglist" "ac_cv_have_decl_sys_siglist" "#include <signal.h>
-/* NetBSD declares sys_siglist in unistd.h. */
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif
-
-"
-if test "x$ac_cv_have_decl_sys_siglist" = xyes; then :
- ac_have_decl=1
-else
- ac_have_decl=0
-fi
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_SYS_SIGLIST $ac_have_decl
-_ACEOF
-
-
ac_fn_c_check_func "$LINENO" "syslog" "ac_cv_func_syslog"
if test "x$ac_cv_func_syslog" = xyes; then :
ac_fn_c_check_header_mongrel "$LINENO" "syslog.h" "ac_cv_header_syslog_h" "$ac_includes_default"
@@ -15710,7 +16272,7 @@ $as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h
fi
-for ac_func in strtoll strtoq
+for ac_func in strtoll __strtoll strtoq
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -15722,7 +16284,7 @@ _ACEOF
fi
done
-for ac_func in strtoull strtouq
+for ac_func in strtoull __strtoull strtouq
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -15734,6 +16296,28 @@ _ACEOF
fi
done
+# strto[u]ll may exist but not be declared
+ac_fn_c_check_decl "$LINENO" "strtoll" "ac_cv_have_decl_strtoll" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtoll" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOLL $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "strtoull" "ac_cv_have_decl_strtoull" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtoull" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOULL $ac_have_decl
+_ACEOF
+
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
@@ -15752,6 +16336,73 @@ fi
CPPFLAGS=$ac_save_CPPFLAGS
fi
+if test "$with_llvm" = yes; then
+
+ # Check which functionality is present
+ SAVE_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS"
+ ac_fn_c_check_decl "$LINENO" "LLVMOrcGetSymbolAddressIn" "ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" "#include <llvm-c/OrcBindings.h>
+"
+if test "x$ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN $ac_have_decl
+_ACEOF
+
+ ac_fn_c_check_decl "$LINENO" "LLVMGetHostCPUName" "ac_cv_have_decl_LLVMGetHostCPUName" "#include <llvm-c/TargetMachine.h>
+"
+if test "x$ac_cv_have_decl_LLVMGetHostCPUName" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_LLVMGETHOSTCPUNAME $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "LLVMGetHostCPUFeatures" "ac_cv_have_decl_LLVMGetHostCPUFeatures" "#include <llvm-c/TargetMachine.h>
+"
+if test "x$ac_cv_have_decl_LLVMGetHostCPUFeatures" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_LLVMGETHOSTCPUFEATURES $ac_have_decl
+_ACEOF
+
+ ac_fn_c_check_decl "$LINENO" "LLVMCreateGDBRegistrationListener" "ac_cv_have_decl_LLVMCreateGDBRegistrationListener" "#include <llvm-c/ExecutionEngine.h>
+"
+if test "x$ac_cv_have_decl_LLVMCreateGDBRegistrationListener" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "LLVMCreatePerfJITEventListener" "ac_cv_have_decl_LLVMCreatePerfJITEventListener" "#include <llvm-c/ExecutionEngine.h>
+"
+if test "x$ac_cv_have_decl_LLVMCreatePerfJITEventListener" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER $ac_have_decl
+_ACEOF
+
+ CPPFLAGS="$SAVE_CPPFLAGS"
+
+fi
+
# Lastly, restore full LIBS list and check for readline/libedit symbols
LIBS="$LIBS_including_readline"
@@ -15854,54 +16505,6 @@ fi
# Run tests below here
# --------------------
-# Force use of our snprintf if system's doesn't do arg control
-# See comment above at snprintf test for details.
-if test "$enable_nls" = yes -a "$pgac_need_repl_snprintf" = no; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether snprintf supports argument control" >&5
-$as_echo_n "checking whether snprintf supports argument control... " >&6; }
-if ${pgac_cv_snprintf_arg_control+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if test "$cross_compiling" = yes; then :
- pgac_cv_snprintf_arg_control=cross
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <stdio.h>
-#include <string.h>
-
-int main()
-{
- char buf[100];
-
- /* can it swap arguments? */
- snprintf(buf, 100, "%2\$d %1\$d", 3, 4);
- if (strcmp(buf, "4 3") != 0)
- return 1;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
- pgac_cv_snprintf_arg_control=yes
-else
- pgac_cv_snprintf_arg_control=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
- conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_arg_control" >&5
-$as_echo "$pgac_cv_snprintf_arg_control" >&6; }
-
- if test $pgac_cv_snprintf_arg_control != yes ; then
- pgac_need_repl_snprintf=yes
- fi
-fi
-
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether long int is 64 bits" >&5
$as_echo_n "checking whether long int is 64 bits... " >&6; }
@@ -16080,161 +16683,19 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
-# If we found "long int" is 64 bits, assume snprintf handles it. If
-# we found we need to use "long long int", better check. We cope with
-# snprintfs that use %lld, %qd, or %I64d as the format. If none of these
-# work, fall back to our own snprintf emulation (which we know uses %lld).
-
-if test "$HAVE_LONG_LONG_INT_64" = yes ; then
- if test $pgac_need_repl_snprintf = no; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking snprintf length modifier for long long int" >&5
-$as_echo_n "checking snprintf length modifier for long long int... " >&6; }
-if ${pgac_cv_snprintf_long_long_int_modifier+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- for pgac_modifier in 'll' 'q' 'I64'; do
-if test "$cross_compiling" = yes; then :
- pgac_cv_snprintf_long_long_int_modifier=cross; break
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <stdio.h>
-#include <string.h>
-typedef long long int ac_int64;
-#define INT64_FORMAT "%${pgac_modifier}d"
-
-ac_int64 a = 20000001;
-ac_int64 b = 40000005;
-
-int does_int64_snprintf_work()
-{
- ac_int64 c;
- char buf[100];
-
- if (sizeof(ac_int64) != 8)
- return 0; /* doesn't look like the right size */
-
- c = a * b;
- snprintf(buf, 100, INT64_FORMAT, c);
- if (strcmp(buf, "800000140000005") != 0)
- return 0; /* either multiply or snprintf is busted */
- return 1;
-}
-
-int
-main() {
- return (! does_int64_snprintf_work());
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
- pgac_cv_snprintf_long_long_int_modifier=$pgac_modifier; break
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
- conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-done
-fi
-
-LONG_LONG_INT_MODIFIER=''
-
-case $pgac_cv_snprintf_long_long_int_modifier in
- cross) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cannot test (not on host machine)" >&5
-$as_echo "cannot test (not on host machine)" >&6; };;
- ?*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_long_long_int_modifier" >&5
-$as_echo "$pgac_cv_snprintf_long_long_int_modifier" >&6; }
- LONG_LONG_INT_MODIFIER=$pgac_cv_snprintf_long_long_int_modifier;;
- *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5
-$as_echo "none" >&6; };;
-esac
- if test "$LONG_LONG_INT_MODIFIER" = ""; then
- # Force usage of our own snprintf, since system snprintf is broken
- pgac_need_repl_snprintf=yes
- LONG_LONG_INT_MODIFIER='ll'
- fi
- else
- # Here if we previously decided we needed to use our own snprintf
- LONG_LONG_INT_MODIFIER='ll'
- fi
+# Select the printf length modifier that goes with that, too.
+if test x"$pg_int64_type" = x"long long int" ; then
+ INT64_MODIFIER='"ll"'
else
- # Here if we are not using 'long long int' at all
- LONG_LONG_INT_MODIFIER='l'
+ INT64_MODIFIER='"l"'
fi
-INT64_MODIFIER="\"$LONG_LONG_INT_MODIFIER\""
-
cat >>confdefs.h <<_ACEOF
#define INT64_MODIFIER $INT64_MODIFIER
_ACEOF
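Illustrative sketch (not part of the patch; the typedef stands in for PG_INT64_TYPE and assumes the "long long int" branch): the INT64_MODIFIER string chosen above is consumed by pasting it into printf formats at compile time.

    #include <stdio.h>

    #define INT64_MODIFIER "ll"         /* as configure would define it */
    typedef long long int pg_int64;     /* stand-in for PG_INT64_TYPE */

    int
    main(void)
    {
        pg_int64    v = 9007199254740993LL;

        printf("%" INT64_MODIFIER "d\n", v);
        return 0;
    }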
-# Also force use of our snprintf if the system's doesn't support the %z flag.
-if test "$pgac_need_repl_snprintf" = no; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether snprintf supports the %z modifier" >&5
-$as_echo_n "checking whether snprintf supports the %z modifier... " >&6; }
-if ${pgac_cv_snprintf_size_t_support+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if test "$cross_compiling" = yes; then :
- pgac_cv_snprintf_size_t_support=cross
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <stdio.h>
-#include <string.h>
-
-int main()
-{
- char bufz[100];
- char buf64[100];
-
- /*
- * Print the largest unsigned number fitting in a size_t using both %zu
- * and the previously-determined format for 64-bit integers. Note that
- * we don't run this code unless we know snprintf handles 64-bit ints.
- */
- bufz[0] = '\0'; /* in case snprintf fails to emit anything */
- snprintf(bufz, sizeof(bufz), "%zu", ~((size_t) 0));
- snprintf(buf64, sizeof(buf64), "%" INT64_MODIFIER "u",
- (unsigned PG_INT64_TYPE) ~((size_t) 0));
- if (strcmp(bufz, buf64) != 0)
- return 1;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
- pgac_cv_snprintf_size_t_support=yes
-else
- pgac_cv_snprintf_size_t_support=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
- conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_size_t_support" >&5
-$as_echo "$pgac_cv_snprintf_size_t_support" >&6; }
-
- if test "$pgac_cv_snprintf_size_t_support" != yes; then
- pgac_need_repl_snprintf=yes
- fi
-fi
-
-# Now we have checked all the reasons to replace snprintf
-if test $pgac_need_repl_snprintf = yes; then
-
-$as_echo "#define USE_REPL_SNPRINTF 1" >>confdefs.h
-
- case " $LIBOBJS " in
- *" snprintf.$ac_objext "* ) ;;
- *) LIBOBJS="$LIBOBJS snprintf.$ac_objext"
- ;;
-esac
-
-fi
-
# has to be down here, rather than with the other builtins, because
# the test uses PG_INT64_TYPE.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_mul_overflow" >&5
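Illustrative sketch (not part of the patch): the builtin being probed performs checked multiplication, reporting overflow instead of silently wrapping; the probe sits down here because the real test is written in terms of PG_INT64_TYPE.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int64_t     a = INT64_C(3037000500);    /* just above sqrt(2^63) */
        int64_t     r;

        if (__builtin_mul_overflow(a, a, &r))
            printf("overflow detected\n");      /* taken: a*a > INT64_MAX */
        else
            printf("product = %lld\n", (long long) r);
        return 0;
    }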
@@ -17344,46 +17805,6 @@ fi
fi
-# In order to detect at runtime, if the ARM CRC Extension is available,
-# we will do "getauxval(AT_HWCAP) & HWCAP_CRC32". Check if we have
-# everything we need for that.
-for ac_func in getauxval
-do :
- ac_fn_c_check_func "$LINENO" "getauxval" "ac_cv_func_getauxval"
-if test "x$ac_cv_func_getauxval" = xyes; then :
- cat >>confdefs.h <<_ACEOF
-#define HAVE_GETAUXVAL 1
-_ACEOF
-
-fi
-done
-
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-
-int
-main ()
-{
-
-#ifndef AT_HWCAP
-#error AT_HWCAP not defined
-#endif
-#ifndef HWCAP_CRC32
-#error HWCAP_CRC32 not defined
-#endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- HAVE_HWCAP_CRC32=1
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
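Illustrative sketch (not part of the patch): the "getauxval(AT_HWCAP) & HWCAP_CRC32" runtime test named in the removed comment looks like this; it builds only on ARM Linux, where <asm/hwcap.h> provides HWCAP_CRC32.

    #include <stdio.h>
    #include <sys/auxv.h>       /* getauxval, AT_HWCAP; Linux-only */
    #include <asm/hwcap.h>      /* HWCAP_CRC32; ARM-only */

    int
    main(void)
    {
        if (getauxval(AT_HWCAP) & HWCAP_CRC32)
            printf("ARM CRC32 extension available\n");
        else
            printf("ARM CRC32 extension not available\n");
        return 0;
    }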
# Select CRC-32C implementation.
#
# If we are targeting a processor that has Intel SSE 4.2 instructions, we can
@@ -17414,9 +17835,8 @@ if test x"$USE_SLICING_BY_8_CRC32C" = x"" && test x"$USE_SSE42_CRC32C" = x"" &&
if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$CFLAGS_ARMV8_CRC32C" = x""; then
USE_ARMV8_CRC32C=1
else
- # ARM CRC Extension, with runtime check? The getauxval() function and
- # HWCAP_CRC32 are needed for the runtime check.
- if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$ac_cv_func_getauxval" = x"yes" && test x"$HAVE_HWCAP_CRC32" = x"1"; then
+ # ARM CRC Extension, with runtime check?
+ if test x"$pgac_armv8_crc32c_intrinsics" = x"yes"; then
USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK=1
else
# fall back to slicing-by-8 algorithm, which doesn't require any
@@ -17648,7 +18068,7 @@ fi
# in the template or configure command line.
# If not selected manually, try to select a source automatically.
-if test "$enable_strong_random" = "yes" && test x"$USE_OPENSSL_RANDOM" = x"" && test x"$USE_WIN32_RANDOM" = x"" && test x"$USE_DEV_URANDOM" = x"" ; then
+if test x"$USE_OPENSSL_RANDOM" = x"" && test x"$USE_WIN32_RANDOM" = x"" && test x"$USE_DEV_URANDOM" = x"" ; then
if test x"$with_openssl" = x"yes" ; then
USE_OPENSSL_RANDOM=1
elif test "$PORTNAME" = "win32" ; then
@@ -17682,42 +18102,28 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking which random number source to use" >&5
$as_echo_n "checking which random number source to use... " >&6; }
-if test "$enable_strong_random" = yes ; then
- if test x"$USE_OPENSSL_RANDOM" = x"1" ; then
+if test x"$USE_OPENSSL_RANDOM" = x"1" ; then
$as_echo "#define USE_OPENSSL_RANDOM 1" >>confdefs.h
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: OpenSSL" >&5
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: OpenSSL" >&5
$as_echo "OpenSSL" >&6; }
- elif test x"$USE_WIN32_RANDOM" = x"1" ; then
+elif test x"$USE_WIN32_RANDOM" = x"1" ; then
$as_echo "#define USE_WIN32_RANDOM 1" >>confdefs.h
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Windows native" >&5
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Windows native" >&5
$as_echo "Windows native" >&6; }
- elif test x"$USE_DEV_URANDOM" = x"1" ; then
+elif test x"$USE_DEV_URANDOM" = x"1" ; then
$as_echo "#define USE_DEV_URANDOM 1" >>confdefs.h
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: /dev/urandom" >&5
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: /dev/urandom" >&5
$as_echo "/dev/urandom" >&6; }
- else
- as_fn_error $? "
-no source of strong random numbers was found
-PostgreSQL can use OpenSSL or /dev/urandom as a source of random numbers,
-for authentication protocols. You can use --disable-strong-random to use a
-built-in pseudo random number generator, but that may be insecure." "$LINENO" 5
- fi
-
-$as_echo "#define HAVE_STRONG_RANDOM 1" >>confdefs.h
-
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: weak builtin PRNG" >&5
-$as_echo "weak builtin PRNG" >&6; }
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING:
-*** Not using a strong random number source may be insecure." >&5
-$as_echo "$as_me: WARNING:
-*** Not using a strong random number source may be insecure." >&2;}
+ as_fn_error $? "
+no source of strong random numbers was found
+PostgreSQL can use OpenSSL or /dev/urandom as a source of random numbers." "$LINENO" 5
fi
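Illustrative sketch (not part of the patch; the helper name is hypothetical and this is not pg_strong_random): the /dev/urandom branch selected above amounts to reading the device; a production version would also loop on short reads.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int
    read_urandom(void *buf, size_t len)
    {
        int     fd = open("/dev/urandom", O_RDONLY);
        ssize_t got;

        if (fd < 0)
            return 0;
        got = read(fd, buf, len);       /* a robust version retries here */
        close(fd);
        return got == (ssize_t) len;
    }

    int
    main(void)
    {
        unsigned char nonce[16];

        if (read_urandom(nonce, sizeof(nonce)))
            printf("got %zu random bytes\n", sizeof(nonce));
        return 0;
    }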
# If not set in template file, set bytes to use libc memset()
@@ -17853,7 +18259,7 @@ $as_echo_n "checking for MSGFMT... " >&6; }
$as_echo "$MSGFMT" >&6; }
fi
- if test -z "$MSGFMT"; then
+ if test -z "$MSGFMT"; then
as_fn_error $? "msgfmt is required for NLS" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for msgfmt flags" >&5
@@ -18049,7 +18455,14 @@ if test -z "$TCL_CONFIG_SH"; then
set X $pgac_test_dirs; shift
if test $# -eq 0; then
test -z "$TCLSH" && as_fn_error $? "unable to locate tclConfig.sh because no Tcl shell was found" "$LINENO" 5
- set X `echo 'puts $auto_path' | $TCLSH`; shift
+ pgac_test_dirs=`echo 'puts $auto_path' | $TCLSH`
+ # On newer macOS, $auto_path frequently doesn't include the place
+ # where tclConfig.sh actually lives. Append that to the end, so as not
+ # to break cases where a non-default Tcl installation is being used.
+ if test -d "$PG_SYSROOT/System/Library/Frameworks/Tcl.framework" ; then
+ pgac_test_dirs="$pgac_test_dirs $PG_SYSROOT/System/Library/Frameworks/Tcl.framework"
+ fi
+ set X $pgac_test_dirs; shift
fi
for pgac_dir do
@@ -18098,7 +18511,7 @@ fi
# check for <perl.h>
if test "$with_perl" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$CPPFLAGS -I$perl_archlibexp/CORE"
+ CPPFLAGS="$CPPFLAGS $perl_includespec"
ac_fn_c_check_header_compile "$LINENO" "perl.h" "ac_cv_header_perl_h" "#include <EXTERN.h>
"
if test "x$ac_cv_header_perl_h" = xyes; then :
@@ -18217,13 +18630,13 @@ $as_echo "$XMLLINT" >&6; }
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook XML V4.2" >&5
-$as_echo_n "checking for DocBook XML V4.2... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook XML V4.5" >&5
+$as_echo_n "checking for DocBook XML V4.5... " >&6; }
if ${pgac_cv_check_docbook+:} false; then :
$as_echo_n "(cached) " >&6
else
cat >conftest.xml <<EOF
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
<book>
 <title>test</title>
@@ -18781,6 +19194,15 @@ _ACEOF
+# If we are inserting PG_SYSROOT into CPPFLAGS, do so symbolically not
+# literally, so that it's possible to override it at build time using
+# a command like "make ... PG_SYSROOT=path". This has to be done after
+# we've finished all configure checks that depend on CPPFLAGS.
+if test x"$PG_SYSROOT" != x; then
+ CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s| $PG_SYSROOT | \\\$(PG_SYSROOT) |"`
+fi
+
+
# Begin output steps
@@ -18829,7 +19251,7 @@ fi
ac_config_files="$ac_config_files GNUmakefile src/Makefile.global"
-ac_config_links="$ac_config_links src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} src/include/dynloader.h:src/backend/port/dynloader/${template}.h src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template}"
+ac_config_links="$ac_config_links src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template}"
if test "$PORTNAME" = "win32"; then
@@ -19353,7 +19775,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by PostgreSQL $as_me 11devel, which was
+This file was extended by PostgreSQL $as_me 13devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -19417,13 +19839,13 @@ $config_links
Configuration commands:
$config_commands
-Report bugs to <pgsql-bugs@postgresql.org>."
+Report bugs to <pgsql-bugs@lists.postgresql.org>."
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-PostgreSQL config.status 11devel
+PostgreSQL config.status 13devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
@@ -19550,10 +19972,8 @@ do
"src/backend/port/tas.s") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/tas.s:src/backend/port/tas/${tas_file}" ;;
"GNUmakefile") CONFIG_FILES="$CONFIG_FILES GNUmakefile" ;;
"src/Makefile.global") CONFIG_FILES="$CONFIG_FILES src/Makefile.global" ;;
- "src/backend/port/dynloader.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c" ;;
"src/backend/port/pg_sema.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION}" ;;
"src/backend/port/pg_shmem.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION}" ;;
- "src/include/dynloader.h") CONFIG_LINKS="$CONFIG_LINKS src/include/dynloader.h:src/backend/port/dynloader/${template}.h" ;;
"src/include/pg_config_os.h") CONFIG_LINKS="$CONFIG_LINKS src/include/pg_config_os.h:src/include/port/${template}.h" ;;
"src/Makefile.port") CONFIG_LINKS="$CONFIG_LINKS src/Makefile.port:src/makefiles/Makefile.${template}" ;;
"check_win32_symlinks") CONFIG_COMMANDS="$CONFIG_COMMANDS check_win32_symlinks" ;;
diff --git a/configure.in b/configure.in
index da02a56ec66..0d16c1a9711 100644
--- a/configure.in
+++ b/configure.in
@@ -17,13 +17,13 @@ dnl Read the Autoconf manual for details.
dnl
m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
-AC_INIT([PostgreSQL], [11devel], [pgsql-bugs@postgresql.org])
+AC_INIT([PostgreSQL], [13devel], [pgsql-bugs@lists.postgresql.org])
m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
Untested combinations of 'autoconf' and PostgreSQL versions are not
recommended. You can remove the check from 'configure.in' but it is then
your responsibility whether the result works or not.])])
-AC_COPYRIGHT([Copyright (c) 1996-2018, PostgreSQL Global Development Group])
+AC_COPYRIGHT([Copyright (c) 1996-2019, PostgreSQL Global Development Group])
AC_CONFIG_SRCDIR([src/backend/access/common/heaptuple.c])
AC_CONFIG_AUX_DIR(config)
AC_PREFIX_DEFAULT(/usr/local/pgsql)
@@ -79,7 +79,7 @@ PostgreSQL has apparently not been ported to your platform yet.
To try a manual configuration, look into the src/template directory
for a similar platform and use the '--with-template=' option.
-Please also contact <pgsql-bugs@postgresql.org> to see about
+Please also contact <pgsql-bugs@lists.postgresql.org> to see about
rectifying this. Include the above 'checking host system type...'
line.
*******************************************************************
@@ -193,13 +193,6 @@ PGAC_ARG_BOOL(enable, spinlocks, yes,
PGAC_ARG_BOOL(enable, atomics, yes,
[do not use atomic operations])
-#
-# Random number generation
-#
-PGAC_ARG_BOOL(enable, strong-random, yes,
- [do not use a strong random number source])
-AC_SUBST(enable_strong_random)
-
#
# --enable-debug adds -g to compiler flags
#
@@ -358,6 +351,14 @@ case $template in
esac
AC_PROG_CC([$pgac_cc_list])
+AC_PROG_CC_C99()
+
+# Error out if the compiler does not support C99, as the codebase
+# relies on that.
+if test "$ac_cv_prog_cc_c99" = no; then
+ AC_MSG_ERROR([C compiler "$CC" does not support C99])
+fi
+
AC_PROG_CXX([$pgac_cxx_list])
# Check if it's Intel's compiler, which (usually) pretends to be gcc,
@@ -383,9 +384,10 @@ AC_SUBST(SUN_STUDIO_CC)
PGAC_ARG_BOOL(with, llvm, no, [build with LLVM based JIT support],
[AC_DEFINE([USE_LLVM], 1, [Define to 1 to build with LLVM based JIT support. (--with-llvm)])])
AC_SUBST(with_llvm)
-if test "$with_llvm" = yes ; then
- PGAC_LLVM_SUPPORT()
-fi
+dnl must use AS_IF here, else AC_REQUIRES inside PGAC_LLVM_SUPPORT malfunctions
+AS_IF([test "$with_llvm" = yes], [
+ PGAC_LLVM_SUPPORT()
+]) # fi
unset CFLAGS
@@ -474,8 +476,18 @@ if test "$GCC" = yes -a "$ICC" = no; then
CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith"
CXXFLAGS="-Wall -Wpointer-arith"
# These work in some but not all gcc versions
+ save_CFLAGS=$CFLAGS
PGAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement])
- # -Wdeclaration-after-statement isn't applicable for C++
+ # -Wdeclaration-after-statement isn't applicable for C++. Specific C files
+ # disable it, so AC_SUBST the negative form.
+ PERMIT_DECLARATION_AFTER_STATEMENT=
+ if test x"$save_CFLAGS" != x"$CFLAGS"; then
+ PERMIT_DECLARATION_AFTER_STATEMENT=-Wno-declaration-after-statement
+ fi
+ AC_SUBST(PERMIT_DECLARATION_AFTER_STATEMENT)
+ # Really don't want VLAs to be used in our dialect of C
+ PGAC_PROG_CC_CFLAGS_OPT([-Werror=vla])
+ # -Wvla is not applicable for C++
PGAC_PROG_CC_CFLAGS_OPT([-Wendif-labels])
PGAC_PROG_CXX_CFLAGS_OPT([-Wendif-labels])
PGAC_PROG_CC_CFLAGS_OPT([-Wmissing-format-attribute])
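Illustrative sketch (not part of the patch): -Werror=vla, added above, turns an array dimensioned by a runtime value into a hard error; the function below is otherwise valid C99 and compiles without the flag.

    #include <stdio.h>

    static int
    sum_copy(int n, const int *src)
    {
        int     tmp[n];     /* VLA: hard error under -Werror=vla */
        int     total = 0;

        for (int i = 0; i < n; i++)
            total += tmp[i] = src[i];
        return total;
    }

    int
    main(void)
    {
        int     vals[] = {1, 2, 3};

        printf("%d\n", sum_copy(3, vals));
        return 0;
    }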
@@ -498,10 +510,22 @@ if test "$GCC" = yes -a "$ICC" = no; then
# We want to suppress clang's unhelpful unused-command-line-argument warnings
# but gcc won't complain about unrecognized -Wno-foo switches, so we have to
# test for the positive form and if that works, add the negative form
+ NOT_THE_CFLAGS=""
PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wunused-command-line-argument])
if test -n "$NOT_THE_CFLAGS"; then
CFLAGS="$CFLAGS -Wno-unused-command-line-argument"
fi
+ # Similarly disable useless truncation warnings from gcc 8+
+ NOT_THE_CFLAGS=""
+ PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wformat-truncation])
+ if test -n "$NOT_THE_CFLAGS"; then
+ CFLAGS="$CFLAGS -Wno-format-truncation"
+ fi
+ NOT_THE_CFLAGS=""
+ PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wstringop-truncation])
+ if test -n "$NOT_THE_CFLAGS"; then
+ CFLAGS="$CFLAGS -Wno-stringop-truncation"
+ fi
elif test "$ICC" = yes; then
# Intel's compiler has a bug/misoptimization in checking for
# division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS.
@@ -601,6 +625,24 @@ choke me
@%:@endif])], [], [AC_MSG_ERROR([do not put -ffast-math in CFLAGS])])
fi
+# Defend against clang being used on x86-32 without SSE2 enabled. As current
+# versions of clang do not understand -fexcess-precision=standard, the use of
+# x87 floating point operations leads to problems like isinf possibly returning
+# false for a value that is infinite when converted from the 80bit register to
+# the 8byte memory representation.
+#
+# Only perform the test if the compiler doesn't understand
+# -fexcess-precision=standard, that way a potentially fixed compiler will work
+# automatically.
+if test "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = no; then
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+@%:@if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__)
+choke me
+@%:@endif
+])], [],
+[AC_MSG_ERROR([Compiling PostgreSQL with clang, on 32bit x86, requires SSE2 support. Use -msse2 or use gcc.])])
+fi
+
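Illustrative sketch (not part of the patch; behavior is compiler- and flag-dependent): with clang on 32-bit x86 without SSE2, the product below can be kept in an 80-bit x87 register where it is still finite, so the program may print 0; with SSE2 math or -fexcess-precision=standard it prints 1.

    #include <math.h>
    #include <stdio.h>

    volatile double huge = 1e308;

    int
    main(void)
    {
        double  d = huge * 10.0;    /* overflows the 8-byte double format */

        printf("isinf(d) = %d\n", isinf(d) != 0);
        return 0;
    }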
AC_PROG_CPP
AC_SUBST(GCC)
@@ -882,6 +924,7 @@ PGAC_ARG_BOOL(with, libxml, no, [build with XML support],
if test "$with_libxml" = yes ; then
PGAC_PATH_PROGS(XML2_CONFIG, xml2-config)
+ AC_ARG_VAR(XML2_CONFIG, [path to xml2-config utility])dnl
if test -n "$XML2_CONFIG"; then
for pgac_option in `$XML2_CONFIG --cflags`; do
case $pgac_option in
@@ -921,30 +964,6 @@ PGAC_ARG_BOOL(with, zlib, yes,
[do not use Zlib])
AC_SUBST(with_zlib)
-#
-# Elf
-#
-
-# Assume system is ELF if it predefines __ELF__ as 1,
-# otherwise believe host_os based default.
-case $host_os in
- freebsd1*|freebsd2*) elf=no;;
- freebsd3*|freebsd4*) elf=yes;;
-esac
-
-AC_EGREP_CPP(yes,
-[#if __ELF__
- yes
-#endif
-],
-[ELF_SYS=true],
-[if test "X$elf" = "Xyes" ; then
- ELF_SYS=true
-else
- ELF_SYS=
-fi])
-AC_SUBST(ELF_SYS)
-
#
# Assignments
#
@@ -979,7 +998,6 @@ AC_SUBST(install_bin)
PGAC_PATH_PROGS(TAR, tar)
AC_PROG_LN_S
-AC_PROG_AWK
AC_PROG_MKDIR_P
# When Autoconf chooses install-sh as mkdir -p program it tries to generate
# a relative path to it in each makefile where it substitutes it. This clashes
@@ -1003,6 +1021,16 @@ You might have to rebuild your Perl installation. Refer to the
documentation for details. Use --without-perl to disable building
PL/Perl.])
fi
+ # On most platforms, archlibexp is also where the Perl include files live ...
+ perl_includespec="-I$perl_archlibexp/CORE"
+ # ... but on newer macOS versions, we must use -iwithsysroot to look
+ # under $PG_SYSROOT
+ if test \! -f "$perl_archlibexp/CORE/perl.h" ; then
+ if test -f "$PG_SYSROOT$perl_archlibexp/CORE/perl.h" ; then
+ perl_includespec="-iwithsysroot $perl_archlibexp/CORE"
+ fi
+ fi
+ AC_SUBST(perl_includespec)dnl
PGAC_CHECK_PERL_EMBED_CCFLAGS
PGAC_CHECK_PERL_EMBED_LDFLAGS
fi
@@ -1034,11 +1062,11 @@ fi
# other libraries can pull in the pthread functions as a side-effect. We
# want to use the -pthread or similar flags directly, and not rely on
# the side-effects of linking with some other library.
-#
-# note: We have to use AS_IF here rather than plain if. The AC_CHECK_HEADER
-# invocation below is the first one in the script, and autoconf generates
-# additional code for that, which must not be inside the if-block. AS_IF
-# knows how to do that.
+
+dnl note: We have to use AS_IF here rather than plain if. The AC_CHECK_HEADER
+dnl invocation below is the first one in the script, and autoconf generates
+dnl additional code for that, which must not be inside the if-block. AS_IF
+dnl knows how to do that.
AS_IF([test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"],
[ # then
AX_PTHREAD # set thread flags
@@ -1089,14 +1117,7 @@ AC_SEARCH_LIBS(setproctitle, util)
AC_SEARCH_LIBS(dlopen, dl)
AC_SEARCH_LIBS(socket, [socket ws2_32])
AC_SEARCH_LIBS(shl_load, dld)
-# We only use libld in port/dynloader/aix.c
-case $host_os in
- aix*)
- AC_SEARCH_LIBS(ldopen, ld)
- ;;
-esac
AC_SEARCH_LIBS(getopt_long, [getopt gnugetopt])
-AC_SEARCH_LIBS(crypt, crypt)
AC_SEARCH_LIBS(shm_open, rt)
AC_SEARCH_LIBS(shm_unlink, rt)
AC_SEARCH_LIBS(clock_gettime, [rt posix4])
@@ -1166,7 +1187,7 @@ if test "$with_openssl" = yes ; then
# defines OPENSSL_VERSION_NUMBER to claim version 2.0.0, even though it
# doesn't have these OpenSSL 1.1.0 functions. So check for individual
# functions.
- AC_CHECK_FUNCS([OPENSSL_init_ssl BIO_get_data BIO_meth_new ASN1_STRING_get0_data RAND_OpenSSL])
+ AC_CHECK_FUNCS([OPENSSL_init_ssl BIO_get_data BIO_meth_new ASN1_STRING_get0_data])
# OpenSSL versions before 1.1.0 required setting callback functions, for
# thread-safety. In 1.1.0, it's no longer required, and CRYPTO_lock()
# function was removed.
@@ -1248,7 +1269,34 @@ AC_SUBST(UUID_LIBS)
AC_HEADER_STDBOOL
-AC_CHECK_HEADERS([atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h])
+AC_CHECK_HEADERS(m4_normalize([
+ atomic.h
+ copyfile.h
+ fp_class.h
+ getopt.h
+ ieeefp.h
+ ifaddrs.h
+ langinfo.h
+ mbarrier.h
+ poll.h
+ sys/epoll.h
+ sys/ipc.h
+ sys/prctl.h
+ sys/procctl.h
+ sys/pstat.h
+ sys/resource.h
+ sys/select.h
+ sys/sem.h
+ sys/shm.h
+ sys/sockio.h
+ sys/tas.h
+ sys/un.h
+ termios.h
+ ucred.h
+ utime.h
+ wchar.h
+ wctype.h
+]))
# On BSD, test for net/if.h will fail unless sys/socket.h
# is included first.
@@ -1411,13 +1459,9 @@ PGAC_C_FUNCNAME_SUPPORT
PGAC_C_STATIC_ASSERT
PGAC_C_TYPEOF
PGAC_C_TYPES_COMPATIBLE
-PGAC_C_BUILTIN_BSWAP16
-PGAC_C_BUILTIN_BSWAP32
-PGAC_C_BUILTIN_BSWAP64
PGAC_C_BUILTIN_CONSTANT_P
PGAC_C_BUILTIN_UNREACHABLE
PGAC_C_COMPUTED_GOTO
-PGAC_C_VA_ARGS
PGAC_STRUCT_TIMEZONE
PGAC_UNION_SEMUN
PGAC_STRUCT_SOCKADDR_UN
@@ -1469,16 +1513,29 @@ Use --without-zlib to disable zlib support.])],
[#include <zlib.h>])
fi
-# On PPC, check if assembler supports LWARX instruction's mutex hint bit
case $host_cpu in
+ x86_64)
+ # On x86_64, check if we can compile a popcntq instruction
+ AC_CACHE_CHECK([whether assembler supports x86_64 popcntq],
+ [pgac_cv_have_x86_64_popcntq],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [long long x = 1; long long r;
+ __asm__ __volatile__ (" popcntq %1,%0\n" : "=q"(r) : "rm"(x));])],
+ [pgac_cv_have_x86_64_popcntq=yes],
+ [pgac_cv_have_x86_64_popcntq=no])])
+ if test x"$pgac_cv_have_x86_64_popcntq" = xyes ; then
+ AC_DEFINE(HAVE_X86_64_POPCNTQ, 1, [Define to 1 if the assembler supports X86_64's POPCNTQ instruction.])
+ fi
+ ;;
ppc*|powerpc*)
- AC_MSG_CHECKING([whether assembler supports lwarx hint bit])
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ # On PPC, check if assembler supports LWARX instruction's mutex hint bit
+ AC_CACHE_CHECK([whether assembler supports lwarx hint bit],
+ [pgac_cv_have_ppc_mutex_hint],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
[int a = 0; int *p = &a; int r;
__asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p));])],
[pgac_cv_have_ppc_mutex_hint=yes],
- [pgac_cv_have_ppc_mutex_hint=no])
- AC_MSG_RESULT([$pgac_cv_have_ppc_mutex_hint])
+ [pgac_cv_have_ppc_mutex_hint=no])])
if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
AC_DEFINE(HAVE_PPC_LWARX_MUTEX_HINT, 1, [Define to 1 if the assembler supports PPC's LWARX mutex hint bit.])
fi
@@ -1498,7 +1555,7 @@ if test "$PORTNAME" != "win32"; then
AH_VERBATIM([_DARWIN_USE_64_BIT_INODE],[])
fi
-# Check for largefile support (must be after AC_SYS_LARGEFILE)
+dnl Check for largefile support (must be after AC_SYS_LARGEFILE)
AC_CHECK_SIZEOF([off_t])
# If we don't have largefile support, can't handle segsize >= 2GB.
@@ -1528,7 +1585,45 @@ PGAC_FUNC_WCSTOMBS_L
LIBS_including_readline="$LIBS"
LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'`
-AC_CHECK_FUNCS([cbrt clock_gettime dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll posix_fallocate pstat pthread_is_threaded_np readlink setproctitle setsid shm_open symlink sync_file_range utime utimes wcstombs_l])
+AC_CHECK_FUNCS(m4_normalize([
+ cbrt
+ clock_gettime
+ copyfile
+ fdatasync
+ getifaddrs
+ getpeerucred
+ getrlimit
+ mbstowcs_l
+ memset_s
+ memmove
+ poll
+ posix_fallocate
+ ppoll
+ pstat
+ pthread_is_threaded_np
+ readlink
+ setproctitle
+ setproctitle_fast
+ setsid
+ shm_open
+ strchrnul
+ strsignal
+ symlink
+ sync_file_range
+ uselocale
+ utime
+ utimes
+ wcstombs_l
+]))
+
+# These typically are compiler builtins, for which AC_CHECK_FUNCS fails.
+PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap16], [int x])
+PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap32], [int x])
+PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap64], [long int x])
+# We assume that we needn't test all widths of these explicitly:
+PGAC_CHECK_BUILTIN_FUNC([__builtin_clz], [unsigned int x])
+PGAC_CHECK_BUILTIN_FUNC([__builtin_ctz], [unsigned int x])
+PGAC_CHECK_BUILTIN_FUNC([__builtin_popcount], [unsigned int x])
AC_REPLACE_FUNCS(fseeko)
case $host_os in
@@ -1544,24 +1639,24 @@ esac
# posix_fadvise() is a no-op on Solaris, so don't incur function overhead
# by calling it, 2009-04-02
# http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/posix_fadvise.c
-if test "$PORTNAME" != "solaris"; then
+dnl must use AS_IF here, else AC_REQUIRES inside AC_CHECK_DECLS malfunctions
+AS_IF([test "$PORTNAME" != "solaris"], [
AC_CHECK_FUNCS(posix_fadvise)
AC_CHECK_DECLS(posix_fadvise, [], [], [#include <fcntl.h>])
-fi
+]) # fi
AC_CHECK_DECLS(fdatasync, [], [], [#include <unistd.h>])
AC_CHECK_DECLS([strlcat, strlcpy, strnlen])
# This is probably only present on macOS, but may as well check always
AC_CHECK_DECLS(F_FULLFSYNC, [], [], [#include <fcntl.h>])
-HAVE_IPV6=no
+AC_CHECK_DECLS([RTLD_GLOBAL, RTLD_NOW], [], [], [#include <dlfcn.h>])
+
AC_CHECK_TYPE([struct sockaddr_in6],
- [AC_DEFINE(HAVE_IPV6, 1, [Define to 1 if you have support for IPv6.])
- HAVE_IPV6=yes],
+ [AC_DEFINE(HAVE_IPV6, 1, [Define to 1 if you have support for IPv6.])],
[],
[$ac_includes_default
#include <netinet/in.h>])
-AC_SUBST(HAVE_IPV6)
AC_CACHE_CHECK([for PS_STRINGS], [pgac_cv_var_PS_STRINGS],
[AC_LINK_IFELSE([AC_LANG_PROGRAM(
@@ -1577,53 +1672,6 @@ if test "$pgac_cv_var_PS_STRINGS" = yes ; then
fi
-# We use our snprintf.c emulation if either snprintf() or vsnprintf()
-# is missing. Yes, there are machines that have only one. We may
-# also decide to use snprintf.c if snprintf() is present but does not
-# have all the features we need --- see below.
-
-if test "$PORTNAME" = "win32"; then
- # Win32 gets snprintf.c built unconditionally.
- #
- # To properly translate all NLS languages strings, we must support the
- # *printf() %$ format, which allows *printf() arguments to be selected
- # by position in the translated string.
- #
- # libintl versions < 0.13 use the native *printf() functions, and Win32
- # *printf() doesn't understand %$, so we must use our /port versions,
- # which do understand %$. libintl versions >= 0.13 include their own
- # *printf versions on Win32. The libintl 0.13 release note text is:
- #
- # C format strings with positions, as they arise when a translator
- # needs to reorder a sentence, are now supported on all platforms.
- # On those few platforms (NetBSD and Woe32) for which the native
- # printf()/fprintf()/... functions don't support such format
- # strings, replacements are provided through <libintl.h>.
- #
- # We could use libintl >= 0.13's *printf() if we were sure that we had
- # a libintl >= 0.13 at runtime, but seeing that there is no clean way
- # to guarantee that, it is best to just use our own, so we are sure to
- # get %$ support. In include/port.h we disable the *printf() macros
- # that might have been defined by libintl.
- #
- # We do this unconditionally whether NLS is used or not so we are sure
- # that all Win32 libraries and binaries behave the same.
- pgac_need_repl_snprintf=yes
-else
- pgac_need_repl_snprintf=no
- AC_CHECK_FUNCS(snprintf, [], pgac_need_repl_snprintf=yes)
- AC_CHECK_FUNCS(vsnprintf, [], pgac_need_repl_snprintf=yes)
-fi
-
-
-# Check whether <stdio.h> declares snprintf() and vsnprintf(); if not,
-# include/c.h will provide declarations. Note this is a separate test
-# from whether the functions exist in the C library --- there are
-# systems that have the functions but don't bother to declare them :-(
-
-AC_CHECK_DECLS([snprintf, vsnprintf])
-
-
dnl Cannot use AC_CHECK_FUNC because isinf may be a macro
AC_CACHE_CHECK([for isinf], ac_cv_func_isinf,
[AC_LINK_IFELSE([AC_LANG_PROGRAM([
@@ -1642,7 +1690,37 @@ else
AC_CHECK_FUNCS([fpclass fp_class fp_class_d class], [break])
fi
-AC_REPLACE_FUNCS([crypt fls getopt getrusage inet_aton mkdtemp random rint srandom strerror strlcat strlcpy strnlen])
+AC_REPLACE_FUNCS(m4_normalize([
+ dlopen
+ explicit_bzero
+ fls
+ getopt
+ getrusage
+ inet_aton
+ mkdtemp
+ pread
+ pwrite
+ random
+ rint
+ srandom
+ strlcat
+ strlcpy
+ strnlen
+ strtof
+]))
+
+case $host_os in
+ # Cygwin and (apparently, based on test results) Mingw both
+ # have a broken strtof(), so substitute the same replacement
+ # code we use with VS2013. That's not a perfect fix, since
+ # (unlike with VS2013) it doesn't avoid double-rounding, but
+ # we have no better options. To get that, though, we have to
+ # force the file to be compiled despite HAVE_STRTOF.
+ mingw*|cygwin*)
+ AC_LIBOBJ([strtof])
+ AC_MSG_NOTICE([On $host_os we will use our strtof wrapper.])
+ ;;
+esac
case $host_os in
@@ -1676,9 +1754,9 @@ else
AC_LIBOBJ(getopt_long)
fi
-# Solaris' getopt() doesn't do what we want for long options, so always use
-# our version on that platform.
-if test "$PORTNAME" = "solaris"; then
+# On OpenBSD and Solaris, getopt() doesn't do what we want for long options
+# (i.e., allow '-' as a flag character), so use our version on those platforms.
+if test "$PORTNAME" = "openbsd" -o "$PORTNAME" = "solaris"; then
AC_LIBOBJ(getopt)
fi
@@ -1691,6 +1769,7 @@ fi
# Win32 (really MinGW) support
if test "$PORTNAME" = "win32"; then
+ AC_CHECK_FUNCS(_configthreadlocale)
AC_REPLACE_FUNCS(gettimeofday)
AC_LIBOBJ(dirmod)
AC_LIBOBJ(kill)
@@ -1719,14 +1798,6 @@ if test "$PORTNAME" = "cygwin"; then
AC_LIBOBJ(dirmod)
fi
-AC_CHECK_DECLS([sys_siglist], [], [],
-[#include <signal.h>
-/* NetBSD declares sys_siglist in unistd.h. */
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif
-])
-
AC_CHECK_FUNC(syslog,
[AC_CHECK_HEADER(syslog.h,
[AC_DEFINE(HAVE_SYSLOG, 1, [Define to 1 if you have the syslog interface.])])])
@@ -1749,8 +1820,10 @@ if test x"$pgac_cv_var_int_optreset" = x"yes"; then
AC_DEFINE(HAVE_INT_OPTRESET, 1, [Define to 1 if you have the global variable 'int optreset'.])
fi
-AC_CHECK_FUNCS([strtoll strtoq], [break])
-AC_CHECK_FUNCS([strtoull strtouq], [break])
+AC_CHECK_FUNCS([strtoll __strtoll strtoq], [break])
+AC_CHECK_FUNCS([strtoull __strtoull strtouq], [break])
+# strto[u]ll may exist but not be declared
+AC_CHECK_DECLS([strtoll, strtoull])
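Illustrative sketch (not part of the patch): a portability header along the lines of PostgreSQL's c.h can consume the new HAVE_DECL_STRTOLL result to supply a declaration that <stdlib.h> omitted.

    #include <stdlib.h>

    /* assumes configure defined HAVE_STRTOLL and HAVE_DECL_STRTOLL */
    #if defined(HAVE_STRTOLL) && !HAVE_DECL_STRTOLL
    extern long long strtoll(const char *str, char **endptr, int base);
    #endif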
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
@@ -1763,6 +1836,10 @@ if test "$with_icu" = yes; then
CPPFLAGS=$ac_save_CPPFLAGS
fi
+if test "$with_llvm" = yes; then
+ PGAC_CHECK_LLVM_FUNCTIONS()
+fi
+
# Lastly, restore full LIBS list and check for readline/libedit symbols
LIBS="$LIBS_including_readline"
@@ -1791,31 +1868,10 @@ for the exact reason.]])],
# Run tests below here
# --------------------
-# Force use of our snprintf if system's doesn't do arg control
-# See comment above at snprintf test for details.
-if test "$enable_nls" = yes -a "$pgac_need_repl_snprintf" = no; then
- PGAC_FUNC_SNPRINTF_ARG_CONTROL
- if test $pgac_cv_snprintf_arg_control != yes ; then
- pgac_need_repl_snprintf=yes
- fi
-fi
-
-
dnl Check to see if we have a working 64-bit integer type.
-dnl This breaks down into two steps:
-dnl (1) figure out if the compiler has a 64-bit int type with working
-dnl arithmetic, and if so
-dnl (2) see whether snprintf() can format the type correctly. (Currently,
-dnl snprintf is the only library routine we really need for int8 support.)
-dnl It's entirely possible to have a compiler that handles a 64-bit type
-dnl when the C library doesn't; this is fairly likely when using gcc on
-dnl an older platform, for example.
-dnl If there is no native snprintf() or it does not handle the 64-bit type,
-dnl we force our own version of snprintf() to be used instead.
-dnl Note this test must be run after our initial check for snprintf/vsnprintf.
-
-dnl As of Postgres 8.4, we no longer support compilers without a working
-dnl 64-bit type. But we still handle the case of snprintf being broken.
+dnl Since Postgres 8.4, we no longer support compilers without a working
+dnl 64-bit type; but we have to determine whether that type is called
+dnl "long int" or "long long int".
PGAC_TYPE_64BIT_INT([long int])
@@ -1833,46 +1889,15 @@ fi
AC_DEFINE_UNQUOTED(PG_INT64_TYPE, $pg_int64_type,
[Define to the name of a signed 64-bit integer type.])
-# If we found "long int" is 64 bits, assume snprintf handles it. If
-# we found we need to use "long long int", better check. We cope with
-# snprintfs that use %lld, %qd, or %I64d as the format. If none of these
-# work, fall back to our own snprintf emulation (which we know uses %lld).
-
-if test "$HAVE_LONG_LONG_INT_64" = yes ; then
- if test $pgac_need_repl_snprintf = no; then
- PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER
- if test "$LONG_LONG_INT_MODIFIER" = ""; then
- # Force usage of our own snprintf, since system snprintf is broken
- pgac_need_repl_snprintf=yes
- LONG_LONG_INT_MODIFIER='ll'
- fi
- else
- # Here if we previously decided we needed to use our own snprintf
- LONG_LONG_INT_MODIFIER='ll'
- fi
+# Select the printf length modifier that goes with that, too.
+if test x"$pg_int64_type" = x"long long int" ; then
+ INT64_MODIFIER='"ll"'
else
- # Here if we are not using 'long long int' at all
- LONG_LONG_INT_MODIFIER='l'
+ INT64_MODIFIER='"l"'
fi
-INT64_MODIFIER="\"$LONG_LONG_INT_MODIFIER\""
-
AC_DEFINE_UNQUOTED(INT64_MODIFIER, $INT64_MODIFIER,
- [Define to the appropriate snprintf length modifier for 64-bit ints.])
-
-# Also force use of our snprintf if the system's doesn't support the %z flag.
-if test "$pgac_need_repl_snprintf" = no; then
- PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT
- if test "$pgac_cv_snprintf_size_t_support" != yes; then
- pgac_need_repl_snprintf=yes
- fi
-fi
-
-# Now we have checked all the reasons to replace snprintf
-if test $pgac_need_repl_snprintf = yes; then
- AC_DEFINE(USE_REPL_SNPRINTF, 1, [Use replacement snprintf() functions.])
- AC_LIBOBJ(snprintf)
-fi
+ [Define to the appropriate printf length modifier for 64-bit ints.])
# has to be down here, rather than with the other builtins, because
# the test uses PG_INT64_TYPE.
@@ -2014,22 +2039,6 @@ if test x"$pgac_armv8_crc32c_intrinsics" != x"yes"; then
fi
AC_SUBST(CFLAGS_ARMV8_CRC32C)
-# In order to detect at runtime, if the ARM CRC Extension is available,
-# we will do "getauxval(AT_HWCAP) & HWCAP_CRC32". Check if we have
-# everything we need for that.
-AC_CHECK_FUNCS([getauxval])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-], [
-#ifndef AT_HWCAP
-#error AT_HWCAP not defined
-#endif
-#ifndef HWCAP_CRC32
-#error HWCAP_CRC32 not defined
-#endif
-])], [HAVE_HWCAP_CRC32=1])
-
# Select CRC-32C implementation.
#
# If we are targeting a processor that has Intel SSE 4.2 instructions, we can
@@ -2060,9 +2069,8 @@ if test x"$USE_SLICING_BY_8_CRC32C" = x"" && test x"$USE_SSE42_CRC32C" = x"" &&
if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$CFLAGS_ARMV8_CRC32C" = x""; then
USE_ARMV8_CRC32C=1
else
- # ARM CRC Extension, with runtime check? The getauxval() function and
- # HWCAP_CRC32 are needed for the runtime check.
- if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$ac_cv_func_getauxval" = x"yes" && test x"$HAVE_HWCAP_CRC32" = x"1"; then
+ # ARM CRC Extension, with runtime check?
+ if test x"$pgac_armv8_crc32c_intrinsics" = x"yes"; then
USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK=1
else
# fall back to slicing-by-8 algorithm, which doesn't require any
@@ -2154,7 +2162,7 @@ fi
# in the template or configure command line.
# If not selected manually, try to select a source automatically.
-if test "$enable_strong_random" = "yes" && test x"$USE_OPENSSL_RANDOM" = x"" && test x"$USE_WIN32_RANDOM" = x"" && test x"$USE_DEV_URANDOM" = x"" ; then
+if test x"$USE_OPENSSL_RANDOM" = x"" && test x"$USE_WIN32_RANDOM" = x"" && test x"$USE_DEV_URANDOM" = x"" ; then
if test x"$with_openssl" = x"yes" ; then
USE_OPENSSL_RANDOM=1
elif test "$PORTNAME" = "win32" ; then
@@ -2169,28 +2177,19 @@ if test "$enable_strong_random" = "yes" && test x"$USE_OPENSSL_RANDOM" = x"" &&
fi
AC_MSG_CHECKING([which random number source to use])
-if test "$enable_strong_random" = yes ; then
- if test x"$USE_OPENSSL_RANDOM" = x"1" ; then
- AC_DEFINE(USE_OPENSSL_RANDOM, 1, [Define to use OpenSSL for random number generation])
- AC_MSG_RESULT([OpenSSL])
- elif test x"$USE_WIN32_RANDOM" = x"1" ; then
- AC_DEFINE(USE_WIN32_RANDOM, 1, [Define to use native Windows API for random number generation])
- AC_MSG_RESULT([Windows native])
- elif test x"$USE_DEV_URANDOM" = x"1" ; then
- AC_DEFINE(USE_DEV_URANDOM, 1, [Define to use /dev/urandom for random number generation])
- AC_MSG_RESULT([/dev/urandom])
- else
- AC_MSG_ERROR([
-no source of strong random numbers was found
-PostgreSQL can use OpenSSL or /dev/urandom as a source of random numbers,
-for authentication protocols. You can use --disable-strong-random to use a
-built-in pseudo random number generator, but that may be insecure.])
- fi
- AC_DEFINE(HAVE_STRONG_RANDOM, 1, [Define to use have a strong random number source])
+if test x"$USE_OPENSSL_RANDOM" = x"1" ; then
+ AC_DEFINE(USE_OPENSSL_RANDOM, 1, [Define to use OpenSSL for random number generation])
+ AC_MSG_RESULT([OpenSSL])
+elif test x"$USE_WIN32_RANDOM" = x"1" ; then
+ AC_DEFINE(USE_WIN32_RANDOM, 1, [Define to use native Windows API for random number generation])
+ AC_MSG_RESULT([Windows native])
+elif test x"$USE_DEV_URANDOM" = x"1" ; then
+ AC_DEFINE(USE_DEV_URANDOM, 1, [Define to use /dev/urandom for random number generation])
+ AC_MSG_RESULT([/dev/urandom])
else
- AC_MSG_RESULT([weak builtin PRNG])
- AC_MSG_WARN([
-*** Not using a strong random number source may be insecure.])
+ AC_MSG_ERROR([
+no source of strong random numbers was found
+PostgreSQL can use OpenSSL or /dev/urandom as a source of random numbers.])
fi
# If not set in template file, set bytes to use libc memset()
@@ -2224,7 +2223,7 @@ fi
 # check for <perl.h>
if test "$with_perl" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$CPPFLAGS -I$perl_archlibexp/CORE"
+ CPPFLAGS="$CPPFLAGS $perl_includespec"
 AC_CHECK_HEADER(perl.h, [], [AC_MSG_ERROR([header file <perl.h> is required for Perl])],
 [#include <EXTERN.h>])
# While we're at it, check that we can link to libperl.
@@ -2257,7 +2256,7 @@ fi
# Check for DocBook and tools
#
PGAC_PATH_XMLLINT
-PGAC_CHECK_DOCBOOK(4.2)
+PGAC_CHECK_DOCBOOK(4.5)
PGAC_PATH_PROGS(DBTOEPUB, dbtoepub)
PGAC_PATH_PROGS(XSLTPROC, xsltproc)
PGAC_PATH_PROGS(FOP, fop)
@@ -2359,6 +2358,15 @@ $AWK '{printf "%d%04d", $1, $2}'`"]
AC_DEFINE_UNQUOTED(PG_VERSION_NUM, $PG_VERSION_NUM, [PostgreSQL version as a number])
AC_SUBST(PG_VERSION_NUM)
+# If we are inserting PG_SYSROOT into CPPFLAGS, do so symbolically not
+# literally, so that it's possible to override it at build time using
+# a command like "make ... PG_SYSROOT=path". This has to be done after
+# we've finished all configure checks that depend on CPPFLAGS.
+if test x"$PG_SYSROOT" != x; then
+ CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s| $PG_SYSROOT | \\\$(PG_SYSROOT) |"`
+fi
+AC_SUBST(PG_SYSROOT)
+
# Begin output steps
@@ -2397,10 +2405,8 @@ AC_SUBST(vpath_build)
AC_CONFIG_FILES([GNUmakefile src/Makefile.global])
AC_CONFIG_LINKS([
- src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c
src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION}
src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION}
- src/include/dynloader.h:src/backend/port/dynloader/${template}.h
src/include/pg_config_os.h:src/include/port/${template}.h
src/Makefile.port:src/makefiles/Makefile.${template}
])
diff --git a/contrib/adminpack/Makefile b/contrib/adminpack/Makefile
index afcfac41038..689aca1b38f 100644
--- a/contrib/adminpack/Makefile
+++ b/contrib/adminpack/Makefile
@@ -5,7 +5,7 @@ OBJS = adminpack.o $(WIN32RES)
PG_CPPFLAGS = -I$(libpq_srcdir)
EXTENSION = adminpack
-DATA = adminpack--1.0.sql adminpack--1.0--1.1.sql
+DATA = adminpack--1.0.sql adminpack--1.0--1.1.sql adminpack--1.1--2.0.sql
PGFILEDESC = "adminpack - support functions for pgAdmin"
REGRESS = adminpack
diff --git a/contrib/adminpack/adminpack--1.0--1.1.sql b/contrib/adminpack/adminpack--1.0--1.1.sql
index 22eeee2ffa3..bb581653e0d 100644
--- a/contrib/adminpack/adminpack--1.0--1.1.sql
+++ b/contrib/adminpack/adminpack--1.0--1.1.sql
@@ -3,49 +3,4 @@
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION adminpack UPDATE TO '1.1'" to load this file. \quit
-/* ***********************************************
- * Administrative functions for PostgreSQL
- * *********************************************** */
-
-/* generic file access functions */
-
-CREATE OR REPLACE FUNCTION pg_catalog.pg_file_write(text, text, bool)
-RETURNS bigint
-AS 'MODULE_PATHNAME', 'pg_file_write_v1_1'
-LANGUAGE C VOLATILE STRICT;
-
-REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_write(text, text, bool) FROM PUBLIC;
-
-CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text, text)
-RETURNS bool
-AS 'MODULE_PATHNAME', 'pg_file_rename_v1_1'
-LANGUAGE C VOLATILE;
-
-REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_rename(text, text, text) FROM PUBLIC;
-
-CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text)
-RETURNS bool
-AS 'SELECT pg_catalog.pg_file_rename($1, $2, NULL::pg_catalog.text);'
-LANGUAGE SQL VOLATILE STRICT;
-
-CREATE OR REPLACE FUNCTION pg_catalog.pg_file_unlink(text)
-RETURNS bool
-AS 'MODULE_PATHNAME', 'pg_file_unlink_v1_1'
-LANGUAGE C VOLATILE STRICT;
-
-REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_unlink(text) FROM PUBLIC;
-
-CREATE OR REPLACE FUNCTION pg_catalog.pg_logdir_ls()
-RETURNS setof record
-AS 'MODULE_PATHNAME', 'pg_logdir_ls_v1_1'
-LANGUAGE C VOLATILE STRICT;
-
-REVOKE EXECUTE ON FUNCTION pg_catalog.pg_logdir_ls() FROM PUBLIC;
-
-/* These functions are now in the backend and callers should update to use those */
-
-DROP FUNCTION pg_file_read(text, bigint, bigint);
-
-DROP FUNCTION pg_file_length(text);
-
-DROP FUNCTION pg_logfile_rotate();
+REVOKE EXECUTE ON FUNCTION pg_catalog.pg_logfile_rotate() FROM PUBLIC;
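
With the 1.0--1.1 script cut down to this single statement, pg_logfile_rotate() becomes superuser-only once the update is applied. A minimal usage sketch, assuming a hypothetical admin_role that should keep access:

    ALTER EXTENSION adminpack UPDATE TO '1.1';
    -- hypothetical role; re-grant the access that the REVOKE removed
    GRANT EXECUTE ON FUNCTION pg_catalog.pg_logfile_rotate() TO admin_role;
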
diff --git a/contrib/adminpack/adminpack--1.1--2.0.sql b/contrib/adminpack/adminpack--1.1--2.0.sql
new file mode 100644
index 00000000000..ceaeafa3789
--- /dev/null
+++ b/contrib/adminpack/adminpack--1.1--2.0.sql
@@ -0,0 +1,51 @@
+/* contrib/adminpack/adminpack--1.1--2.0.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION adminpack UPDATE TO '2.0'" to load this file. \quit
+
+/* ***********************************************
+ * Administrative functions for PostgreSQL
+ * *********************************************** */
+
+/* generic file access functions */
+
+CREATE OR REPLACE FUNCTION pg_catalog.pg_file_write(text, text, bool)
+RETURNS bigint
+AS 'MODULE_PATHNAME', 'pg_file_write_v1_1'
+LANGUAGE C VOLATILE STRICT;
+
+REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_write(text, text, bool) FROM PUBLIC;
+
+CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text, text)
+RETURNS bool
+AS 'MODULE_PATHNAME', 'pg_file_rename_v1_1'
+LANGUAGE C VOLATILE;
+
+REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_rename(text, text, text) FROM PUBLIC;
+
+CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text)
+RETURNS bool
+AS 'SELECT pg_catalog.pg_file_rename($1, $2, NULL::pg_catalog.text);'
+LANGUAGE SQL VOLATILE STRICT;
+
+CREATE OR REPLACE FUNCTION pg_catalog.pg_file_unlink(text)
+RETURNS bool
+AS 'MODULE_PATHNAME', 'pg_file_unlink_v1_1'
+LANGUAGE C VOLATILE STRICT;
+
+REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_unlink(text) FROM PUBLIC;
+
+CREATE OR REPLACE FUNCTION pg_catalog.pg_logdir_ls()
+RETURNS setof record
+AS 'MODULE_PATHNAME', 'pg_logdir_ls_v1_1'
+LANGUAGE C VOLATILE STRICT;
+
+REVOKE EXECUTE ON FUNCTION pg_catalog.pg_logdir_ls() FROM PUBLIC;
+
+/* These functions are now in the backend and callers should update to use those */
+
+DROP FUNCTION pg_file_read(text, bigint, bigint);
+
+DROP FUNCTION pg_file_length(text);
+
+DROP FUNCTION pg_logfile_rotate();
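
A usage sketch for the new 2.0 update path, assuming adminpack 1.1 is already installed; the file names are illustrative:

    ALTER EXTENSION adminpack UPDATE TO '2.0';
    -- these generic file access functions stay superuser-only, per the REVOKEs above
    SELECT pg_catalog.pg_file_write('demo.txt', 'hello', false);
    SELECT pg_catalog.pg_file_rename('demo.txt', 'demo2.txt');
    SELECT pg_catalog.pg_file_unlink('demo2.txt');
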
diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c
index 2ce337e7b5a..e88f0a7a165 100644
--- a/contrib/adminpack/adminpack.c
+++ b/contrib/adminpack/adminpack.c
@@ -3,7 +3,7 @@
* adminpack.c
*
*
- * Copyright (c) 2002-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2019, PostgreSQL Global Development Group
*
* Author: Andreas Pflug
*
@@ -298,7 +298,7 @@ pg_file_rename_internal(text *file1, text *file2, text *file3)
fn2 = convert_and_check_filename(file2, false);
if (file3 == NULL)
- fn3 = 0;
+ fn3 = NULL;
else
fn3 = convert_and_check_filename(file3, false);
@@ -320,7 +320,7 @@ pg_file_rename_internal(text *file1, text *file2, text *file3)
return false;
}
- rc = access(fn3 ? fn3 : fn2, 2);
+ rc = access(fn3 ? fn3 : fn2, W_OK);
if (rc >= 0 || errno != ENOENT)
{
ereport(ERROR,
@@ -502,7 +502,7 @@ pg_logdir_ls_internal(FunctionCallInfo fcinfo)
fctx = palloc(sizeof(directory_fctx));
- tupdesc = CreateTemplateTupleDesc(2, false);
+ tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "starttime",
TIMESTAMPOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "filename",
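
The fn3 = NULL and access(..., W_OK) fixes above affect the three-argument pg_file_rename(), where a NULL third argument means a plain rename with no archiving of an existing target, which is exactly what the two-argument SQL wrapper requests. A sketch with illustrative file names:

    -- NULL archive name: behaves like the two-argument form
    SELECT pg_catalog.pg_file_rename('a.txt', 'b.txt', NULL::pg_catalog.text);
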
diff --git a/contrib/adminpack/adminpack.control b/contrib/adminpack/adminpack.control
index 71f6ad5ddf9..12569dcdd71 100644
--- a/contrib/adminpack/adminpack.control
+++ b/contrib/adminpack/adminpack.control
@@ -1,6 +1,6 @@
# adminpack extension
comment = 'administrative functions for PostgreSQL'
-default_version = '1.1'
+default_version = '2.0'
module_pathname = '$libdir/adminpack'
relocatable = false
schema = pg_catalog
diff --git a/contrib/amcheck/Makefile b/contrib/amcheck/Makefile
index c5764b544fd..dcec3b85203 100644
--- a/contrib/amcheck/Makefile
+++ b/contrib/amcheck/Makefile
@@ -4,7 +4,7 @@ MODULE_big = amcheck
OBJS = verify_nbtree.o $(WIN32RES)
EXTENSION = amcheck
-DATA = amcheck--1.0--1.1.sql amcheck--1.0.sql
+DATA = amcheck--1.1--1.2.sql amcheck--1.0--1.1.sql amcheck--1.0.sql
PGFILEDESC = "amcheck - function for verifying relation integrity"
REGRESS = check check_btree
diff --git a/contrib/amcheck/amcheck--1.1--1.2.sql b/contrib/amcheck/amcheck--1.1--1.2.sql
new file mode 100644
index 00000000000..883530dec74
--- /dev/null
+++ b/contrib/amcheck/amcheck--1.1--1.2.sql
@@ -0,0 +1,19 @@
+/* contrib/amcheck/amcheck--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION amcheck UPDATE TO '1.2'" to load this file. \quit
+
+-- In order to avoid issues with dependencies when updating amcheck to 1.2,
+-- create new, overloaded version of the 1.1 function signature
+
+--
+-- bt_index_parent_check()
+--
+CREATE FUNCTION bt_index_parent_check(index regclass,
+ heapallindexed boolean, rootdescend boolean)
+RETURNS VOID
+AS 'MODULE_PATHNAME', 'bt_index_parent_check'
+LANGUAGE C STRICT PARALLEL RESTRICTED;
+
+-- Don't want this to be available to public
+REVOKE ALL ON FUNCTION bt_index_parent_check(regclass, boolean, boolean) FROM PUBLIC;
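
After the update, the new three-argument signature can be exercised the same way the regression tests do; the index name here is illustrative:

    ALTER EXTENSION amcheck UPDATE TO '1.2';
    -- third argument enables the new rootdescend verification
    SELECT bt_index_parent_check('some_btree_idx'::regclass, true, true);
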
diff --git a/contrib/amcheck/amcheck.control b/contrib/amcheck/amcheck.control
index 469048403db..c6e310046d4 100644
--- a/contrib/amcheck/amcheck.control
+++ b/contrib/amcheck/amcheck.control
@@ -1,5 +1,5 @@
# amcheck extension
comment = 'functions for verifying relation integrity'
-default_version = '1.1'
+default_version = '1.2'
module_pathname = '$libdir/amcheck'
relocatable = true
diff --git a/contrib/amcheck/expected/check_btree.out b/contrib/amcheck/expected/check_btree.out
index ed80ac46648..d7480fc96dc 100644
--- a/contrib/amcheck/expected/check_btree.out
+++ b/contrib/amcheck/expected/check_btree.out
@@ -1,7 +1,12 @@
--- minimal test, basically just verifying that amcheck
CREATE TABLE bttest_a(id int8);
CREATE TABLE bttest_b(id int8);
CREATE TABLE bttest_multi(id int8, data int8);
+CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
+-- Stabilize tests
+ALTER TABLE bttest_a SET (autovacuum_enabled = false);
+ALTER TABLE bttest_b SET (autovacuum_enabled = false);
+ALTER TABLE bttest_multi SET (autovacuum_enabled = false);
+ALTER TABLE delete_test_table SET (autovacuum_enabled = false);
INSERT INTO bttest_a SELECT * FROM generate_series(1, 100000);
INSERT INTO bttest_b SELECT * FROM generate_series(100000, 1, -1);
INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i;
@@ -9,9 +14,9 @@ CREATE INDEX bttest_a_idx ON bttest_a USING btree (id);
CREATE INDEX bttest_b_idx ON bttest_b USING btree (id);
CREATE UNIQUE INDEX bttest_multi_idx ON bttest_multi
USING btree (id) INCLUDE (data);
-CREATE ROLE bttest_role;
+CREATE ROLE regress_bttest_role;
-- verify permissions are checked (error due to function not callable)
-SET ROLE bttest_role;
+SET ROLE regress_bttest_role;
SELECT bt_index_check('bttest_a_idx'::regclass);
ERROR: permission denied for function bt_index_check
SELECT bt_index_parent_check('bttest_a_idx'::regclass);
@@ -20,11 +25,11 @@ RESET ROLE;
-- we, intentionally, don't check relation permissions - it's useful
-- to run this cluster-wide with a restricted account, and as tested
-- above explicit permission has to be granted for that.
-GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO bttest_role;
-SET ROLE bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO regress_bttest_role;
+SET ROLE regress_bttest_role;
SELECT bt_index_check('bttest_a_idx');
bt_index_check
----------------
@@ -104,25 +109,62 @@ SELECT bt_index_check('bttest_multi_idx');
(1 row)
--- more expansive test for index with included columns
-SELECT bt_index_parent_check('bttest_multi_idx', true);
+-- more expansive tests for index with included columns
+SELECT bt_index_parent_check('bttest_multi_idx', true, true);
bt_index_parent_check
-----------------------
(1 row)
--- repeat expansive test for index built using insertions
+-- repeat expansive tests for index built using insertions
TRUNCATE bttest_multi;
INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i;
-SELECT bt_index_parent_check('bttest_multi_idx', true);
+SELECT bt_index_parent_check('bttest_multi_idx', true, true);
bt_index_parent_check
-----------------------
(1 row)
+--
+-- Test for multilevel page deletion/downlink present checks, and rootdescend
+-- checks
+--
+INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i;
+ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d);
+-- Delete most entries, and vacuum, deleting internal pages and creating "fast
+-- root"
+DELETE FROM delete_test_table WHERE a < 79990;
+VACUUM delete_test_table;
+SELECT bt_index_parent_check('delete_test_table_pkey', true);
+ bt_index_parent_check
+-----------------------
+
+(1 row)
+
+--
+-- BUG #15597: must not assume consistent input toasting state when forming
+-- tuple. Bloom filter must fingerprint normalized index tuple representation.
+--
+CREATE TABLE toast_bug(buggy text);
+ALTER TABLE toast_bug ALTER COLUMN buggy SET STORAGE plain;
+-- pg_attribute entry for toasty.buggy will have plain storage:
+CREATE INDEX toasty ON toast_bug(buggy);
+-- Whereas pg_attribute entry for toast_bug.buggy now has extended storage:
+ALTER TABLE toast_bug ALTER COLUMN buggy SET STORAGE extended;
+-- Insert compressible heap tuple (comfortably exceeds TOAST_TUPLE_THRESHOLD):
+INSERT INTO toast_bug SELECT repeat('a', 2200);
+-- Should not get false positive report of corruption:
+SELECT bt_index_check('toasty', true);
+ bt_index_check
+----------------
+
+(1 row)
+
-- cleanup
DROP TABLE bttest_a;
DROP TABLE bttest_b;
DROP TABLE bttest_multi;
-DROP OWNED BY bttest_role; -- permissions
-DROP ROLE bttest_role;
+DROP TABLE delete_test_table;
+DROP TABLE toast_bug;
+DROP OWNED BY regress_bttest_role; -- permissions
+DROP ROLE regress_bttest_role;
diff --git a/contrib/amcheck/sql/check_btree.sql b/contrib/amcheck/sql/check_btree.sql
index 4ca9d2d0ed1..9a1987598da 100644
--- a/contrib/amcheck/sql/check_btree.sql
+++ b/contrib/amcheck/sql/check_btree.sql
@@ -1,7 +1,13 @@
--- minimal test, basically just verifying that amcheck
CREATE TABLE bttest_a(id int8);
CREATE TABLE bttest_b(id int8);
CREATE TABLE bttest_multi(id int8, data int8);
+CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
+
+-- Stabilize tests
+ALTER TABLE bttest_a SET (autovacuum_enabled = false);
+ALTER TABLE bttest_b SET (autovacuum_enabled = false);
+ALTER TABLE bttest_multi SET (autovacuum_enabled = false);
+ALTER TABLE delete_test_table SET (autovacuum_enabled = false);
INSERT INTO bttest_a SELECT * FROM generate_series(1, 100000);
INSERT INTO bttest_b SELECT * FROM generate_series(100000, 1, -1);
@@ -12,10 +18,10 @@ CREATE INDEX bttest_b_idx ON bttest_b USING btree (id);
CREATE UNIQUE INDEX bttest_multi_idx ON bttest_multi
USING btree (id) INCLUDE (data);
-CREATE ROLE bttest_role;
+CREATE ROLE regress_bttest_role;
-- verify permissions are checked (error due to function not callable)
-SET ROLE bttest_role;
+SET ROLE regress_bttest_role;
SELECT bt_index_check('bttest_a_idx'::regclass);
SELECT bt_index_parent_check('bttest_a_idx'::regclass);
RESET ROLE;
@@ -23,11 +29,11 @@ RESET ROLE;
-- we, intentionally, don't check relation permissions - it's useful
-- to run this cluster-wide with a restricted account, and as tested
-- above explicit permission has to be granted for that.
-GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO bttest_role;
-GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO bttest_role;
-SET ROLE bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO regress_bttest_role;
+GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO regress_bttest_role;
+SET ROLE regress_bttest_role;
SELECT bt_index_check('bttest_a_idx');
SELECT bt_index_parent_check('bttest_a_idx');
RESET ROLE;
@@ -63,17 +69,46 @@ COMMIT;
-- normal check outside of xact for index with included columns
SELECT bt_index_check('bttest_multi_idx');
--- more expansive test for index with included columns
-SELECT bt_index_parent_check('bttest_multi_idx', true);
+-- more expansive tests for index with included columns
+SELECT bt_index_parent_check('bttest_multi_idx', true, true);
--- repeat expansive test for index built using insertions
+-- repeat expansive tests for index built using insertions
TRUNCATE bttest_multi;
INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i;
-SELECT bt_index_parent_check('bttest_multi_idx', true);
+SELECT bt_index_parent_check('bttest_multi_idx', true, true);
+
+--
+-- Test for multilevel page deletion/downlink present checks, and rootdescend
+-- checks
+--
+INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i;
+ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d);
+-- Delete most entries, and vacuum, deleting internal pages and creating "fast
+-- root"
+DELETE FROM delete_test_table WHERE a < 79990;
+VACUUM delete_test_table;
+SELECT bt_index_parent_check('delete_test_table_pkey', true);
+
+--
+-- BUG #15597: must not assume consistent input toasting state when forming
+-- tuple. Bloom filter must fingerprint normalized index tuple representation.
+--
+CREATE TABLE toast_bug(buggy text);
+ALTER TABLE toast_bug ALTER COLUMN buggy SET STORAGE plain;
+-- pg_attribute entry for toasty.buggy will have plain storage:
+CREATE INDEX toasty ON toast_bug(buggy);
+-- Whereas pg_attribute entry for toast_bug.buggy now has extended storage:
+ALTER TABLE toast_bug ALTER COLUMN buggy SET STORAGE extended;
+-- Insert compressible heap tuple (comfortably exceeds TOAST_TUPLE_THRESHOLD):
+INSERT INTO toast_bug SELECT repeat('a', 2200);
+-- Should not get false positive report of corruption:
+SELECT bt_index_check('toasty', true);
-- cleanup
DROP TABLE bttest_a;
DROP TABLE bttest_b;
DROP TABLE bttest_multi;
-DROP OWNED BY bttest_role; -- permissions
-DROP ROLE bttest_role;
+DROP TABLE delete_test_table;
+DROP TABLE toast_bug;
+DROP OWNED BY regress_bttest_role; -- permissions
+DROP ROLE regress_bttest_role;
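
As the comments in this test note, relation-level permissions are deliberately not checked, so a restricted role granted EXECUTE can verify indexes cluster-wide. A sketch of checking every valid btree index in the current database, assuming such a role:

    SELECT c.relname, bt_index_check(c.oid::regclass)
    FROM pg_index i
    JOIN pg_class c ON c.oid = i.indexrelid
    JOIN pg_am a ON a.oid = c.relam
    WHERE a.amname = 'btree' AND i.indisvalid;
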
diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c
index be0206d58ed..05e7d678ed4 100644
--- a/contrib/amcheck/verify_nbtree.c
+++ b/contrib/amcheck/verify_nbtree.c
@@ -14,7 +14,7 @@
* that every visible heap tuple has a matching index tuple.
*
*
- * Copyright (c) 2017-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2017-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/amcheck/verify_nbtree.c
@@ -25,6 +25,8 @@
#include "access/htup_details.h"
#include "access/nbtree.h"
+#include "access/table.h"
+#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/index.h"
@@ -33,6 +35,7 @@
#include "lib/bloomfilter.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
+#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
@@ -44,6 +47,8 @@ PG_MODULE_MAGIC;
* block per level, which is bound by the range of BlockNumber:
*/
#define InvalidBtreeLevel ((uint32) InvalidBlockNumber)
+#define BTreeTupleGetNKeyAtts(itup, rel) \
+ Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))
/*
* State associated with verifying a B-Tree index
@@ -65,10 +70,14 @@ typedef struct BtreeCheckState
/* B-Tree Index Relation and associated heap relation */
Relation rel;
Relation heaprel;
+ /* rel is heapkeyspace index? */
+ bool heapkeyspace;
/* ShareLock held on heap/index, rather than AccessShareLock? */
bool readonly;
/* Also verifying heap has no unindexed tuples? */
bool heapallindexed;
+ /* Also making sure non-pivot tuples can be found by new search? */
+ bool rootdescend;
/* Per-page context */
MemoryContext targetcontext;
/* Buffer access strategy */
@@ -91,6 +100,10 @@ typedef struct BtreeCheckState
/* Bloom filter fingerprints B-Tree index */
bloom_filter *filter;
+ /* Bloom filter fingerprints downlink blocks within tree */
+ bloom_filter *downlinkfilter;
+ /* Right half of incomplete split marker */
+ bool rightsplit;
/* Debug counter */
int64 heaptuplespresent;
} BtreeCheckState;
@@ -114,32 +127,46 @@ PG_FUNCTION_INFO_V1(bt_index_check);
PG_FUNCTION_INFO_V1(bt_index_parent_check);
static void bt_index_check_internal(Oid indrelid, bool parentcheck,
- bool heapallindexed);
+ bool heapallindexed, bool rootdescend);
static inline void btree_index_checkable(Relation rel);
+static inline bool btree_index_mainfork_expected(Relation rel);
static void bt_check_every_level(Relation rel, Relation heaprel,
- bool readonly, bool heapallindexed);
+ bool heapkeyspace, bool readonly, bool heapallindexed,
+ bool rootdescend);
static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state,
- BtreeLevel level);
+ BtreeLevel level);
static void bt_target_page_check(BtreeCheckState *state);
-static ScanKey bt_right_page_check_scankey(BtreeCheckState *state);
-static void bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
- ScanKey targetkey);
+static BTScanInsert bt_right_page_check_scankey(BtreeCheckState *state);
+static void bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
+ BlockNumber childblock);
+static void bt_downlink_missing_check(BtreeCheckState *state);
static void bt_tuple_present_callback(Relation index, HeapTuple htup,
- Datum *values, bool *isnull,
- bool tupleIsAlive, void *checkstate);
+ Datum *values, bool *isnull,
+ bool tupleIsAlive, void *checkstate);
+static IndexTuple bt_normalize_tuple(BtreeCheckState *state,
+ IndexTuple itup);
+static bool bt_rootdescend(BtreeCheckState *state, IndexTuple itup);
static inline bool offset_is_negative_infinity(BTPageOpaque opaque,
- OffsetNumber offset);
+ OffsetNumber offset);
+static inline bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
+ OffsetNumber upperbound);
static inline bool invariant_leq_offset(BtreeCheckState *state,
- ScanKey key,
- OffsetNumber upperbound);
-static inline bool invariant_geq_offset(BtreeCheckState *state,
- ScanKey key,
- OffsetNumber lowerbound);
-static inline bool invariant_leq_nontarget_offset(BtreeCheckState *state,
- Page other,
- ScanKey key,
- OffsetNumber upperbound);
+ BTScanInsert key,
+ OffsetNumber upperbound);
+static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
+ OffsetNumber lowerbound);
+static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
+ BTScanInsert key,
+ BlockNumber nontargetblock,
+ Page nontarget,
+ OffsetNumber upperbound);
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
+static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
+ IndexTuple itup);
+static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
+ Page page, OffsetNumber offset);
+static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
+ IndexTuple itup, bool nonpivot);
/*
* bt_index_check(index regclass, heapallindexed boolean)
@@ -159,7 +186,7 @@ bt_index_check(PG_FUNCTION_ARGS)
if (PG_NARGS() == 2)
heapallindexed = PG_GETARG_BOOL(1);
- bt_index_check_internal(indrelid, false, heapallindexed);
+ bt_index_check_internal(indrelid, false, heapallindexed, false);
PG_RETURN_VOID();
}
@@ -178,11 +205,14 @@ bt_index_parent_check(PG_FUNCTION_ARGS)
{
Oid indrelid = PG_GETARG_OID(0);
bool heapallindexed = false;
+ bool rootdescend = false;
- if (PG_NARGS() == 2)
+ if (PG_NARGS() >= 2)
heapallindexed = PG_GETARG_BOOL(1);
+ if (PG_NARGS() == 3)
+ rootdescend = PG_GETARG_BOOL(2);
- bt_index_check_internal(indrelid, true, heapallindexed);
+ bt_index_check_internal(indrelid, true, heapallindexed, rootdescend);
PG_RETURN_VOID();
}
@@ -191,7 +221,8 @@ bt_index_parent_check(PG_FUNCTION_ARGS)
* Helper for bt_index_[parent_]check, coordinating the bulk of the work.
*/
static void
-bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
+bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
+ bool rootdescend)
{
Oid heapid;
Relation indrel;
@@ -213,7 +244,7 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
*/
heapid = IndexGetRelation(indrelid, true);
if (OidIsValid(heapid))
- heaprel = heap_open(heapid, lockmode);
+ heaprel = table_open(heapid, lockmode);
else
heaprel = NULL;
@@ -222,12 +253,12 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
* with heap relation locked first to prevent deadlocking). In hot
* standby mode this will raise an error when parentcheck is true.
*
- * There is no need for the usual indcheckxmin usability horizon test here,
- * even in the heapallindexed case, because index undergoing verification
- * only needs to have entries for a new transaction snapshot. (If this is
- * a parentcheck verification, there is no question about committed or
- * recently dead heap tuples lacking index entries due to concurrent
- * activity.)
+ * There is no need for the usual indcheckxmin usability horizon test
+ * here, even in the heapallindexed case, because index undergoing
+ * verification only needs to have entries for a new transaction snapshot.
+ * (If this is a parentcheck verification, there is no question about
+ * committed or recently dead heap tuples lacking index entries due to
+ * concurrent activity.)
*/
indrel = index_open(indrelid, lockmode);
@@ -245,8 +276,22 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
/* Relation suitable for checking as B-Tree? */
btree_index_checkable(indrel);
- /* Check index, possibly against table it is an index on */
- bt_check_every_level(indrel, heaprel, parentcheck, heapallindexed);
+ if (btree_index_mainfork_expected(indrel))
+ {
+ bool heapkeyspace;
+
+ RelationOpenSmgr(indrel);
+ if (!smgrexists(indrel->rd_smgr, MAIN_FORKNUM))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("index \"%s\" lacks a main relation fork",
+ RelationGetRelationName(indrel))));
+
+ /* Check index, possibly against table it is an index on */
+ heapkeyspace = _bt_heapkeyspace(indrel);
+ bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck,
+ heapallindexed, rootdescend);
+ }
/*
* Release locks early. That's ok here because nothing in the called
@@ -255,7 +300,7 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed)
*/
index_close(indrel, lockmode);
if (heaprel)
- heap_close(heaprel, lockmode);
+ table_close(heaprel, lockmode);
}
/*
@@ -284,12 +329,34 @@ btree_index_checkable(Relation rel)
errdetail("Index \"%s\" is associated with temporary relation.",
RelationGetRelationName(rel))));
- if (!IndexIsValid(rel->rd_index))
+ if (!rel->rd_index->indisvalid)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot check index \"%s\"",
RelationGetRelationName(rel)),
- errdetail("Index is not valid")));
+ errdetail("Index is not valid.")));
+}
+
+/*
+ * Check if B-Tree index relation should have a file for its main relation
+ * fork. Verification uses this to skip unlogged indexes when in hot standby
+ * mode, where there is simply nothing to verify.
+ *
+ * NB: Caller should call btree_index_checkable() before calling here.
+ */
+static inline bool
+btree_index_mainfork_expected(Relation rel)
+{
+ if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
+ !RecoveryInProgress())
+ return true;
+
+ ereport(NOTICE,
+ (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+ errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
+ RelationGetRelationName(rel))));
+
+ return false;
}
/*
@@ -316,8 +383,8 @@ btree_index_checkable(Relation rel)
* parent/child check cannot be affected.)
*/
static void
-bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
- bool heapallindexed)
+bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
+ bool readonly, bool heapallindexed, bool rootdescend)
{
BtreeCheckState *state;
Page metapage;
@@ -335,19 +402,30 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
/*
* Initialize state for entire verification operation
*/
- state = palloc(sizeof(BtreeCheckState));
+ state = palloc0(sizeof(BtreeCheckState));
state->rel = rel;
state->heaprel = heaprel;
+ state->heapkeyspace = heapkeyspace;
state->readonly = readonly;
state->heapallindexed = heapallindexed;
+ state->rootdescend = rootdescend;
if (state->heapallindexed)
{
+ int64 total_pages;
int64 total_elems;
uint64 seed;
- /* Size Bloom filter based on estimated number of tuples in index */
- total_elems = (int64) state->rel->rd_rel->reltuples;
+ /*
+ * Size Bloom filter based on estimated number of tuples in index,
+ * while conservatively assuming that each block must contain at least
+ * MaxIndexTuplesPerPage / 5 non-pivot tuples. (Non-leaf pages cannot
+ * contain non-pivot tuples. That's okay because they generally make
+ * up no more than about 1% of all pages in the index.)
+ */
+ total_pages = RelationGetNumberOfBlocks(rel);
+ total_elems = Max(total_pages * (MaxIndexTuplesPerPage / 5),
+ (int64) state->rel->rd_rel->reltuples);
/* Random seed relies on backend srandom() call to avoid repetition */
seed = random();
/* Create Bloom filter to fingerprint index */
@@ -356,10 +434,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
/*
* Register our own snapshot in !readonly case, rather than asking
- * IndexBuildHeapScan() to do this for us later. This needs to happen
- * before index fingerprinting begins, so we can later be certain that
- * index fingerprinting should have reached all tuples returned by
- * IndexBuildHeapScan().
+ * table_index_build_scan() to do this for us later. This needs to
+ * happen before index fingerprinting begins, so we can later be
+ * certain that index fingerprinting should have reached all tuples
+ * returned by table_index_build_scan().
+ *
+ * In readonly case, we also check for problems with missing
+ * downlinks. A second Bloom filter is used for this.
*/
if (!state->readonly)
{
@@ -370,13 +451,13 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
* READ COMMITTED mode. A new snapshot is guaranteed to have all
* the entries it requires in the index.
*
- * We must defend against the possibility that an old xact snapshot
- * was returned at higher isolation levels when that snapshot is
- * not safe for index scans of the target index. This is possible
- * when the snapshot sees tuples that are before the index's
- * indcheckxmin horizon. Throwing an error here should be very
- * rare. It doesn't seem worth using a secondary snapshot to avoid
- * this.
+ * We must defend against the possibility that an old xact
+ * snapshot was returned at higher isolation levels when that
+ * snapshot is not safe for index scans of the target index. This
+ * is possible when the snapshot sees tuples that are before the
+ * index's indcheckxmin horizon. Throwing an error here should be
+ * very rare. It doesn't seem worth using a secondary snapshot to
+ * avoid this.
*/
if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
@@ -386,8 +467,30 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
errmsg("index \"%s\" cannot be verified using transaction snapshot",
RelationGetRelationName(rel))));
}
+ else
+ {
+ /*
+ * Extra readonly downlink check.
+ *
+ * In readonly case, we know that there cannot be a concurrent
+ * page split or a concurrent page deletion, which gives us the
+ * opportunity to verify that every non-ignorable page had a
+ * downlink one level up. We must be tolerant of interrupted page
+ * splits and page deletions, though. This is taken care of in
+ * bt_downlink_missing_check().
+ */
+ state->downlinkfilter = bloom_create(total_pages, work_mem, seed);
+ }
}
+ Assert(!state->rootdescend || state->readonly);
+ if (state->rootdescend && !state->heapkeyspace)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot verify that tuples from index \"%s\" can each be found by an independent index search",
+ RelationGetRelationName(rel)),
+ errhint("Only B-Tree version 4 indexes support rootdescend verification.")));
+
/* Create context for page */
state->targetcontext = AllocSetContextCreate(CurrentMemoryContext,
"amcheck context",
@@ -426,6 +529,12 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
current.istruerootlevel = true;
while (current.leftmost != P_NONE)
{
+ /*
+ * Leftmost page on level cannot be right half of incomplete split.
+ * This can go stale immediately in !readonly case.
+ */
+ state->rightsplit = false;
+
/*
* Verify this level, and get left most page for next level down, if
* not at leaf level
@@ -447,21 +556,32 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
if (state->heapallindexed)
{
IndexInfo *indexinfo = BuildIndexInfo(state->rel);
- HeapScanDesc scan;
+ TableScanDesc scan;
+
+ /* Report on extra downlink checks performed in readonly case */
+ if (state->readonly)
+ {
+ ereport(DEBUG1,
+ (errmsg_internal("finished verifying presence of downlink blocks within index \"%s\" with bitset %.2f%% set",
+ RelationGetRelationName(rel),
+ 100.0 * bloom_prop_bits_set(state->downlinkfilter))));
+ bloom_free(state->downlinkfilter);
+ }
/*
- * Create our own scan for IndexBuildHeapScan(), rather than getting it
- * to do so for us. This is required so that we can actually use the
- * MVCC snapshot registered earlier in !readonly case.
+ * Create our own scan for table_index_build_scan(), rather than
+ * getting it to do so for us. This is required so that we can
+ * actually use the MVCC snapshot registered earlier in !readonly
+ * case.
*
- * Note that IndexBuildHeapScan() calls heap_endscan() for us.
+ * Note that table_index_build_scan() calls heap_endscan() for us.
*/
- scan = heap_beginscan_strat(state->heaprel, /* relation */
- snapshot, /* snapshot */
- 0, /* number of keys */
- NULL, /* scan key */
- true, /* buffer access strategy OK */
- true); /* syncscan OK? */
+ scan = table_beginscan_strat(state->heaprel, /* relation */
+ snapshot, /* snapshot */
+ 0, /* number of keys */
+ NULL, /* scan key */
+ true, /* buffer access strategy OK */
+ true); /* syncscan OK? */
/*
* Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
@@ -490,8 +610,8 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly,
RelationGetRelationName(state->rel),
RelationGetRelationName(state->heaprel));
- IndexBuildHeapScan(state->heaprel, state->rel, indexinfo, true,
- bt_tuple_present_callback, (void *) state, scan);
+ table_index_build_scan(state->heaprel, state->rel, indexinfo, true, false,
+ bt_tuple_present_callback, (void *) state, scan);
ereport(DEBUG1,
(errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set",
@@ -564,6 +684,25 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
if (P_IGNORE(opaque))
{
+ /*
+ * Since there cannot be a concurrent VACUUM operation in readonly
+ * mode, and since a page has no links within other pages
+ * (siblings and parent) once it is marked fully deleted, it
+ * should be impossible to land on a fully deleted page in
+ * readonly mode. See bt_downlink_check() for further details.
+ *
+ * The bt_downlink_check() P_ISDELETED() check is repeated here so
+ * that pages that are only reachable through sibling links get
+ * checked.
+ */
+ if (state->readonly && P_ISDELETED(opaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("downlink or sibling link points to deleted block in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Block=%u left block=%u left link from block=%u.",
+ current, leftcurrent, opaque->btpo_prev)));
+
if (P_RIGHTMOST(opaque))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
@@ -615,9 +754,11 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
ItemId itemid;
/* Internal page -- downlink gets leftmost on next level */
- itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque));
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target,
+ P_FIRSTDATAKEY(opaque));
itup = (IndexTuple) PageGetItem(state->target, itemid);
- nextleveldown.leftmost = ItemPointerGetBlockNumberNoCheck(&(itup->t_tid));
+ nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup);
nextleveldown.level = opaque->btpo.level - 1;
}
else
@@ -639,6 +780,10 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
*/
}
+ /*
+ * readonly mode can only ever land on live pages and half-dead pages,
+ * so sibling pointers should always be in mutual agreement
+ */
if (state->readonly && opaque->btpo_prev != leftcurrent)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
@@ -668,6 +813,13 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
errmsg("circular link chain found in block %u of index \"%s\"",
current, RelationGetRelationName(state->rel))));
+ /*
+ * Record if page that is about to become target is the right half of
+ * an incomplete page split. This can go stale immediately in
+ * !readonly case.
+ */
+ state->rightsplit = P_INCOMPLETE_SPLIT(opaque);
+
leftcurrent = current;
current = opaque->btpo_next;
@@ -687,24 +839,31 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
* target page:
*
* - That every "real" data item is less than or equal to the high key, which
- * is an upper bound on the items on the pages (where there is a high key at
- * all -- pages that are rightmost lack one).
+ * is an upper bound on the items on the page. Data items should be
+ * strictly less than the high key when the page is an internal page.
*
- * - That within the page, every "real" item is less than or equal to the item
- * immediately to its right, if any (i.e., that the items are in order within
- * the page, so that the binary searches performed by index scans are sane).
+ * - That within the page, every data item is strictly less than the item
+ * immediately to its right, if any (i.e., that the items are in order
+ * within the page, so that the binary searches performed by index scans are
+ * sane).
*
- * - That the last item stored on the page is less than or equal to the first
- * "real" data item on the page to the right (if such a first item is
+ * - That the last data item stored on the page is strictly less than the
+ * first data item on the page to the right (when such a first item is
* available).
*
- * Furthermore, when state passed shows ShareLock held, and target page is
- * internal page, function also checks:
+ * - Various checks on the structure of tuples themselves. For example, check
+ * that non-pivot tuples have no truncated attributes.
*
- * - That all child pages respect downlinks lower bound.
+ * Furthermore, when state passed shows ShareLock held, function also checks:
+ *
+ * - That all child pages respect strict lower bound from parent's pivot
+ * tuple.
+ *
+ * - That downlink to block was encountered in parent where that's expected.
+ * (Limited to heapallindexed readonly callers.)
*
* This is also where heapallindexed callers use their Bloom filter to
- * fingerprint IndexTuples.
+ * fingerprint IndexTuples for later table_index_build_scan() verification.
*
* Note: Memory allocated in this routine is expected to be released by caller
* resetting state->targetcontext.
@@ -722,43 +881,39 @@ bt_target_page_check(BtreeCheckState *state)
elog(DEBUG2, "verifying %u items on %s block %u", max,
P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
-
- /* Check the number of attributes in high key if any */
+ /*
+ * Check the number of attributes in high key. Note, rightmost page
+ * doesn't contain a high key, so nothing to check
+ */
if (!P_RIGHTMOST(topaque))
{
- if (!_bt_check_natts(state->rel, state->target, P_HIKEY))
- {
- ItemId itemid;
- IndexTuple itup;
- char *itid,
- *htid;
+ ItemId itemid;
+ IndexTuple itup;
- itemid = PageGetItemId(state->target, P_HIKEY);
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target, P_HIKEY);
+ if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
+ P_HIKEY))
+ {
itup = (IndexTuple) PageGetItem(state->target, itemid);
- itid = psprintf("(%u,%u)", state->targetblock, P_HIKEY);
- htid = psprintf("(%u,%u)",
- ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
- ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
-
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("wrong number of index tuple attributes for index \"%s\"",
+ errmsg("wrong number of high key index tuple attributes in index \"%s\"",
RelationGetRelationName(state->rel)),
- errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.",
- itid,
- BTreeTupGetNAtts(itup, state->rel),
+ errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
+ state->targetblock,
+ BTreeTupleGetNAtts(itup, state->rel),
P_ISLEAF(topaque) ? "heap" : "index",
- htid,
(uint32) (state->targetlsn >> 32),
(uint32) state->targetlsn)));
}
}
-
/*
* Loop over page items, starting from first non-highkey item, not high
- * key (if any). Also, immediately skip "negative infinity" real item (if
- * any).
+ * key (if any). Most tests are not performed for the "negative infinity"
+ * real item (if any).
*/
for (offset = P_FIRSTDATAKEY(topaque);
offset <= max;
@@ -766,19 +921,21 @@ bt_target_page_check(BtreeCheckState *state)
{
ItemId itemid;
IndexTuple itup;
- ScanKey skey;
size_t tupsize;
+ BTScanInsert skey;
+ bool lowersizelimit;
CHECK_FOR_INTERRUPTS();
- itemid = PageGetItemId(state->target, offset);
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target, offset);
itup = (IndexTuple) PageGetItem(state->target, itemid);
tupsize = IndexTupleSize(itup);
/*
* lp_len should match the IndexTuple reported length exactly, since
- * lp_len is completely redundant in indexes, and both sources of tuple
- * length are MAXALIGN()'d. nbtree does not use lp_len all that
+ * lp_len is completely redundant in indexes, and both sources of
+ * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
* frequently, and is surprisingly tolerant of corrupt lp_len fields.
*/
if (tupsize != ItemIdGetLength(itemid))
@@ -791,10 +948,11 @@ bt_target_page_check(BtreeCheckState *state)
tupsize, ItemIdGetLength(itemid),
(uint32) (state->targetlsn >> 32),
(uint32) state->targetlsn),
- errhint("This could be a torn page problem")));
+ errhint("This could be a torn page problem.")));
/* Check the number of index tuple attributes */
- if (!_bt_check_natts(state->rel, state->target, offset))
+ if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
+ offset))
{
char *itid,
*htid;
@@ -806,30 +964,123 @@ bt_target_page_check(BtreeCheckState *state)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("wrong number of index tuple attributes for index \"%s\"",
+ errmsg("wrong number of index tuple attributes in index \"%s\"",
RelationGetRelationName(state->rel)),
errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.",
itid,
- BTreeTupGetNAtts(itup, state->rel),
+ BTreeTupleGetNAtts(itup, state->rel),
P_ISLEAF(topaque) ? "heap" : "index",
htid,
(uint32) (state->targetlsn >> 32),
(uint32) state->targetlsn)));
}
+ /* Fingerprint downlink blocks in heapallindexed + readonly case */
+ if (state->heapallindexed && state->readonly && !P_ISLEAF(topaque))
+ {
+ BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);
+
+ bloom_add_element(state->downlinkfilter,
+ (unsigned char *) &childblock,
+ sizeof(BlockNumber));
+ }
+
/*
- * Don't try to generate scankey using "negative infinity" garbage
- * data on internal pages
+ * Don't try to generate scankey using "negative infinity" item on
+ * internal pages. They are always truncated to zero attributes.
*/
if (offset_is_negative_infinity(topaque, offset))
continue;
+ /*
+ * Readonly callers may optionally verify that non-pivot tuples can
+ * each be found by an independent search that starts from the root
+ */
+ if (state->rootdescend && P_ISLEAF(topaque) &&
+ !bt_rootdescend(state, itup))
+ {
+ char *itid,
+ *htid;
+
+ itid = psprintf("(%u,%u)", state->targetblock, offset);
+ htid = psprintf("(%u,%u)",
+ ItemPointerGetBlockNumber(&(itup->t_tid)),
+ ItemPointerGetOffsetNumber(&(itup->t_tid)));
+
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("could not find tuple using search from root page in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=%s points to heap tid=%s page lsn=%X/%X.",
+ itid, htid,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+ }
+
/* Build insertion scankey for current page offset */
- skey = _bt_mkscankey(state->rel, itup);
+ skey = bt_mkscankey_pivotsearch(state->rel, itup);
+
+ /*
+ * Make sure tuple size does not exceed the relevant BTREE_VERSION
+ * specific limit.
+ *
+ * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned
+ * a small amount of space from BTMaxItemSize() in order to ensure
+ * that suffix truncation always has enough space to add an explicit
+ * heap TID back to a tuple -- we pessimistically assume that every
+ * newly inserted tuple will eventually need to have a heap TID
+ * appended during a future leaf page split, when the tuple becomes
+ * the basis of the new high key (pivot tuple) for the leaf page.
+ *
+ * Since the reclaimed space is reserved for that purpose, we must not
+ * enforce the slightly lower limit when the extra space has been used
+ * as intended. In other words, there is only a cross-version
+ * difference in the limit on tuple size within leaf pages.
+ *
+ * Still, we're particular about the details within BTREE_VERSION 4
+ * internal pages. Pivot tuples may only use the extra space for their
+ * designated purpose. Enforce the lower limit for pivot tuples when
+ * an explicit heap TID isn't actually present. (In all other cases
+ * suffix truncation is guaranteed to generate a pivot tuple that's no
+ * larger than the first right tuple provided to it by its caller.)
+ */
+ lowersizelimit = skey->heapkeyspace &&
+ (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
+ if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
+ BTMaxItemSizeNoHeapTid(state->target)))
+ {
+ char *itid,
+ *htid;
+
+ itid = psprintf("(%u,%u)", state->targetblock, offset);
+ htid = psprintf("(%u,%u)",
+ ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
+ ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)));
+
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("index row size %zu exceeds maximum for index \"%s\"",
+ tupsize, RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.",
+ itid,
+ P_ISLEAF(topaque) ? "heap" : "index",
+ htid,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+ }
/* Fingerprint leaf page tuples (those that point to the heap) */
if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
- bloom_add_element(state->filter, (unsigned char *) itup, tupsize);
+ {
+ IndexTuple norm;
+
+ norm = bt_normalize_tuple(state, itup);
+ bloom_add_element(state->filter, (unsigned char *) norm,
+ IndexTupleSize(norm));
+ /* Be tidy */
+ if (norm != itup)
+ pfree(norm);
+ }
/*
* * High key check *
@@ -850,9 +1101,35 @@ bt_target_page_check(BtreeCheckState *state)
* grandparents (as well as great-grandparents, and so on). We don't
* go to those lengths because that would be prohibitively expensive,
* and probably not markedly more effective in practice.
+ *
+ * On the leaf level, we check that the key is <= the highkey.
+ * However, on non-leaf levels we check that the key is < the highkey,
+ * because the high key is "just another separator" rather than a copy
+ * of some existing key item; we expect it to be unique among all keys
+ * on the same level. (Suffix truncation will sometimes produce a
+ * leaf highkey that is an untruncated copy of the lastleft item, but
+ * never any other item, which necessitates weakening the leaf level
+ * check to <=.)
+ *
+ * Full explanation for why a highkey is never truly a copy of another
+ * item from the same level on internal levels:
+ *
+ * While the new left page's high key is copied from the first offset
+ * on the right page during an internal page split, that's not the
+ * full story. In effect, internal pages are split in the middle of
+ * the firstright tuple, not between the would-be lastleft and
+ * firstright tuples: the firstright key ends up on the left side as
+ * left's new highkey, and the firstright downlink ends up on the
+ * right side as right's new "negative infinity" item. The negative
+ * infinity tuple is truncated to zero attributes, so we're only left
+ * with the downlink. In other words, the copying is just an
+ * implementation detail of splitting in the middle of a (pivot)
+ * tuple. (See also: "Notes About Data Representation" in the nbtree
+ * README.)
*/
if (!P_RIGHTMOST(topaque) &&
- !invariant_leq_offset(state, skey, P_HIKEY))
+ !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) :
+ invariant_l_offset(state, skey, P_HIKEY)))
{
char *itid,
*htid;
@@ -878,11 +1155,10 @@ bt_target_page_check(BtreeCheckState *state)
* * Item order check *
*
* Check that items are stored on page in logical order, by checking
- * current item is less than or equal to next item (if any).
+ * current item is strictly less than next item (if any).
*/
if (OffsetNumberNext(offset) <= max &&
- !invariant_leq_offset(state, skey,
- OffsetNumberNext(offset)))
+ !invariant_l_offset(state, skey, OffsetNumberNext(offset)))
{
char *itid,
*htid,
@@ -897,7 +1173,9 @@ bt_target_page_check(BtreeCheckState *state)
OffsetNumberNext(offset));
/* Reuse itup to get pointed-to heap location of second item */
- itemid = PageGetItemId(state->target, OffsetNumberNext(offset));
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target,
+ OffsetNumberNext(offset));
itup = (IndexTuple) PageGetItem(state->target, itemid);
nhtid = psprintf("(%u,%u)",
ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
@@ -939,13 +1217,13 @@ bt_target_page_check(BtreeCheckState *state)
*/
else if (offset == max)
{
- ScanKey rightkey;
+ BTScanInsert rightkey;
/* Get item in next/right page */
rightkey = bt_right_page_check_scankey(state);
if (rightkey &&
- !invariant_geq_offset(state, rightkey, max))
+ !invariant_g_offset(state, rightkey, max))
{
/*
* As explained at length in bt_right_page_check_scankey(),
@@ -989,11 +1267,19 @@ bt_target_page_check(BtreeCheckState *state)
*/
if (!P_ISLEAF(topaque) && state->readonly)
{
- BlockNumber childblock = ItemPointerGetBlockNumberNoCheck(&(itup->t_tid));
+ BlockNumber childblock = BTreeInnerTupleGetDownLink(itup);
- bt_downlink_check(state, childblock, skey);
+ bt_downlink_check(state, skey, childblock);
}
}
+
+ /*
+ * * Check if page has a downlink in parent *
+ *
+ * This can only be checked in heapallindexed + readonly case.
+ */
+ if (state->heapallindexed && state->readonly)
+ bt_downlink_missing_check(state);
}
/*
@@ -1012,11 +1298,12 @@ bt_target_page_check(BtreeCheckState *state)
* Note that !readonly callers must reverify that target page has not
* been concurrently deleted.
*/
-static ScanKey
+static BTScanInsert
bt_right_page_check_scankey(BtreeCheckState *state)
{
BTPageOpaque opaque;
ItemId rightitem;
+ IndexTuple firstitup;
BlockNumber targetnext;
Page rightpage;
OffsetNumber nline;
@@ -1114,9 +1401,9 @@ bt_right_page_check_scankey(BtreeCheckState *state)
* continued existence of target block as non-ignorable (not half-dead or
* deleted) implies that target page was not merged into from the right by
* deletion; the key space at or after target never moved left. Target's
- * parent either has the same downlink to target as before, or a <=
+ * parent either has the same downlink to target as before, or a <
* downlink due to deletion at the left of target. Target either has the
- * same highkey as before, or a highkey <= before when there is a page
+ * same highkey as before, or a highkey < before when there is a page
* split. (The rightmost concurrently-split-from-target-page page will
* still have the same highkey as target was originally found to have,
* which for our purposes is equivalent to target's highkey itself never
@@ -1173,7 +1460,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
{
/* Return first data item (if any) */
- rightitem = PageGetItemId(rightpage, P_FIRSTDATAKEY(opaque));
+ rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+ P_FIRSTDATAKEY(opaque));
}
else if (!P_ISLEAF(opaque) &&
nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
@@ -1182,8 +1470,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
* Return first item after the internal page's "negative infinity"
* item
*/
- rightitem = PageGetItemId(rightpage,
- OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
+ rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+ OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
}
else
{
@@ -1204,8 +1492,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
* Return first real item scankey. Note that this relies on right page
* memory remaining allocated.
*/
- return _bt_mkscankey(state->rel,
- (IndexTuple) PageGetItem(rightpage, rightitem));
+ firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
+ return bt_mkscankey_pivotsearch(state->rel, firstitup);
}
/*
@@ -1218,8 +1506,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
* verification this way around is much more practical.
*/
static void
-bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
- ScanKey targetkey)
+bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
+ BlockNumber childblock)
{
OffsetNumber offset;
OffsetNumber maxoffset;
@@ -1268,7 +1556,8 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
/*
* Verify child page has the downlink key from target page (its parent) as
- * a lower bound.
+ * a lower bound; downlink must be strictly less than all keys on the
+ * page.
*
* Check all items, rather than checking just the first and trusting that
* the operator class obeys the transitive law.
@@ -1277,21 +1566,73 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
copaque = (BTPageOpaque) PageGetSpecialPointer(child);
maxoffset = PageGetMaxOffsetNumber(child);
+ /*
+ * Since there cannot be a concurrent VACUUM operation in readonly mode,
+ * and since a page has no links within other pages (siblings and parent)
+ * once it is marked fully deleted, it should be impossible to land on a
+ * fully deleted page.
+ *
+ * It does not quite make sense to enforce that the page cannot even be
+ * half-dead, despite the fact that the downlink is modified at the same stage
+ * that the child leaf page is marked half-dead. That's incorrect because
+ * there may occasionally be multiple downlinks from a chain of pages
+ * undergoing deletion, where multiple successive calls are made to
+ * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark
+ * the leaf page as fully dead. While _bt_mark_page_halfdead() usually
+ * removes the downlink to the leaf page that is marked half-dead, that's
+ * not guaranteed, so it's possible we'll land on a half-dead page with a
+ * downlink due to an interrupted multi-level page deletion.
+ *
+ * We go ahead with our checks if the child page is half-dead. It's safe
+ * to do so because we do not test the child's high key, so it does not
+ * matter that the original high key will have been replaced by a dummy
+ * truncated high key within _bt_mark_page_halfdead(). All other page
+ * items are left intact on a half-dead page, so there is still something
+ * to test.
+ */
+ if (P_ISDELETED(copaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("downlink to deleted page found in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%X.",
+ state->targetblock, childblock,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+
for (offset = P_FIRSTDATAKEY(copaque);
offset <= maxoffset;
offset = OffsetNumberNext(offset))
{
/*
* Skip comparison of target page key against "negative infinity"
- * item, if any. Checking it would indicate that it's not an upper
- * bound, but that's only because of the hard-coding within
- * _bt_compare().
+ * item, if any. Checking it would indicate that it's not a strict
+ * lower bound, but that's only because of the hard-coding for
+ * negative infinity items within _bt_compare().
+ *
+ * If nbtree didn't truncate negative infinity tuples during internal
+ * page splits then we'd expect child's negative infinity key to be
+ * equal to the scankey/downlink from target/parent (it would be a
+ * "low key" in this hypothetical scenario, and so it would still need
+ * to be treated as a special case here).
+ *
+ * Negative infinity items can be thought of as a strict lower bound
+ * that works transitively, with the last non-negative-infinity pivot
+ * followed during a descent from the root as its "true" strict lower
+ * bound. Only a small number of negative infinity items are truly
+ * negative infinity; those that are the first items of leftmost
+ * internal pages. In more general terms, a negative infinity item is
+ * only negative infinity with respect to the subtree that the page is
+ * at the root of.
+ *
+ * See also: bt_rootdescend(), which can even detect transitive
+ * inconsistencies on cousin leaf pages.
*/
if (offset_is_negative_infinity(copaque, offset))
continue;
- if (!invariant_leq_nontarget_offset(state, child,
- targetkey, offset))
+ if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
+ offset))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("down-link lower bound invariant violated for index \"%s\"",
@@ -1306,7 +1647,195 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
}
/*
- * Per-tuple callback from IndexBuildHeapScan, used to determine if index has
+ * Checks if page is missing a downlink that it should have.
+ *
+ * A page that lacks a downlink/parent may indicate corruption. However, we
+ * must account for the fact that a missing downlink can occasionally be
+ * encountered in a non-corrupt index. This can be due to an interrupted page
+ * split, or an interrupted multi-level page deletion (i.e. there was a hard
+ * crash or an error during a page split, or while VACUUM was deleting a
+ * multi-level chain of pages).
+ *
+ * Note that this can only be called in readonly mode, so there is no need to
+ * be concerned about concurrent page splits or page deletions.
+ */
+static void
+bt_downlink_missing_check(BtreeCheckState *state)
+{
+ BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
+ ItemId itemid;
+ IndexTuple itup;
+ Page child;
+ BTPageOpaque copaque;
+ uint32 level;
+ BlockNumber childblk;
+
+ Assert(state->heapallindexed && state->readonly);
+ Assert(!P_IGNORE(topaque));
+
+ /* No next level up with downlinks to fingerprint from the true root */
+ if (P_ISROOT(topaque))
+ return;
+
+ /*
+ * Incomplete (interrupted) page splits can account for the lack of a
+ * downlink. Some inserting transaction should eventually complete the
+ * page split in passing, when it notices that the left sibling page is
+ * P_INCOMPLETE_SPLIT().
+ *
+ * In general, VACUUM is not prepared for there to be no downlink to a
+ * page that it deletes. This is the main reason why the lack of a
+ * downlink can be reported as corruption here. It's not obvious that an
+ * invalid missing downlink can result in wrong answers to queries,
+ * though, since index scans that land on the child may end up
+ * consistently moving right. The handling of concurrent page splits (and
+ * page deletions) within _bt_moveright() cannot distinguish
+ * inconsistencies that last for a moment from inconsistencies that are
+ * permanent and irrecoverable.
+ *
+ * VACUUM isn't even prepared to delete pages that have no downlink due to
+ * an incomplete page split, but it can detect and reason about that case
+ * by design, so it shouldn't be taken to indicate corruption. See
+ * _bt_pagedel() for full details.
+ */
+ if (state->rightsplit)
+ {
+ ereport(DEBUG1,
+ (errcode(ERRCODE_NO_DATA),
+ errmsg("harmless interrupted page split detected in index %s",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
+ state->targetblock, topaque->btpo.level,
+ topaque->btpo_prev,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+ return;
+ }
+
+ /* Target's downlink is typically present in parent/fingerprinted */
+ if (!bloom_lacks_element(state->downlinkfilter,
+ (unsigned char *) &state->targetblock,
+ sizeof(BlockNumber)))
+ return;
+
+ /*
+ * Target is probably the "top parent" of a multi-level page deletion.
+ * We'll need to descend the subtree to make sure that descendant pages
+ * are consistent with that, though.
+ *
+ * If the target page (which must be non-ignorable) is a leaf page, then
+ * clearly it can't be the top parent. The lack of a downlink is probably
+ * a symptom of a broad problem that could just as easily cause
+ * inconsistencies anywhere else.
+ */
+ if (P_ISLEAF(topaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("leaf index block lacks downlink in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Block=%u page lsn=%X/%X.",
+ state->targetblock,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+
+ /* Descend from the target page, which is an internal page */
+ elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"",
+ RelationGetRelationName(state->rel));
+
+ level = topaque->btpo.level;
+ itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+ P_FIRSTDATAKEY(topaque));
+ itup = (IndexTuple) PageGetItem(state->target, itemid);
+ childblk = BTreeInnerTupleGetDownLink(itup);
+ for (;;)
+ {
+ CHECK_FOR_INTERRUPTS();
+
+ child = palloc_btree_page(state, childblk);
+ copaque = (BTPageOpaque) PageGetSpecialPointer(child);
+
+ if (P_ISLEAF(copaque))
+ break;
+
+ /* Do an extra sanity check in passing on internal pages */
+ if (copaque->btpo.level != level - 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Top parent/target block=%u block pointed to=%u expected level=%u level in pointed to block=%u.",
+ state->targetblock, childblk,
+ level - 1, copaque->btpo.level)));
+
+ level = copaque->btpo.level;
+ itemid = PageGetItemIdCareful(state, childblk, child,
+ P_FIRSTDATAKEY(copaque));
+ itup = (IndexTuple) PageGetItem(child, itemid);
+ childblk = BTreeInnerTupleGetDownLink(itup);
+ /* Be slightly more pro-active in freeing this memory, just in case */
+ pfree(child);
+ }
+
+ /*
+ * Since there cannot be a concurrent VACUUM operation in readonly mode,
+ * and since a page has no links within other pages (siblings and parent)
+ * once it is marked fully deleted, it should be impossible to land on a
+ * fully deleted page. See bt_downlink_check() for further details.
+ *
+ * The bt_downlink_check() P_ISDELETED() check is repeated here because
+ * bt_downlink_check() does not visit pages reachable through negative
+ * infinity items. Besides, bt_downlink_check() is unwilling to descend
+ * multiple levels. (The similar bt_downlink_check() P_ISDELETED() check
+ * within bt_check_level_from_leftmost() won't reach the page either,
+ * since the leaf's live siblings should have their sibling links updated
+ * to bypass the deletion target page when it is marked fully dead.)
+ *
+ * If this error is raised, it might be due to a previous multi-level page
+ * deletion that failed to realize that it wasn't yet safe to mark the
+ * leaf page as fully dead. A "dangling downlink" will still remain when
+ * this happens. The fact that the dangling downlink's page (the leaf's
+ * parent/ancestor page) lacked a downlink is incidental.
+ */
+ if (P_ISDELETED(copaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg_internal("downlink to deleted leaf page found in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Top parent/target block=%u leaf block=%u top parent/target lsn=%X/%X.",
+ state->targetblock, childblk,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+
+ /*
+ * Iff leaf page is half-dead, its high key top parent link should point
+ * to what VACUUM considered to be the top parent page at the instant it
+ * was interrupted. Provided the high key link actually points to the
+ * target page, the missing downlink we detected is consistent with there
+ * having been an interrupted multi-level page deletion. This means that
+ * the subtree with the target page at its root (a page deletion chain) is
+ * in a consistent state, enabling VACUUM to resume deleting the entire
+ * chain the next time it encounters the half-dead leaf page.
+ */
+ if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
+ {
+ itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
+ itup = (IndexTuple) PageGetItem(child, itemid);
+ if (BTreeTupleGetTopParent(itup) == state->targetblock)
+ return;
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("internal index block lacks downlink in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Block=%u level=%u page lsn=%X/%X.",
+ state->targetblock, topaque->btpo.level,
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+}
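+
+/*
+ * Minimal illustrative helper (a sketch, not used by the checks above):
+ * given a half-dead leaf page, return the "top parent" block recorded in
+ * its high key, or InvalidBlockNumber otherwise. This is the same linkage
+ * that bt_downlink_missing_check() tests when deciding whether a missing
+ * downlink is consistent with an interrupted multi-level page deletion.
+ */
+static inline BlockNumber
+halfdead_leaf_top_parent(Page leaf)
+{
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(leaf);
+ IndexTuple hikey;
+
+ if (!P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque) || P_RIGHTMOST(opaque))
+ return InvalidBlockNumber;
+ hikey = (IndexTuple) PageGetItem(leaf, PageGetItemId(leaf, P_HIKEY));
+ return BTreeTupleGetTopParent(hikey);
+}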
+
+/*
+ * Per-tuple callback from table_index_build_scan, used to determine if index has
* all the entries that definitely should have been observed in leaf pages of
* the target index (that is, all IndexTuples that were fingerprinted by our
* Bloom filter). All heapallindexed checks occur here.
@@ -1331,7 +1860,7 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
* verification, just in case it's a cross-page invariant issue, though that
* isn't particularly likely.
*
- * IndexBuildHeapScan() expects to be able to find the root tuple when a
+ * table_index_build_scan() expects to be able to find the root tuple when a
* heap-only tuple (the live tuple at the end of some HOT chain) needs to be
* indexed, in order to replace the actual tuple's TID with the root tuple's
* TID (which is what we're actually passed back here). The index build heap
@@ -1347,7 +1876,7 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock,
* setting will probably also leave the index in a corrupt state before too
* long, the problem is nonetheless that there is heap corruption.)
*
- * Heap-only tuple handling within IndexBuildHeapScan() works in a way that
+ * Heap-only tuple handling within table_index_build_scan() works in a way that
* helps us to detect index tuples that contain the wrong values (values that
* don't match the latest tuple in the HOT chain). This can happen when there
* is no superseding index tuple due to a faulty assessment of HOT safety,
@@ -1365,41 +1894,24 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
bool *isnull, bool tupleIsAlive, void *checkstate)
{
BtreeCheckState *state = (BtreeCheckState *) checkstate;
- IndexTuple itup;
+ IndexTuple itup,
+ norm;
Assert(state->heapallindexed);
- /*
- * Generate an index tuple for fingerprinting.
- *
- * Index tuple formation is assumed to be deterministic, and IndexTuples
- * are assumed immutable. While the LP_DEAD bit is mutable in leaf pages,
- * that's ItemId metadata, which was not fingerprinted. (There will often
- * be some dead-to-everyone IndexTuples fingerprinted by the Bloom filter,
- * but we only try to detect the absence of needed tuples, so that's okay.)
- *
- * Note that we rely on deterministic index_form_tuple() TOAST compression.
- * If index_form_tuple() was ever enhanced to compress datums out-of-line,
- * or otherwise varied when or how compression was applied, our assumption
- * would break, leading to false positive reports of corruption. For now,
- * we don't decompress/normalize toasted values as part of fingerprinting.
- *
- * In future, non-pivot index tuples might get use of
- * BT_N_KEYS_OFFSET_MASK. Then binary representation of index tuple linked
- * to particular heap tuple might vary and meeds to be normalized before
- * bloom filter lookup.
- */
+ /* Generate a normalized index tuple for fingerprinting */
itup = index_form_tuple(RelationGetDescr(index), values, isnull);
itup->t_tid = htup->t_self;
+ norm = bt_normalize_tuple(state, itup);
/* Probe Bloom filter -- tuple should be present */
- if (bloom_lacks_element(state->filter, (unsigned char *) itup,
- IndexTupleSize(itup)))
+ if (bloom_lacks_element(state->filter, (unsigned char *) norm,
+ IndexTupleSize(norm)))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"",
- ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
- ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid)),
+ ItemPointerGetBlockNumber(&(itup->t_tid)),
+ ItemPointerGetOffsetNumber(&(itup->t_tid)),
RelationGetRelationName(state->heaprel),
RelationGetRelationName(state->rel)),
!state->readonly
@@ -1408,6 +1920,190 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
state->heaptuplespresent++;
pfree(itup);
+ /* Cannot leak memory here */
+ if (norm != itup)
+ pfree(norm);
+}
+
+/*
+ * Normalize an index tuple for fingerprinting.
+ *
+ * In general, index tuple formation is assumed to be deterministic by
+ * heapallindexed verification, and IndexTuples are assumed immutable. While
+ * the LP_DEAD bit is mutable in leaf pages, that's ItemId metadata, which is
+ * not fingerprinted. Normalization is required to compensate for corner
+ * cases where the determinism assumption doesn't quite work.
+ *
+ * There is currently one such case: index_form_tuple() does not try to hide
+ * the source TOAST state of input datums. The executor applies TOAST
+ * compression for heap tuples based on different criteria to the compression
+ * applied within btinsert()'s call to index_form_tuple(): it sometimes
+ * compresses more aggressively, resulting in compressed heap tuple datums but
+ * uncompressed corresponding index tuple datums. A subsequent heapallindexed
+ * verification will get a logically equivalent though bitwise unequal tuple
+ * from index_form_tuple(). False positive heapallindexed corruption reports
+ * could occur without normalizing away the inconsistency.
+ *
+ * Returned tuple is often caller's own original tuple. Otherwise, it is a
+ * new representation of caller's original index tuple, palloc()'d in caller's
+ * memory context.
+ *
+ * Note: This routine is not concerned with distinctions about the
+ * representation of tuples beyond those that might break heapallindexed
+ * verification. In particular, it won't try to normalize opclass-equal
+ * datums with potentially distinct representations (e.g., btree/numeric_ops
+ * index datums will not get their display scale normalized-away here).
+ * Normalization may need to be expanded to handle more cases in the future,
+ * though. For example, it's possible that non-pivot tuples could in the
+ * future have alternative logically equivalent representations due to using
+ * the INDEX_ALT_TID_MASK bit to implement intelligent deduplication.
+ */
+static IndexTuple
+bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
+{
+ TupleDesc tupleDescriptor = RelationGetDescr(state->rel);
+ Datum normalized[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
+ bool toast_free[INDEX_MAX_KEYS];
+ bool formnewtup = false;
+ IndexTuple reformed;
+ int i;
+
+ /* Easy case: It's immediately clear that tuple has no varlena datums */
+ if (!IndexTupleHasVarwidths(itup))
+ return itup;
+
+ for (i = 0; i < tupleDescriptor->natts; i++)
+ {
+ Form_pg_attribute att;
+
+ att = TupleDescAttr(tupleDescriptor, i);
+
+ /* Assume untoasted/already normalized datum initially */
+ toast_free[i] = false;
+ normalized[i] = index_getattr(itup, att->attnum,
+ tupleDescriptor,
+ &isnull[i]);
+ if (att->attbyval || att->attlen != -1 || isnull[i])
+ continue;
+
+ /*
+ * Callers always pass a tuple that could safely be inserted into the
+ * index without further processing, so an external varlena header
+ * should never be encountered here
+ */
+ if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i])))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"",
+ ItemPointerGetBlockNumber(&(itup->t_tid)),
+ ItemPointerGetOffsetNumber(&(itup->t_tid)),
+ RelationGetRelationName(state->rel))));
+ else if (VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])))
+ {
+ formnewtup = true;
+ normalized[i] = PointerGetDatum(PG_DETOAST_DATUM(normalized[i]));
+ toast_free[i] = true;
+ }
+ }
+
+ /* Easier case: Tuple has varlena datums, none of which are compressed */
+ if (!formnewtup)
+ return itup;
+
+ /*
+ * Hard case: Tuple had compressed varlena datums that necessitate
+ * creating normalized version of the tuple from uncompressed input datums
+ * (normalized input datums). This is rather naive, but shouldn't be
+ * necessary too often.
+ *
+ * Note that we rely on deterministic index_form_tuple() TOAST compression
+ * of normalized input.
+ */
+ reformed = index_form_tuple(tupleDescriptor, normalized, isnull);
+ reformed->t_tid = itup->t_tid;
+
+ /* Cannot leak memory here */
+ for (i = 0; i < tupleDescriptor->natts; i++)
+ if (toast_free[i])
+ pfree(DatumGetPointer(normalized[i]));
+
+ return reformed;
+}
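+
+/*
+ * Sketch of the per-datum test that drives bt_normalize_tuple() above
+ * (hypothetical helper, for illustration only), assuming the datum was
+ * fetched with index_getattr(). External datums never get this far, since
+ * they are reported as corruption outright.
+ */
+static inline bool
+datum_needs_decompression(Form_pg_attribute att, Datum datum, bool isnull)
+{
+ /* Only non-null, non-byval varlena datums can carry TOAST compression */
+ if (isnull || att->attbyval || att->attlen != -1)
+ return false;
+ return VARATT_IS_COMPRESSED(DatumGetPointer(datum));
+}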
+
+/*
+ * Search for itup in index, starting from fast root page. itup must be a
+ * non-pivot tuple. This is only supported with heapkeyspace indexes, since
+ * we rely on having fully unique keys to find a match with only a single
+ * visit to a leaf page, barring an interrupted page split, where we may have
+ * to move right. (A concurrent page split is impossible because the caller
+ * must be a readonly caller.)
+ *
+ * This routine can detect very subtle transitive consistency issues across
+ * more than one level of the tree. Leaf pages all have a high key (even the
+ * rightmost page has a conceptual positive infinity high key), but not a low
+ * key. Their downlink in parent is a lower bound, which along with the high
+ * key is almost enough to detect every possible inconsistency. A downlink
+ * separator key value won't always be available from parent, though, because
+ * the first items of internal pages are negative infinity items, truncated
+ * down to zero attributes during internal page splits. While it's true that
+ * bt_downlink_check() and the high key check can detect most imaginable key
+ * space problems, there are remaining problems they won't detect with non-pivot
+ * tuples in cousin leaf pages. Starting a search from the root for every
+ * existing leaf tuple detects small inconsistencies in upper levels of the
+ * tree that cannot be detected any other way. (Besides all this, this is
+ * probably also useful as a direct test of the code used by index scans
+ * themselves.)
+ */
+static bool
+bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
+{
+ BTScanInsert key;
+ BTStack stack;
+ Buffer lbuf;
+ bool exists;
+
+ key = _bt_mkscankey(state->rel, itup);
+ Assert(key->heapkeyspace && key->scantid != NULL);
+
+ /*
+ * Search from root.
+ *
+ * Ideally, we would arrange to only move right within _bt_search() when
+ * an interrupted page split is detected (i.e. when the incomplete split
+ * bit is found to be set), but for now we accept the possibility that
+ * that could conceal an inconsistency.
+ */
+ Assert(state->readonly && state->rootdescend);
+ exists = false;
+ stack = _bt_search(state->rel, key, &lbuf, BT_READ, NULL);
+
+ if (BufferIsValid(lbuf))
+ {
+ BTInsertStateData insertstate;
+ OffsetNumber offnum;
+ Page page;
+
+ insertstate.itup = itup;
+ insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
+ insertstate.itup_key = key;
+ insertstate.bounds_valid = false;
+ insertstate.buf = lbuf;
+
+ /* Get matching tuple on leaf page */
+ offnum = _bt_binsrch_insert(state->rel, &insertstate);
+ /* Compare first >= matching item on leaf page, if any */
+ page = BufferGetPage(lbuf);
+ if (offnum <= PageGetMaxOffsetNumber(page) &&
+ _bt_compare(state->rel, key, page, offnum) == 0)
+ exists = true;
+ _bt_relbuf(state->rel, lbuf);
+ }
+
+ _bt_freestack(stack);
+ pfree(key);
+
+ return exists;
}
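+
+/*
+ * Sketch of the intended bt_rootdescend() call pattern, presumably
+ * alongside the other per-tuple checks in bt_target_page_check()
+ * (hypothetical fragment; shown here for illustration only):
+ */
+#ifdef NOT_USED
+ if (state->rootdescend && P_ISLEAF(topaque) &&
+ !bt_rootdescend(state, itup))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("could not find tuple using search from root page in index \"%s\"",
+ RelationGetRelationName(state->rel))));
+#endif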
/*
@@ -1430,9 +2126,9 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
* infinity item is either first or second line item, or there is none
* within page.
*
- * "Negative infinity" tuple is a special corner case of pivot tuples,
- * it has zero attributes while rest of pivot tuples have nkeyatts number
- * of attributes.
+ * Negative infinity items are a special case among pivot tuples. They
+ * always have zero attributes, while all other pivot tuples always have
+ * nkeyatts attributes.
*
* Right-most pages don't have a high key, but could be said to
* conceptually have a "positive infinity" high key. Thus, there is a
@@ -1446,65 +2142,181 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
}
+/*
+ * Does the invariant hold that the key is strictly less than a given upper
+ * bound offset item?
+ *
+ * Verifies line pointer on behalf of caller.
+ *
+ * If this function returns false, convention is that caller throws error due
+ * to corruption.
+ */
+static inline bool
+invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
+ OffsetNumber upperbound)
+{
+ ItemId itemid;
+ int32 cmp;
+
+ Assert(key->pivotsearch);
+
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+ upperbound);
+ /* pg_upgrade'd indexes may legally have equal sibling tuples */
+ if (!key->heapkeyspace)
+ return invariant_leq_offset(state, key, upperbound);
+
+ cmp = _bt_compare(state->rel, key, state->target, upperbound);
+
+ /*
+ * _bt_compare() is capable of determining that a scankey with a
+ * filled-out attribute is greater than pivot tuples where the comparison
+ * is resolved at a truncated attribute (value of attribute in pivot is
+ * minus infinity). However, it is not capable of determining that a
+ * scankey is _less than_ a tuple on the basis of a comparison resolved at
+ * _scankey_ minus infinity attribute. Complete an extra step to simulate
+ * having minus infinity values for omitted scankey attribute(s).
+ */
+ if (cmp == 0)
+ {
+ BTPageOpaque topaque;
+ IndexTuple ritup;
+ int uppnkeyatts;
+ ItemPointer rheaptid;
+ bool nonpivot;
+
+ ritup = (IndexTuple) PageGetItem(state->target, itemid);
+ topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
+ nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
+
+ /* Get number of keys + heap TID for item to the right */
+ uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel);
+ rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot);
+
+ /* Heap TID is tiebreaker key attribute */
+ if (key->keysz == uppnkeyatts)
+ return key->scantid == NULL && rheaptid != NULL;
+
+ return key->keysz < uppnkeyatts;
+ }
+
+ return cmp < 0;
+}
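+
+/*
+ * Worked example of the cmp == 0 step above, for a two-column index: a
+ * pivotsearch scankey built from a suffix-truncated pivot ("foo") compares
+ * equal to an untruncated pivot ("foo", "bar") once its single present
+ * attribute has been compared. The scankey's omitted attribute acts as
+ * minus infinity, so key->keysz (1) < uppnkeyatts (2) establishes that the
+ * scankey is strictly less than the tuple, and the invariant holds.
+ */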
+
/*
* Does the invariant hold that the key is less than or equal to a given upper
* bound offset item?
*
+ * Caller should have verified that upperbound's line pointer is consistent
+ * using PageGetItemIdCareful() call.
+ *
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
static inline bool
-invariant_leq_offset(BtreeCheckState *state, ScanKey key,
+invariant_leq_offset(BtreeCheckState *state, BTScanInsert key,
OffsetNumber upperbound)
{
- int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel);
int32 cmp;
- cmp = _bt_compare(state->rel, nkeyatts, key, state->target, upperbound);
+ Assert(key->pivotsearch);
+
+ cmp = _bt_compare(state->rel, key, state->target, upperbound);
return cmp <= 0;
}
/*
- * Does the invariant hold that the key is greater than or equal to a given
- * lower bound offset item?
+ * Does the invariant hold that the key is strictly greater than a given lower
+ * bound offset item?
+ *
+ * Caller should have verified that lowerbound's line pointer is consistent
+ * using PageGetItemIdCareful() call.
*
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
static inline bool
-invariant_geq_offset(BtreeCheckState *state, ScanKey key,
- OffsetNumber lowerbound)
+invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
+ OffsetNumber lowerbound)
{
- int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel);
int32 cmp;
- cmp = _bt_compare(state->rel, nkeyatts, key, state->target, lowerbound);
+ Assert(key->pivotsearch);
+
+ cmp = _bt_compare(state->rel, key, state->target, lowerbound);
+
+ /* pg_upgrade'd indexes may legally have equal sibling tuples */
+ if (!key->heapkeyspace)
+ return cmp >= 0;
- return cmp >= 0;
+ /*
+ * No need to consider the possibility that scankey has attributes that we
+ * need to force to be interpreted as negative infinity. _bt_compare() is
+ * able to determine that scankey is greater than negative infinity. The
+ * distinction between "==" and "<" isn't interesting here, since
+ * corruption is indicated either way.
+ */
+ return cmp > 0;
}
/*
- * Does the invariant hold that the key is less than or equal to a given upper
+ * Does the invariant hold that the key is strictly less than a given upper
* bound offset item, with the offset relating to a caller-supplied page that
- * is not the current target page? Caller's non-target page is typically a
- * child page of the target, checked as part of checking a property of the
- * target page (i.e. the key comes from the target).
+ * is not the current target page?
+ *
+ * Caller's non-target page is a child page of the target, checked as part of
+ * checking a property of the target page (i.e. the key comes from the
+ * target). Verifies line pointer on behalf of caller.
*
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
static inline bool
-invariant_leq_nontarget_offset(BtreeCheckState *state,
- Page nontarget, ScanKey key,
- OffsetNumber upperbound)
+invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
+ BlockNumber nontargetblock, Page nontarget,
+ OffsetNumber upperbound)
{
- int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel);
+ ItemId itemid;
int32 cmp;
- cmp = _bt_compare(state->rel, nkeyatts, key, nontarget, upperbound);
+ Assert(key->pivotsearch);
- return cmp <= 0;
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
+ upperbound);
+ cmp = _bt_compare(state->rel, key, nontarget, upperbound);
+
+ /* pg_upgrade'd indexes may legally have equal sibling tuples */
+ if (!key->heapkeyspace)
+ return cmp <= 0;
+
+ /* See invariant_l_offset() for an explanation of this extra step */
+ if (cmp == 0)
+ {
+ IndexTuple child;
+ int uppnkeyatts;
+ ItemPointer childheaptid;
+ BTPageOpaque copaque;
+ bool nonpivot;
+
+ child = (IndexTuple) PageGetItem(nontarget, itemid);
+ copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
+ nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
+
+ /* Get number of keys + heap TID for child/non-target item */
+ uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel);
+ childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot);
+
+ /* Heap TID is tiebreaker key attribute */
+ if (key->keysz == uppnkeyatts)
+ return key->scantid == NULL && childheaptid != NULL;
+
+ return key->keysz < uppnkeyatts;
+ }
+
+ return cmp < 0;
}
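+
+/*
+ * Worked example of the cmp == 0 step here, with a single key column: a
+ * scankey built from a downlink pivot ("foo") whose heap TID was truncated
+ * away compares equal to child leaf tuple ("foo", tid). The scankey's
+ * omitted scantid acts as minus infinity, so key->keysz == uppnkeyatts,
+ * key->scantid == NULL, and childheaptid != NULL together confirm that the
+ * downlink is a strict lower bound on the child's items.
+ */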
/*
@@ -1525,6 +2337,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
Buffer buffer;
Page page;
BTPageOpaque opaque;
+ OffsetNumber maxoffset;
page = palloc(BLCKSZ);
@@ -1571,9 +2384,13 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("version mismatch in index \"%s\": file version %d, "
- "current version %d, minimal supported version %d",
+ "current version %d, minimum supported version %d",
RelationGetRelationName(state->rel),
- metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
+ metad->btm_version, BTREE_VERSION,
+ BTREE_MIN_VERSION)));
+
+ /* Finished with metapage checks */
+ return page;
}
/*
@@ -1586,12 +2403,66 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
errmsg("invalid leaf page level %u for block %u in index \"%s\"",
opaque->btpo.level, blocknum, RelationGetRelationName(state->rel))));
- if (blocknum != BTREE_METAPAGE && !P_ISLEAF(opaque) &&
- !P_ISDELETED(opaque) && opaque->btpo.level == 0)
+ if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) &&
+ opaque->btpo.level == 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("invalid internal page level 0 for block %u in index \"%s\"",
- opaque->btpo.level, RelationGetRelationName(state->rel))));
+ blocknum, RelationGetRelationName(state->rel))));
+
+ /*
+ * Sanity checks for number of items on page.
+ *
+ * As noted at the beginning of _bt_binsrch(), an internal page must have
+ * children, since there must always be a negative infinity downlink
+ * (there may also be a highkey). In the case of non-rightmost leaf
+ * pages, there must be at least a highkey.
+ *
+ * This is correct when pages are half-dead, since internal pages are
+ * never half-dead, and leaf pages must have a high key when half-dead
+ * (the rightmost page can never be deleted). It's also correct with
+ * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
+ * about the target page other than setting the page as fully dead, and
+ * setting its xact field. In particular, it doesn't change the sibling
+ * links in the deletion target itself, since they're required when index
+ * scans land on the deletion target, and then need to move right (or need
+ * to move left, in the case of backward index scans).
+ */
+ maxoffset = PageGetMaxOffsetNumber(page);
+ if (maxoffset > MaxIndexTuplesPerPage)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)",
+ blocknum, RelationGetRelationName(state->rel),
+ MaxIndexTuplesPerPage)));
+
+ if (!P_ISLEAF(opaque) && maxoffset < P_FIRSTDATAKEY(opaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink",
+ blocknum, RelationGetRelationName(state->rel))));
+
+ if (P_ISLEAF(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item",
+ blocknum, RelationGetRelationName(state->rel))));
+
+ /*
+ * In general, internal pages are never marked half-dead, except on
+ * versions of Postgres prior to 9.4, where it can be a valid transient
+ * state. This state is nonetheless treated as corruption by VACUUM from
+ * version 9.4 on, so do the same here. See _bt_pagedel() for full
+ * details.
+ *
+ * Internal pages should never have garbage items, either.
+ */
+ if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("internal page block %u in index \"%s\" is half-dead",
+ blocknum, RelationGetRelationName(state->rel)),
+ errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque))
ereport(ERROR,
@@ -1601,3 +2472,102 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
return page;
}
+
+/*
+ * _bt_mkscankey() wrapper that automatically prevents insertion scankey from
+ * being considered greater than the pivot tuple that its values originated
+ * from (or some other identical pivot tuple) in the common case where there
+ * are truncated/minus infinity attributes. Without this extra step, there
+ * are forms of corruption that amcheck could theoretically fail to report.
+ *
+ * For example, invariant_g_offset() might miss a cross-page invariant failure
+ * on an internal level if the scankey built from the first item on the
+ * target's right sibling page happened to be equal to (not greater than) the
+ * last item on target page. The !pivotsearch tiebreaker in _bt_compare()
+ * might otherwise cause amcheck to assume (rather than actually verify) that
+ * the scankey is greater.
+ */
+static inline BTScanInsert
+bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
+{
+ BTScanInsert skey;
+
+ skey = _bt_mkscankey(rel, itup);
+ skey->pivotsearch = true;
+
+ return skey;
+}
+
+/*
+ * PageGetItemId() wrapper that validates returned line pointer.
+ *
+ * Buffer page/page item access macros generally trust that line pointers are
+ * not corrupt, which might cause problems for verification itself. For
+ * example, there is no bounds checking in PageGetItem(). Passing it a
+ * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
+ * to dereference.
+ *
+ * Validating line pointers before tuples avoids undefined behavior and
+ * assertion failures with corrupt indexes, making the verification process
+ * more robust and predictable.
+ */
+static ItemId
+PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
+ OffsetNumber offset)
+{
+ ItemId itemid = PageGetItemId(page, offset);
+
+ if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
+ BLCKSZ - sizeof(BTPageOpaqueData))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("line pointer points past end of tuple space in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+ block, offset, ItemIdGetOffset(itemid),
+ ItemIdGetLength(itemid),
+ ItemIdGetFlags(itemid))));
+
+ /*
+ * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
+ * never uses either. Verify that line pointer has storage, too, since
+ * even LP_DEAD items should have storage within nbtree.
+ */
+ if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
+ ItemIdGetLength(itemid) == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("invalid line pointer storage in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+ block, offset, ItemIdGetOffset(itemid),
+ ItemIdGetLength(itemid),
+ ItemIdGetFlags(itemid))));
+
+ return itemid;
+}
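+
+/*
+ * Worked example of the bound enforced above, assuming the default 8kB
+ * block size: with BLCKSZ = 8192 and sizeof(BTPageOpaqueData) = 16, any
+ * line pointer where lp_off + lp_len exceeds 8176 is reported as
+ * corruption, since nbtree tuple space never extends into the special
+ * area.
+ */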
+
+/*
+ * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must
+ * be present in cases where that is mandatory.
+ *
+ * This doesn't add much as of BTREE_VERSION 4, since the INDEX_ALT_TID_MASK
+ * bit is effectively a proxy for whether or not the tuple is a pivot tuple.
+ * It may become more useful in the future, when non-pivot tuples support their
+ * own alternative INDEX_ALT_TID_MASK representation.
+ */
+static inline ItemPointer
+BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup,
+ bool nonpivot)
+{
+ ItemPointer result = BTreeTupleGetHeapTID(itup);
+ BlockNumber targetblock = state->targetblock;
+
+ if (result == NULL && nonpivot)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID",
+ targetblock, RelationGetRelationName(state->rel))));
+
+ return result;
+}
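+
+/*
+ * Self-contained sketch of the heapallindexed fingerprinting round trip,
+ * built on lib/bloomfilter.h (hypothetical helper; the filter sizing and
+ * seed below are arbitrary, and the real code fingerprints normalized
+ * tuples, per bt_normalize_tuple()). Returns true when the probe misses,
+ * i.e. when the reformed heap tuple has no fingerprinted match.
+ */
+#ifdef NOT_USED
+static bool
+fingerprint_roundtrip(IndexTuple leaftup, IndexTuple reformedtup)
+{
+ bloom_filter *filter;
+ bool lacks;
+
+ filter = bloom_create(1000, 1024, 0);
+ bloom_add_element(filter, (unsigned char *) leaftup,
+ IndexTupleSize(leaftup));
+ lacks = bloom_lacks_element(filter, (unsigned char *) reformedtup,
+ IndexTupleSize(reformedtup));
+ bloom_free(filter);
+ return lacks;
+}
+#endif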
diff --git a/contrib/auth_delay/auth_delay.c b/contrib/auth_delay/auth_delay.c
index ad047b365f2..b7ad7448d5e 100644
--- a/contrib/auth_delay/auth_delay.c
+++ b/contrib/auth_delay/auth_delay.c
@@ -2,7 +2,7 @@
*
* auth_delay.c
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/auth_delay/auth_delay.c
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index ea4f957cfa0..a9536c2de05 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -3,7 +3,7 @@
* auto_explain.c
*
*
- * Copyright (c) 2008-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/auto_explain/auto_explain.c
@@ -14,8 +14,10 @@
#include <limits.h>
+#include "access/parallel.h"
#include "commands/explain.h"
#include "executor/instrument.h"
+#include "jit/jit.h"
#include "utils/guc.h"
PG_MODULE_MAGIC;
@@ -27,7 +29,9 @@ static bool auto_explain_log_verbose = false;
static bool auto_explain_log_buffers = false;
static bool auto_explain_log_triggers = false;
static bool auto_explain_log_timing = true;
+static bool auto_explain_log_settings = false;
static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
+static int auto_explain_log_level = LOG;
static bool auto_explain_log_nested_statements = false;
static double auto_explain_sample_rate = 1;
@@ -39,29 +43,44 @@ static const struct config_enum_entry format_options[] = {
{NULL, 0, false}
};
+static const struct config_enum_entry loglevel_options[] = {
+ {"debug5", DEBUG5, false},
+ {"debug4", DEBUG4, false},
+ {"debug3", DEBUG3, false},
+ {"debug2", DEBUG2, false},
+ {"debug1", DEBUG1, false},
+ {"debug", DEBUG2, true},
+ {"info", INFO, false},
+ {"notice", NOTICE, false},
+ {"warning", WARNING, false},
+ {"log", LOG, false},
+ {NULL, 0, false}
+};
+
/* Current nesting depth of ExecutorRun calls */
static int nesting_level = 0;
+/* Is the current top-level query to be sampled? */
+static bool current_query_sampled = false;
+
+#define auto_explain_enabled() \
+ (auto_explain_log_min_duration >= 0 && \
+ (nesting_level == 0 || auto_explain_log_nested_statements) && \
+ current_query_sampled)
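+
+/*
+ * For illustration: with log_min_duration = 0 and log_nested_statements
+ * off, a sampled top-level statement is explained, but statements it
+ * triggers in turn (e.g. from within a PL/pgSQL function) are not, because
+ * nesting_level is greater than zero while they run.
+ */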
+
/* Saved hook values in case of unload */
static ExecutorStart_hook_type prev_ExecutorStart = NULL;
static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
-/* Is the current query sampled, per backend */
-static bool current_query_sampled = true;
-
-#define auto_explain_enabled() \
- (auto_explain_log_min_duration >= 0 && \
- (nesting_level == 0 || auto_explain_log_nested_statements))
-
void _PG_init(void);
void _PG_fini(void);
static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void explain_ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction,
- uint64 count, bool execute_once);
+ ScanDirection direction,
+ uint64 count, bool execute_once);
static void explain_ExecutorFinish(QueryDesc *queryDesc);
static void explain_ExecutorEnd(QueryDesc *queryDesc);
@@ -96,6 +115,17 @@ _PG_init(void)
NULL,
NULL);
+ DefineCustomBoolVariable("auto_explain.log_settings",
+ "Log modified configuration parameters affecting query planning.",
+ NULL,
+ &auto_explain_log_settings,
+ false,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
DefineCustomBoolVariable("auto_explain.log_verbose",
"Use EXPLAIN VERBOSE for plan logging.",
NULL,
@@ -141,6 +171,18 @@ _PG_init(void)
NULL,
NULL);
+ DefineCustomEnumVariable("auto_explain.log_level",
+ "Log level for the plan.",
+ NULL,
+ &auto_explain_log_level,
+ LOG,
+ loglevel_options,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
DefineCustomBoolVariable("auto_explain.log_nested_statements",
"Log nested statements.",
NULL,
@@ -209,14 +251,25 @@ static void
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
/*
- * For rate sampling, randomly choose top-level statement. Either all
- * nested statements will be explained or none will.
+ * At the beginning of each top-level statement, decide whether we'll
+ * sample this statement. If nested-statement explaining is enabled,
+ * either all nested statements will be explained or none will.
+ *
+ * When in a parallel worker, we should do nothing, which we can implement
+ * cheaply by pretending we decided not to sample the current statement.
+ * If EXPLAIN is active in the parent session, data will be collected and
+ * reported back to the parent, and it's no business of ours to interfere.
*/
- if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
- current_query_sampled = (random() < auto_explain_sample_rate *
- MAX_RANDOM_VALUE);
+ if (nesting_level == 0)
+ {
+ if (auto_explain_log_min_duration >= 0 && !IsParallelWorker())
+ current_query_sampled = (random() < auto_explain_sample_rate *
+ ((double) MAX_RANDOM_VALUE + 1));
+ else
+ current_query_sampled = false;
+ }
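+
+ /*
+ * Worked example of the test above: random() yields a value in
+ * [0, MAX_RANDOM_VALUE], so with auto_explain.sample_rate = 0.25
+ * roughly one in four top-level statements is sampled; nested
+ * statements then inherit the decision via current_query_sampled.
+ */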
- if (auto_explain_enabled() && current_query_sampled)
+ if (auto_explain_enabled())
{
/* Enable per-node instrumentation iff log_analyze is required. */
if (auto_explain_log_analyze && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0)
@@ -235,7 +288,7 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
else
standard_ExecutorStart(queryDesc, eflags);
- if (auto_explain_enabled() && current_query_sampled)
+ if (auto_explain_enabled())
{
/*
* Set up to track total elapsed time in ExecutorRun. Make sure the
@@ -306,7 +359,7 @@ explain_ExecutorFinish(QueryDesc *queryDesc)
static void
explain_ExecutorEnd(QueryDesc *queryDesc)
{
- if (queryDesc->totaltime && auto_explain_enabled() && current_query_sampled)
+ if (queryDesc->totaltime && auto_explain_enabled())
{
double msec;
@@ -328,12 +381,15 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
es->timing = (es->analyze && auto_explain_log_timing);
es->summary = es->analyze;
es->format = auto_explain_log_format;
+ es->settings = auto_explain_log_settings;
ExplainBeginOutput(es);
ExplainQueryText(es, queryDesc);
ExplainPrintPlan(es, queryDesc);
if (es->analyze && auto_explain_log_triggers)
ExplainPrintTriggers(es, queryDesc);
+ if (es->costs)
+ ExplainPrintJITSummary(es, queryDesc);
ExplainEndOutput(es);
/* Remove last line break */
@@ -353,7 +409,7 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
* reported. This isn't ideal but trying to do it here would
* often result in duplication.
*/
- ereport(LOG,
+ ereport(auto_explain_log_level,
(errmsg("duration: %.3f ms plan:\n%s",
msec, es->str->data),
errhidestmt(true)));
diff --git a/contrib/bloom/Makefile b/contrib/bloom/Makefile
index 13bd397b705..146878870ec 100644
--- a/contrib/bloom/Makefile
+++ b/contrib/bloom/Makefile
@@ -9,6 +9,10 @@ PGFILEDESC = "bloom access method - signature file based index"
REGRESS = bloom
+# Disable TAP tests for this module for now, as these are unstable on several
+# buildfarm environments.
+# TAP_TESTS = 1
+
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
@@ -19,6 +23,3 @@ top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
-
-wal-check: temp-install
- $(prove_check)
diff --git a/contrib/bloom/blcost.c b/contrib/bloom/blcost.c
index fa0f17a217f..f9fe57fb845 100644
--- a/contrib/bloom/blcost.c
+++ b/contrib/bloom/blcost.c
@@ -3,7 +3,7 @@
* blcost.c
* Cost estimate function for bloom indexes.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/blcost.c
@@ -13,7 +13,6 @@
#include "postgres.h"
#include "fmgr.h"
-#include "optimizer/cost.h"
#include "utils/selfuncs.h"
#include "bloom.h"
@@ -28,19 +27,15 @@ blcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
- List *qinfos;
GenericCosts costs;
- /* Do preliminary analysis of indexquals */
- qinfos = deconstruct_indexquals(path);
-
MemSet(&costs, 0, sizeof(costs));
/* We have to visit all index tuples anyway */
costs.numIndexTuples = index->tuples;
/* Use generic estimate */
- genericcostestimate(root, path, loop_count, qinfos, &costs);
+ genericcostestimate(root, path, loop_count, &costs);
*indexStartupCost = costs.indexStartupCost;
*indexTotalCost = costs.indexTotalCost;
diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c
index 4afdea7c9a3..4b2186b8dda 100644
--- a/contrib/bloom/blinsert.c
+++ b/contrib/bloom/blinsert.c
@@ -3,7 +3,7 @@
* blinsert.c
* Bloom index build and insert functions.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/blinsert.c
@@ -14,6 +14,7 @@
#include "access/genam.h"
#include "access/generic_xlog.h"
+#include "access/tableam.h"
#include "catalog/index.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
@@ -36,7 +37,7 @@ typedef struct
int64 indtuples; /* total number of tuples indexed */
MemoryContext tmpCtx; /* temporary memory context reset after each
* tuple */
- char data[BLCKSZ]; /* cached page */
+ PGAlignedBlock data; /* cached page */
int count; /* number of tuples in cached page */
} BloomBuildState;
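+
+/*
+ * Note: PGAlignedBlock is a union over char[BLCKSZ] with alignment-forcing
+ * members, so buildstate->data.data is guaranteed to be suitably aligned
+ * for use as a Page; a bare char array makes no such promise.
+ */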
@@ -52,7 +53,7 @@ flushCachedPage(Relation index, BloomBuildState *buildstate)
state = GenericXLogStart(index);
page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE);
- memcpy(page, buildstate->data, BLCKSZ);
+ memcpy(page, buildstate->data.data, BLCKSZ);
GenericXLogFinish(state);
UnlockReleaseBuffer(buffer);
}
@@ -63,13 +64,13 @@ flushCachedPage(Relation index, BloomBuildState *buildstate)
static void
initCachedPage(BloomBuildState *buildstate)
{
- memset(buildstate->data, 0, BLCKSZ);
- BloomInitPage(buildstate->data, 0);
+ memset(buildstate->data.data, 0, BLCKSZ);
+ BloomInitPage(buildstate->data.data, 0);
buildstate->count = 0;
}
/*
- * Per-tuple callback from IndexBuildHeapScan.
+ * Per-tuple callback for table_index_build_scan.
*/
static void
bloomBuildCallback(Relation index, HeapTuple htup, Datum *values,
@@ -84,7 +85,7 @@ bloomBuildCallback(Relation index, HeapTuple htup, Datum *values,
itup = BloomFormTuple(&buildstate->blstate, &htup->t_self, values, isnull);
/* Try to add next item to cached page */
- if (BloomPageAddItem(&buildstate->blstate, buildstate->data, itup))
+ if (BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup))
{
/* Next item was added successfully */
buildstate->count++;
@@ -98,7 +99,7 @@ bloomBuildCallback(Relation index, HeapTuple htup, Datum *values,
initCachedPage(buildstate);
- if (!BloomPageAddItem(&buildstate->blstate, buildstate->data, itup))
+ if (!BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup))
{
/* We shouldn't be here since we're inserting to the empty page */
elog(ERROR, "could not add new bloom tuple to empty page");
@@ -141,9 +142,9 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
initCachedPage(&buildstate);
/* Do the heap scan */
- reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
- bloomBuildCallback, (void *) &buildstate,
- NULL);
+ reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
+ bloomBuildCallback, (void *) &buildstate,
+ NULL);
/* Flush last page if needed (it will be, unless heap was empty) */
if (buildstate.count > 0)
diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h
index 3973ac75e8f..010148eb032 100644
--- a/contrib/bloom/bloom.h
+++ b/contrib/bloom/bloom.h
@@ -3,7 +3,7 @@
* bloom.h
* Header for bloom index.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/bloom.h
@@ -17,7 +17,7 @@
#include "access/generic_xlog.h"
#include "access/itup.h"
#include "access/xlog.h"
-#include "nodes/relation.h"
+#include "nodes/pathnodes.h"
#include "fmgr.h"
/* Support procedures numbers */
@@ -137,6 +137,7 @@ typedef struct BloomMetaPageData
typedef struct BloomState
{
FmgrInfo hashFn[INDEX_MAX_KEYS];
+ Oid collations[INDEX_MAX_KEYS];
BloomOptions opts; /* copy of options on index's metapage */
int32 nColumns;
@@ -188,26 +189,26 @@ extern bool blvalidate(Oid opclassoid);
/* index access method interface functions */
extern bool blinsert(Relation index, Datum *values, bool *isnull,
- ItemPointer ht_ctid, Relation heapRel,
- IndexUniqueCheck checkUnique,
- struct IndexInfo *indexInfo);
+ ItemPointer ht_ctid, Relation heapRel,
+ IndexUniqueCheck checkUnique,
+ struct IndexInfo *indexInfo);
extern IndexScanDesc blbeginscan(Relation r, int nkeys, int norderbys);
extern int64 blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
extern void blrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
- ScanKey orderbys, int norderbys);
+ ScanKey orderbys, int norderbys);
extern void blendscan(IndexScanDesc scan);
extern IndexBuildResult *blbuild(Relation heap, Relation index,
- struct IndexInfo *indexInfo);
+ struct IndexInfo *indexInfo);
extern void blbuildempty(Relation index);
extern IndexBulkDeleteResult *blbulkdelete(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback,
- void *callback_state);
+ IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback,
+ void *callback_state);
extern IndexBulkDeleteResult *blvacuumcleanup(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats);
+ IndexBulkDeleteResult *stats);
extern bytea *bloptions(Datum reloptions, bool validate);
extern void blcostestimate(PlannerInfo *root, IndexPath *path,
- double loop_count, Cost *indexStartupCost,
- Cost *indexTotalCost, Selectivity *indexSelectivity,
- double *indexCorrelation, double *indexPages);
+ double loop_count, Cost *indexStartupCost,
+ Cost *indexTotalCost, Selectivity *indexSelectivity,
+ double *indexCorrelation, double *indexPages);
#endif
diff --git a/contrib/bloom/blscan.c b/contrib/bloom/blscan.c
index 0744d74de75..49e364ac12d 100644
--- a/contrib/bloom/blscan.c
+++ b/contrib/bloom/blscan.c
@@ -3,7 +3,7 @@
* blscan.c
* Bloom index scan functions.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/blscan.c
@@ -76,7 +76,7 @@ blendscan(IndexScanDesc scan)
}
/*
- * Insert all matching tuples into to a bitmap.
+ * Insert all matching tuples into a bitmap.
*/
int64
blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c
index 6b2b9e37426..dbb24cb5b24 100644
--- a/contrib/bloom/blutils.c
+++ b/contrib/bloom/blutils.c
@@ -3,7 +3,7 @@
* blutils.c
* Bloom index utilities.
*
- * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2016-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1990-1993, Regents of the University of California
*
* IDENTIFICATION
@@ -60,7 +60,8 @@ _PG_init(void)
/* Option for length of signature */
add_int_reloption(bl_relopt_kind, "length",
"Length of signature in bits",
- DEFAULT_BLOOM_LENGTH, 1, MAX_BLOOM_LENGTH);
+ DEFAULT_BLOOM_LENGTH, 1, MAX_BLOOM_LENGTH,
+ AccessExclusiveLock);
bl_relopt_tab[0].optname = "length";
bl_relopt_tab[0].opttype = RELOPT_TYPE_INT;
bl_relopt_tab[0].offset = offsetof(BloomOptions, bloomLength);
@@ -71,7 +72,8 @@ _PG_init(void)
snprintf(buf, sizeof(buf), "col%d", i + 1);
add_int_reloption(bl_relopt_kind, buf,
"Number of bits generated for each index column",
- DEFAULT_BLOOM_BITS, 1, MAX_BLOOM_BITS);
+ DEFAULT_BLOOM_BITS, 1, MAX_BLOOM_BITS,
+ AccessExclusiveLock);
bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
buf);
bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
@@ -132,6 +134,7 @@ blhandler(PG_FUNCTION_ARGS)
amroutine->amcostestimate = blcostestimate;
amroutine->amoptions = bloptions;
amroutine->amproperty = NULL;
+ amroutine->ambuildphasename = NULL;
amroutine->amvalidate = blvalidate;
amroutine->ambeginscan = blbeginscan;
amroutine->amrescan = blrescan;
@@ -163,6 +166,7 @@ initBloomState(BloomState *state, Relation index)
fmgr_info_copy(&(state->hashFn[i]),
index_getprocinfo(index, i + 1, BLOOM_HASH_PROC),
CurrentMemoryContext);
+ state->collations[i] = index->rd_indcollation[i];
}
/* Initialize amcache if needed with options from metapage */
@@ -267,7 +271,7 @@ signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno)
* different columns will be mapped into different bits because of step
* above
*/
- hashVal = DatumGetInt32(FunctionCall1(&state->hashFn[attno], value));
+ hashVal = DatumGetInt32(FunctionCall1Coll(&state->hashFn[attno], state->collations[attno], value));
mySrand(hashVal ^ myRand());
for (j = 0; j < state->opts.bitSize[attno]; j++)
@@ -339,7 +343,7 @@ BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple)
/*
* Allocate a new page (either by recycling, or by extending the index file)
* The returned buffer is already pinned and exclusive-locked
- * Caller is responsible for initializing the page by calling BloomInitBuffer
+ * Caller is responsible for initializing the page by calling BloomInitPage
*/
Buffer
BloomNewBuffer(Relation index)
diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c
index 7530a664abc..0c33d1e1951 100644
--- a/contrib/bloom/blvacuum.c
+++ b/contrib/bloom/blvacuum.c
@@ -3,7 +3,7 @@
* blvacuum.c
* Bloom VACUUM functions.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/blvacuum.c
diff --git a/contrib/bloom/blvalidate.c b/contrib/bloom/blvalidate.c
index 7235f123073..e9bd1b4f03e 100644
--- a/contrib/bloom/blvalidate.c
+++ b/contrib/bloom/blvalidate.c
@@ -3,7 +3,7 @@
* blvalidate.c
* Opclass validator for bloom.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/bloom/blvalidate.c
diff --git a/contrib/bloom/expected/bloom.out b/contrib/bloom/expected/bloom.out
index 5ab9e34f823..dae12a7d3e7 100644
--- a/contrib/bloom/expected/bloom.out
+++ b/contrib/bloom/expected/bloom.out
@@ -5,6 +5,7 @@ CREATE TABLE tst (
);
INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,2000) i;
CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (col1 = 3);
+ALTER INDEX bloomidx SET (length=80);
SET enable_seqscan=on;
SET enable_bitmapscan=off;
SET enable_indexscan=off;
diff --git a/contrib/bloom/sql/bloom.sql b/contrib/bloom/sql/bloom.sql
index 32755f2b1a5..4733e1e7050 100644
--- a/contrib/bloom/sql/bloom.sql
+++ b/contrib/bloom/sql/bloom.sql
@@ -7,6 +7,7 @@ CREATE TABLE tst (
INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,2000) i;
CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (col1 = 3);
+ALTER INDEX bloomidx SET (length=80);
SET enable_seqscan=on;
SET enable_bitmapscan=off;
diff --git a/contrib/bloom/t/001_wal.pl b/contrib/bloom/t/001_wal.pl
index 1b319c993c9..0f2628b5575 100644
--- a/contrib/bloom/t/001_wal.pl
+++ b/contrib/bloom/t/001_wal.pl
@@ -16,7 +16,7 @@ sub test_index_replay
# Wait for standby to catch up
my $applname = $node_standby->name;
my $caughtup_query =
-"SELECT pg_current_wal_lsn() <= write_lsn FROM pg_stat_replication WHERE application_name = '$applname';";
+ "SELECT pg_current_wal_lsn() <= write_lsn FROM pg_stat_replication WHERE application_name = '$applname';";
$node_master->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby 1 to catch up";
@@ -36,6 +36,7 @@ sub test_index_replay
my $standby_result = $node_standby->safe_psql("postgres", $queries);
is($master_result, $standby_result, "$test_name: query result matches");
+ return;
}
# Initialize master node
@@ -57,7 +58,7 @@ sub test_index_replay
$node_master->safe_psql("postgres", "CREATE EXTENSION bloom;");
$node_master->safe_psql("postgres", "CREATE TABLE tst (i int4, t text);");
$node_master->safe_psql("postgres",
-"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,100000) i;"
+ "INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,100000) i;"
);
$node_master->safe_psql("postgres",
"CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (col1 = 3);");
@@ -74,7 +75,7 @@ sub test_index_replay
test_index_replay("vacuum $i");
my ($start, $end) = (100001 + ($i - 1) * 10000, 100000 + $i * 10000);
$node_master->safe_psql("postgres",
-"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series($start,$end) i;"
+ "INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series($start,$end) i;"
);
test_index_replay("insert $i");
}
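
Taken together, the bloom changes above thread collations into the hash calls, pass a lock level (AccessExclusiveLock) to add_int_reloption(), and exercise ALTER INDEX ... SET in the regression tests. A minimal SQL sketch of the user-visible behavior, assuming the bloom extension is installed (table and option values here are illustrative, not from the patch):

    -- signature length in bits is "length"; per-column bit counts are col1, col2, ...
    CREATE EXTENSION IF NOT EXISTS bloom;
    CREATE TABLE bloom_demo (i int4, t text);
    CREATE INDEX bloom_demo_idx ON bloom_demo USING bloom (i, t) WITH (col1 = 3);
    -- with the reloption lock level above, changing it takes AccessExclusiveLock
    ALTER INDEX bloom_demo_idx SET (length = 80);
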
diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index a660681e581..2ecf7a2d87c 100644
--- a/contrib/btree_gin/btree_gin.c
+++ b/contrib/btree_gin/btree_gin.c
@@ -10,6 +10,7 @@
#include "utils/bytea.h"
#include "utils/cash.h"
#include "utils/date.h"
+#include "utils/float.h"
#include "utils/inet.h"
#include "utils/numeric.h"
#include "utils/timestamp.h"
@@ -88,6 +89,7 @@ gin_btree_extract_query(FunctionCallInfo fcinfo,
case BTGreaterEqualStrategyNumber:
case BTGreaterStrategyNumber:
*ptr_partialmatch = true;
+ /* FALLTHROUGH */
case BTEqualStrategyNumber:
entries[0] = datum;
break;
@@ -483,8 +485,12 @@ GIN_SUPPORT(anyenum, false, leftmostvalue_enum, gin_enum_cmp)
static Datum
leftmostvalue_uuid(void)
{
- /* palloc0 will create the UUID with all zeroes: "00000000-0000-0000-0000-000000000000" */
- pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
+ /*
+ * palloc0 will create the UUID with all zeroes:
+ * "00000000-0000-0000-0000-000000000000"
+ */
+ pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
+
return UUIDPGetDatum(retval);
}
@@ -493,7 +499,8 @@ GIN_SUPPORT(uuid, false, leftmostvalue_uuid, uuid_cmp)
static Datum
leftmostvalue_name(void)
{
- NameData* result = (NameData *) palloc0(NAMEDATALEN);
+ NameData *result = (NameData *) palloc0(NAMEDATALEN);
+
return NameGetDatum(result);
}
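
The new /* FALLTHROUGH */ comment above documents existing behavior rather than changing it: for the greater-than and greater-or-equal strategies, gin_btree_extract_query() sets the partial-match flag and then deliberately falls through to the equality case, so the comparison value is stored as the single query entry. A hedged sketch of what that looks like from SQL (object names are illustrative):

    -- assumes btree_gin is available; an inequality scan is extracted as a
    -- partial match starting at the entry for the comparison value (42)
    CREATE EXTENSION IF NOT EXISTS btree_gin;
    CREATE TABLE gin_demo (v int4);
    CREATE INDEX gin_demo_idx ON gin_demo USING gin (v);
    SELECT count(*) FROM gin_demo WHERE v > 42;
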
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index 18740cad383..49d1849d889 100644
--- a/contrib/btree_gist/btree_ts.c
+++ b/contrib/btree_gist/btree_ts.c
@@ -9,6 +9,7 @@
#include "btree_utils_num.h"
#include "utils/builtins.h"
#include "utils/datetime.h"
+#include "utils/float.h"
typedef struct
{
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 29b0faf997f..7564a403c7d 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -185,10 +185,10 @@ gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec, const gbtree_nin
c.upper = &cur[tinfo->size];
/* if out->lower > cur->lower, adopt cur as lower */
if (tinfo->f_gt(o.lower, c.lower, flinfo))
- memcpy((void *) o.lower, (void *) c.lower, tinfo->size);
+ memcpy(unconstify(GBT_NUMKEY *, o.lower), c.lower, tinfo->size);
/* if out->upper < cur->upper, adopt cur as upper */
if (tinfo->f_lt(o.upper, c.upper, flinfo))
- memcpy((void *) o.upper, (void *) c.upper, tinfo->size);
+ memcpy(unconstify(GBT_NUMKEY *, o.upper), c.upper, tinfo->size);
}
return out;
@@ -206,10 +206,10 @@ gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo
GBT_NUMKEY_R b1,
b2;
- b1.lower = &(((GBT_NUMKEY *) a)[0]);
- b1.upper = &(((GBT_NUMKEY *) a)[tinfo->size]);
- b2.lower = &(((GBT_NUMKEY *) b)[0]);
- b2.upper = &(((GBT_NUMKEY *) b)[tinfo->size]);
+ b1.lower = &(a[0]);
+ b1.upper = &(a[tinfo->size]);
+ b2.lower = &(b[0]);
+ b2.upper = &(b[tinfo->size]);
return (tinfo->f_eq(b1.lower, b2.lower, flinfo) &&
tinfo->f_eq(b1.upper, b2.upper, flinfo));
@@ -227,8 +227,8 @@ gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo, FmgrInfo *
if (!DatumGetPointer(*u))
{
*u = PointerGetDatum(palloc0(tinfo->indexsize));
- memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]), (void *) rd.lower, tinfo->size);
- memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]), (void *) rd.upper, tinfo->size);
+ memcpy(&(((GBT_NUMKEY *) DatumGetPointer(*u))[0]), rd.lower, tinfo->size);
+ memcpy(&(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]), rd.upper, tinfo->size);
}
else
{
@@ -236,10 +236,10 @@ gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo, FmgrInfo *
ur.lower = &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]);
ur.upper = &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]);
- if (tinfo->f_gt((void *) ur.lower, (void *) rd.lower, flinfo))
- memcpy((void *) ur.lower, (void *) rd.lower, tinfo->size);
- if (tinfo->f_lt((void *) ur.upper, (void *) rd.upper, flinfo))
- memcpy((void *) ur.upper, (void *) rd.upper, tinfo->size);
+ if (tinfo->f_gt(ur.lower, rd.lower, flinfo))
+ memcpy(unconstify(GBT_NUMKEY *, ur.lower), rd.lower, tinfo->size);
+ if (tinfo->f_lt(ur.upper, rd.upper, flinfo))
+ memcpy(unconstify(GBT_NUMKEY *, ur.upper), rd.upper, tinfo->size);
}
}
diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h
index d7945f856c8..50907b3b5c6 100644
--- a/contrib/btree_gist/btree_utils_num.h
+++ b/contrib/btree_gist/btree_utils_num.h
@@ -110,26 +110,26 @@ do { \
extern Interval *abs_interval(Interval *a);
extern bool gbt_num_consistent(const GBT_NUMKEY_R *key, const void *query,
- const StrategyNumber *strategy, bool is_leaf,
- const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ const StrategyNumber *strategy, bool is_leaf,
+ const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query,
- bool is_leaf, const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ bool is_leaf, const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
- const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern GISTENTRY *gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo);
extern GISTENTRY *gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo);
extern void *gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec,
- const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern bool gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b,
- const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
extern void gbt_num_bin_union(Datum *u, GBT_NUMKEY *e,
- const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_ninfo *tinfo, FmgrInfo *flinfo);
#endif
diff --git a/contrib/btree_gist/btree_utils_var.h b/contrib/btree_gist/btree_utils_var.h
index 15d847c1394..1f2ca43e6bc 100644
--- a/contrib/btree_gist/btree_utils_var.h
+++ b/contrib/btree_gist/btree_utils_var.h
@@ -52,22 +52,22 @@ extern GBT_VARKEY *gbt_var_key_copy(const GBT_VARKEY_R *u);
extern GISTENTRY *gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo);
extern GBT_VARKEY *gbt_var_union(const GistEntryVector *entryvec, int32 *size,
- Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
extern bool gbt_var_same(Datum d1, Datum d2, Oid collation,
- const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
extern float *gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
- Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
extern bool gbt_var_consistent(GBT_VARKEY_R *key, const void *query,
- StrategyNumber strategy, Oid collation, bool is_leaf,
- const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ StrategyNumber strategy, Oid collation, bool is_leaf,
+ const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
extern GIST_SPLITVEC *gbt_var_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
- Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ Oid collation, const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
extern void gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
- const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
+ const gbtree_vinfo *tinfo, FmgrInfo *flinfo);
#endif
diff --git a/contrib/btree_gist/expected/bit.out b/contrib/btree_gist/expected/bit.out
index 8606baf366e..e57871f310b 100644
--- a/contrib/btree_gist/expected/bit.out
+++ b/contrib/btree_gist/expected/bit.out
@@ -68,9 +68,9 @@ SELECT count(*) FROM bittmp WHERE a > '011011000100010111011000110000100';
SET enable_bitmapscan=off;
EXPLAIN (COSTS OFF)
SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001';
- QUERY PLAN
------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------
Index Only Scan using bitidx on bittmp
- Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit"))
+ Index Cond: ((a >= '1000000'::"bit") AND (a <= '1000001'::"bit"))
(2 rows)
diff --git a/contrib/btree_gist/expected/cash.out b/contrib/btree_gist/expected/cash.out
index cacbd718541..7fbc7355929 100644
--- a/contrib/btree_gist/expected/cash.out
+++ b/contrib/btree_gist/expected/cash.out
@@ -1,5 +1,5 @@
-- money check
-CREATE TABLE moneytmp (a money) WITH OIDS;
+CREATE TABLE moneytmp (a money);
\copy moneytmp from 'data/cash.data'
SET enable_seqscan=on;
SELECT count(*) FROM moneytmp WHERE a < '22649.64';
diff --git a/contrib/btree_gist/expected/float4.out b/contrib/btree_gist/expected/float4.out
index abbd9eef4e8..dfe732049e6 100644
--- a/contrib/btree_gist/expected/float4.out
+++ b/contrib/btree_gist/expected/float4.out
@@ -33,11 +33,11 @@ SELECT count(*) FROM float4tmp WHERE a > -179.0;
(1 row)
SELECT a, a <-> '-179.0' FROM float4tmp ORDER BY a <-> '-179.0' LIMIT 3;
- a | ?column?
-----------+----------
- -179 | 0
- -189.024 | 10.0239
- -158.177 | 20.8226
+ a | ?column?
+------------+-----------
+ -179 | 0
+ -189.02386 | 10.023865
+ -158.17741 | 20.822586
(3 rows)
CREATE INDEX float4idx ON float4tmp USING gist ( a );
@@ -82,10 +82,10 @@ SELECT a, a <-> '-179.0' FROM float4tmp ORDER BY a <-> '-179.0' LIMIT 3;
(3 rows)
SELECT a, a <-> '-179.0' FROM float4tmp ORDER BY a <-> '-179.0' LIMIT 3;
- a | ?column?
-----------+----------
- -179 | 0
- -189.024 | 10.0239
- -158.177 | 20.8226
+ a | ?column?
+------------+-----------
+ -179 | 0
+ -189.02386 | 10.023865
+ -158.17741 | 20.822586
(3 rows)
diff --git a/contrib/btree_gist/expected/float8.out b/contrib/btree_gist/expected/float8.out
index 5111dbdfaea..ebd0ef3d689 100644
--- a/contrib/btree_gist/expected/float8.out
+++ b/contrib/btree_gist/expected/float8.out
@@ -33,11 +33,11 @@ SELECT count(*) FROM float8tmp WHERE a > -1890.0;
(1 row)
SELECT a, a <-> '-1890.0' FROM float8tmp ORDER BY a <-> '-1890.0' LIMIT 3;
- a | ?column?
---------------+------------
- -1890 | 0
- -2003.634512 | 113.634512
- -1769.73634 | 120.26366
+ a | ?column?
+--------------+--------------------
+ -1890 | 0
+ -2003.634512 | 113.63451200000009
+ -1769.73634 | 120.26366000000007
(3 rows)
CREATE INDEX float8idx ON float8tmp USING gist ( a );
@@ -82,10 +82,10 @@ SELECT a, a <-> '-1890.0' FROM float8tmp ORDER BY a <-> '-1890.0' LIMIT 3;
(3 rows)
SELECT a, a <-> '-1890.0' FROM float8tmp ORDER BY a <-> '-1890.0' LIMIT 3;
- a | ?column?
---------------+------------
- -1890 | 0
- -2003.634512 | 113.634512
- -1769.73634 | 120.26366
+ a | ?column?
+--------------+--------------------
+ -1890 | 0
+ -2003.634512 | 113.63451200000009
+ -1769.73634 | 120.26366000000007
(3 rows)
diff --git a/contrib/btree_gist/expected/inet.out b/contrib/btree_gist/expected/inet.out
index 905f55d740b..c323d903da4 100644
--- a/contrib/btree_gist/expected/inet.out
+++ b/contrib/btree_gist/expected/inet.out
@@ -64,18 +64,16 @@ SELECT count(*) FROM inettmp WHERE a > '89.225.196.191'::inet;
386
(1 row)
-VACUUM inettmp;
+VACUUM ANALYZE inettmp;
-- gist_inet_ops lacks a fetch function, so this should not be index-only scan
EXPLAIN (COSTS OFF)
SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet;
- QUERY PLAN
---------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------
Aggregate
- -> Bitmap Heap Scan on inettmp
- Recheck Cond: (a = '89.225.196.191'::inet)
- -> Bitmap Index Scan on inetidx
- Index Cond: (a = '89.225.196.191'::inet)
-(5 rows)
+ -> Index Scan using inetidx on inettmp
+ Index Cond: (a = '89.225.196.191'::inet)
+(3 rows)
SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet;
count
@@ -88,14 +86,12 @@ CREATE INDEX ON inettmp USING gist (a gist_inet_ops, a inet_ops);
-- likewise here (checks for core planner bug)
EXPLAIN (COSTS OFF)
SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet;
- QUERY PLAN
---------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------
Aggregate
- -> Bitmap Heap Scan on inettmp
- Recheck Cond: (a = '89.225.196.191'::inet)
- -> Bitmap Index Scan on inettmp_a_a1_idx
- Index Cond: (a = '89.225.196.191'::inet)
-(5 rows)
+ -> Index Scan using inettmp_a_a1_idx on inettmp
+ Index Cond: (a = '89.225.196.191'::inet)
+(3 rows)
SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet;
count
diff --git a/contrib/btree_gist/expected/oid.out b/contrib/btree_gist/expected/oid.out
index ffa90c3c3c7..776bbb10267 100644
--- a/contrib/btree_gist/expected/oid.out
+++ b/contrib/btree_gist/expected/oid.out
@@ -1,64 +1,66 @@
-- oid check
SET enable_seqscan=on;
-SELECT count(*) FROM moneytmp WHERE oid < ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+CREATE TEMPORARY TABLE oidtmp (oid oid);
+INSERT INTO oidtmp SELECT g.i::oid FROM generate_series(1, 1000) g(i);
+SELECT count(*) FROM oidtmp WHERE oid < 17;
count
-------
- 372
+ 16
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid <= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid <= 17;
count
-------
- 373
+ 17
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid = ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid = 17;
count
-------
1
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid >= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid >= 17;
count
-------
- 228
+ 984
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid > ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid > 17;
count
-------
- 227
+ 983
(1 row)
-CREATE INDEX oididx ON moneytmp USING gist ( oid );
+CREATE INDEX oididx ON oidtmp USING gist ( oid );
SET enable_seqscan=off;
-SELECT count(*) FROM moneytmp WHERE oid < ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid < 17;
count
-------
- 372
+ 16
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid <= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid <= 17;
count
-------
- 373
+ 17
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid = ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid = 17;
count
-------
1
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid >= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid >= 17;
count
-------
- 228
+ 984
(1 row)
-SELECT count(*) FROM moneytmp WHERE oid > ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid > 17;
count
-------
- 227
+ 983
(1 row)
diff --git a/contrib/btree_gist/expected/varbit.out b/contrib/btree_gist/expected/varbit.out
index 538ace85c90..ede36bc3ead 100644
--- a/contrib/btree_gist/expected/varbit.out
+++ b/contrib/btree_gist/expected/varbit.out
@@ -68,9 +68,9 @@ SELECT count(*) FROM varbittmp WHERE a > '1110100111010'::varbit;
SET enable_bitmapscan=off;
EXPLAIN (COSTS OFF)
SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001';
- QUERY PLAN
------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------
Index Only Scan using bitidx on bittmp
- Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit"))
+ Index Cond: ((a >= '1000000'::"bit") AND (a <= '1000001'::"bit"))
(2 rows)
diff --git a/contrib/btree_gist/sql/cash.sql b/contrib/btree_gist/sql/cash.sql
index 0e037984e1b..4526cc4f0aa 100644
--- a/contrib/btree_gist/sql/cash.sql
+++ b/contrib/btree_gist/sql/cash.sql
@@ -1,6 +1,6 @@
-- money check
-CREATE TABLE moneytmp (a money) WITH OIDS;
+CREATE TABLE moneytmp (a money);
\copy moneytmp from 'data/cash.data'
diff --git a/contrib/btree_gist/sql/inet.sql b/contrib/btree_gist/sql/inet.sql
index 08952f2c449..4b8d354b00e 100644
--- a/contrib/btree_gist/sql/inet.sql
+++ b/contrib/btree_gist/sql/inet.sql
@@ -30,7 +30,7 @@ SELECT count(*) FROM inettmp WHERE a >= '89.225.196.191'::inet;
SELECT count(*) FROM inettmp WHERE a > '89.225.196.191'::inet;
-VACUUM inettmp;
+VACUUM ANALYZE inettmp;
-- gist_inet_ops lacks a fetch function, so this should not be index-only scan
EXPLAIN (COSTS OFF)
diff --git a/contrib/btree_gist/sql/oid.sql b/contrib/btree_gist/sql/oid.sql
index fd03b82bd44..c9358234ce9 100644
--- a/contrib/btree_gist/sql/oid.sql
+++ b/contrib/btree_gist/sql/oid.sql
@@ -2,26 +2,29 @@
SET enable_seqscan=on;
-SELECT count(*) FROM moneytmp WHERE oid < ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+CREATE TEMPORARY TABLE oidtmp (oid oid);
+INSERT INTO oidtmp SELECT g.i::oid FROM generate_series(1, 1000) g(i);
-SELECT count(*) FROM moneytmp WHERE oid <= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid < 17;
-SELECT count(*) FROM moneytmp WHERE oid = ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid <= 17;
-SELECT count(*) FROM moneytmp WHERE oid >= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid = 17;
-SELECT count(*) FROM moneytmp WHERE oid > ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid >= 17;
-CREATE INDEX oididx ON moneytmp USING gist ( oid );
+SELECT count(*) FROM oidtmp WHERE oid > 17;
+
+CREATE INDEX oididx ON oidtmp USING gist ( oid );
SET enable_seqscan=off;
-SELECT count(*) FROM moneytmp WHERE oid < ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid < 17;
-SELECT count(*) FROM moneytmp WHERE oid <= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid <= 17;
-SELECT count(*) FROM moneytmp WHERE oid = ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid = 17;
-SELECT count(*) FROM moneytmp WHERE oid >= ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid >= 17;
-SELECT count(*) FROM moneytmp WHERE oid > ( SELECT oid FROM moneytmp WHERE a = '22649.64' );
+SELECT count(*) FROM oidtmp WHERE oid > 17;
diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile
index e32a7de9464..e37dcf9b584 100644
--- a/contrib/citext/Makefile
+++ b/contrib/citext/Makefile
@@ -3,7 +3,9 @@
MODULES = citext
EXTENSION = citext
-DATA = citext--1.4.sql citext--1.4--1.5.sql \
+DATA = citext--1.4.sql \
+ citext--1.5--1.6.sql \
+ citext--1.4--1.5.sql \
citext--1.3--1.4.sql \
citext--1.2--1.3.sql citext--1.1--1.2.sql \
citext--1.0--1.1.sql citext--unpackaged--1.0.sql
diff --git a/contrib/citext/citext--1.5--1.6.sql b/contrib/citext/citext--1.5--1.6.sql
new file mode 100644
index 00000000000..32268983aef
--- /dev/null
+++ b/contrib/citext/citext--1.5--1.6.sql
@@ -0,0 +1,12 @@
+/* contrib/citext/citext--1.5--1.6.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION citext UPDATE TO '1.6'" to load this file. \quit
+
+CREATE FUNCTION citext_hash_extended(citext, int8)
+RETURNS int8
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE;
+
+ALTER OPERATOR FAMILY citext_ops USING hash ADD
+ FUNCTION 2 citext_hash_extended(citext, int8);
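
As the \echo guard above indicates, this script is only meant to run through the extension machinery. A short sketch of applying it to an existing 1.5 installation (database state assumed, seed value illustrative):

    -- assuming citext 1.5 is already installed in the current database
    ALTER EXTENSION citext UPDATE TO '1.6';
    -- the new hash support function is then available directly as well
    SELECT citext_hash_extended('PostgreSQL'::citext, 42);
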
diff --git a/contrib/citext/citext.c b/contrib/citext/citext.c
index 2c0e48e2bc1..a4adafe8958 100644
--- a/contrib/citext/citext.c
+++ b/contrib/citext/citext.c
@@ -3,10 +3,10 @@
*/
#include "postgres.h"
-#include "access/hash.h"
#include "catalog/pg_collation.h"
#include "utils/builtins.h"
#include "utils/formatting.h"
+#include "utils/hashutils.h"
#include "utils/varlena.h"
PG_MODULE_MAGIC;
@@ -153,6 +153,26 @@ citext_hash(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(result);
}
+PG_FUNCTION_INFO_V1(citext_hash_extended);
+
+Datum
+citext_hash_extended(PG_FUNCTION_ARGS)
+{
+ text *txt = PG_GETARG_TEXT_PP(0);
+ uint64 seed = PG_GETARG_INT64(1);
+ char *str;
+ Datum result;
+
+ str = str_tolower(VARDATA_ANY(txt), VARSIZE_ANY_EXHDR(txt), DEFAULT_COLLATION_OID);
+ result = hash_any_extended((unsigned char *) str, strlen(str), seed);
+ pfree(str);
+
+ /* Avoid leaking memory for toasted inputs */
+ PG_FREE_IF_COPY(txt, 0);
+
+ PG_RETURN_DATUM(result);
+}
+
/*
* ==================
* OPERATOR FUNCTIONS
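
The implementation above mirrors citext_hash(): case-fold with the default collation, then hash the result, with the extended variant additionally taking a 64-bit seed. The expectation, which the new regression query further down verifies, is that a zero seed agrees with the 32-bit hash while a nonzero seed perturbs it. A sketch of that invariant (input value is illustrative):

    -- expectation: h32 = h64_seed0, and h64_seed1 differs from both
    SELECT citext_hash('AbC'::citext)::bit(32)             AS h32,
           citext_hash_extended('AbC'::citext, 0)::bit(32) AS h64_seed0,
           citext_hash_extended('AbC'::citext, 1)::bit(32) AS h64_seed1;
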
diff --git a/contrib/citext/citext.control b/contrib/citext/citext.control
index 4cd6e09331f..a872a3f012b 100644
--- a/contrib/citext/citext.control
+++ b/contrib/citext/citext.control
@@ -1,5 +1,5 @@
# citext extension
comment = 'data type for case-insensitive character strings'
-default_version = '1.5'
+default_version = '1.6'
module_pathname = '$libdir/citext'
relocatable = true
diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out
index 95373182af5..96800be9c03 100644
--- a/contrib/citext/expected/citext.out
+++ b/contrib/citext/expected/citext.out
@@ -222,6 +222,18 @@ SELECT citext_cmp('B'::citext, 'a'::citext) > 0 AS true;
t
(1 row)
+-- Check the citext_hash() and citext_hash_extended() function explicitly.
+SELECT v as value, citext_hash(v)::bit(32) as standard,
+ citext_hash_extended(v, 0)::bit(32) as extended0,
+ citext_hash_extended(v, 1)::bit(32) as extended1
+FROM (VALUES (NULL::citext), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'),
+ ('muop28x03'), ('yi3nm0d73')) x(v)
+WHERE citext_hash(v)::bit(32) != citext_hash_extended(v, 0)::bit(32)
+ OR citext_hash(v)::bit(32) = citext_hash_extended(v, 1)::bit(32);
+ value | standard | extended0 | extended1
+-------+----------+-----------+-----------
+(0 rows)
+
-- Do some tests using a table and index.
CREATE TEMP TABLE try (
name citext PRIMARY KEY
@@ -1829,7 +1841,7 @@ SELECT regexp_match('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citex
-- g is not allowed
SELECT regexp_match('foobarbequebazmorebarbequetoo'::citext, '(BAR)(BEQUE)'::citext, 'g') AS "error";
-ERROR: regexp_match does not support the global option
+ERROR: regexp_match() does not support the "global" option
HINT: Use the regexp_matches function instead.
CONTEXT: SQL function "regexp_match" statement 1
SELECT regexp_matches('foobarbequebaz'::citext, '(bar)(beque)') = ARRAY[ 'bar', 'beque' ] AS t;
@@ -2336,8 +2348,8 @@ SELECT *
WHERE t.id IS NULL OR m.id IS NULL;
id | name | id | name
----+------+----+------
- 2 | two | |
| | 2 | Two
+ 2 | two | |
(2 rows)
REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
@@ -2597,7 +2609,7 @@ SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true;
t
(1 row)
--- Multi-byte tests below are diabled like the sanity tests above.
+-- Multi-byte tests below are disabled like the sanity tests above.
-- Uncomment to run them.
-- Test ~<~ and ~<=~
SELECT 'a'::citext ~<~ 'B'::citext AS t;
diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out
index 855ec3f10b7..33e3676d3c4 100644
--- a/contrib/citext/expected/citext_1.out
+++ b/contrib/citext/expected/citext_1.out
@@ -222,6 +222,18 @@ SELECT citext_cmp('B'::citext, 'a'::citext) > 0 AS true;
t
(1 row)
+-- Check the citext_hash() and citext_hash_extended() function explicitly.
+SELECT v as value, citext_hash(v)::bit(32) as standard,
+ citext_hash_extended(v, 0)::bit(32) as extended0,
+ citext_hash_extended(v, 1)::bit(32) as extended1
+FROM (VALUES (NULL::citext), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'),
+ ('muop28x03'), ('yi3nm0d73')) x(v)
+WHERE citext_hash(v)::bit(32) != citext_hash_extended(v, 0)::bit(32)
+ OR citext_hash(v)::bit(32) = citext_hash_extended(v, 1)::bit(32);
+ value | standard | extended0 | extended1
+-------+----------+-----------+-----------
+(0 rows)
+
-- Do some tests using a table and index.
CREATE TEMP TABLE try (
name citext PRIMARY KEY
@@ -1829,7 +1841,7 @@ SELECT regexp_match('foobarbequebaz'::citext, '(BAR)(BEQUE)'::citext, 'c'::citex
-- g is not allowed
SELECT regexp_match('foobarbequebazmorebarbequetoo'::citext, '(BAR)(BEQUE)'::citext, 'g') AS "error";
-ERROR: regexp_match does not support the global option
+ERROR: regexp_match() does not support the "global" option
HINT: Use the regexp_matches function instead.
CONTEXT: SQL function "regexp_match" statement 1
SELECT regexp_matches('foobarbequebaz'::citext, '(bar)(beque)') = ARRAY[ 'bar', 'beque' ] AS t;
@@ -2336,8 +2348,8 @@ SELECT *
WHERE t.id IS NULL OR m.id IS NULL;
id | name | id | name
----+------+----+------
- 2 | two | |
| | 2 | Two
+ 2 | two | |
(2 rows)
REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
@@ -2597,7 +2609,7 @@ SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true;
t
(1 row)
--- Multi-byte tests below are diabled like the sanity tests above.
+-- Multi-byte tests below are disabled like the sanity tests above.
-- Uncomment to run them.
-- Test ~<~ and ~<=~
SELECT 'a'::citext ~<~ 'B'::citext AS t;
diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql
index 2732be436dc..261b73cfa6c 100644
--- a/contrib/citext/sql/citext.sql
+++ b/contrib/citext/sql/citext.sql
@@ -89,6 +89,15 @@ SELECT citext_cmp('aardvark'::citext, 'aardVark'::citext) AS zero;
SELECT citext_cmp('AARDVARK'::citext, 'AARDVARK'::citext) AS zero;
SELECT citext_cmp('B'::citext, 'a'::citext) > 0 AS true;
+-- Check the citext_hash() and citext_hash_extended() function explicitly.
+SELECT v as value, citext_hash(v)::bit(32) as standard,
+ citext_hash_extended(v, 0)::bit(32) as extended0,
+ citext_hash_extended(v, 1)::bit(32) as extended1
+FROM (VALUES (NULL::citext), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'),
+ ('muop28x03'), ('yi3nm0d73')) x(v)
+WHERE citext_hash(v)::bit(32) != citext_hash_extended(v, 0)::bit(32)
+ OR citext_hash(v)::bit(32) = citext_hash_extended(v, 1)::bit(32);
+
-- Do some tests using a table and index.
CREATE TEMP TABLE try (
@@ -801,7 +810,7 @@ SELECT citext_pattern_ge('b'::citext, 'a'::citext) AS true;
SELECT citext_pattern_ge('B'::citext, 'a'::citext) AS true;
SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true;
--- Multi-byte tests below are diabled like the sanity tests above.
+-- Multi-byte tests below are disabled like the sanity tests above.
-- Uncomment to run them.
-- Test ~<~ and ~<=~
diff --git a/contrib/cube/Makefile b/contrib/cube/Makefile
index accb7d28a39..5e7b524dc22 100644
--- a/contrib/cube/Makefile
+++ b/contrib/cube/Makefile
@@ -9,7 +9,9 @@ DATA = cube--1.2.sql cube--1.2--1.3.sql cube--1.3--1.4.sql \
cube--unpackaged--1.0.sql
PGFILEDESC = "cube - multidimensional cube data type"
-REGRESS = cube
+HEADERS = cubedata.h
+
+REGRESS = cube cube_sci
EXTRA_CLEAN = y.tab.c y.tab.h
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index d96ca1ec1fd..b7203668760 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -8,13 +8,12 @@
#include "postgres.h"
-#include <float.h>
#include <math.h>
#include "access/gist.h"
#include "access/stratnum.h"
#include "utils/array.h"
-#include "utils/builtins.h"
+#include "utils/float.h"
#include "cubedata.h"
@@ -101,7 +100,7 @@ bool g_cube_leaf_consistent(NDBOX *key, NDBOX *query, StrategyNumber strategy);
bool g_cube_internal_consistent(NDBOX *key, NDBOX *query, StrategyNumber strategy);
/*
-** Auxiliary funxtions
+** Auxiliary functions
*/
static double distance_1D(double a1, double a2, double b1, double b2);
static bool cube_is_point_internal(NDBOX *cube);
@@ -152,6 +151,13 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
errmsg("cannot work with arrays containing NULLs")));
dim = ARRNELEMS(ur);
+ if (dim > CUBE_MAX_DIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("can't extend cube"),
+ errdetail("A cube cannot have more than %d dimensions.",
+ CUBE_MAX_DIM)));
+
if (ARRNELEMS(ll) != dim)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_ELEMENT_ERROR),
@@ -209,6 +215,12 @@ cube_a_f8(PG_FUNCTION_ARGS)
errmsg("cannot work with arrays containing NULLs")));
dim = ARRNELEMS(ur);
+ if (dim > CUBE_MAX_DIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array is too long"),
+ errdetail("A cube cannot have more than %d dimensions.",
+ CUBE_MAX_DIM)));
dur = ARRPTR(ur);
@@ -243,6 +255,13 @@ cube_subset(PG_FUNCTION_ARGS)
dx = (int32 *) ARR_DATA_PTR(idx);
dim = ARRNELEMS(idx);
+ if (dim > CUBE_MAX_DIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("array is too long"),
+ errdetail("A cube cannot have more than %d dimensions.",
+ CUBE_MAX_DIM)));
+
size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
@@ -571,7 +590,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
v->spl_nright++;
}
}
- *left = *right = FirstOffsetNumber; /* sentinel value, see dosplit() */
+ *left = *right = FirstOffsetNumber; /* sentinel value */
v->spl_ldatum = PointerGetDatum(datum_l);
v->spl_rdatum = PointerGetDatum(datum_r);
@@ -1361,9 +1380,10 @@ g_cube_distance(PG_FUNCTION_ARGS)
if (coord <= 2 * DIM(cube))
{
/* dimension index */
- int index = (coord - 1) / 2;
+ int index = (coord - 1) / 2;
+
/* whether this is upper bound (lower bound otherwise) */
- bool upper = ((coord - 1) % 2 == 1);
+ bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
@@ -1596,9 +1616,10 @@ cube_coord_llur(PG_FUNCTION_ARGS)
if (coord <= 2 * DIM(cube))
{
/* dimension index */
- int index = (coord - 1) / 2;
+ int index = (coord - 1) / 2;
+
/* whether this is upper bound (lower bound otherwise) */
- bool upper = ((coord - 1) % 2 == 1);
+ bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
@@ -1615,8 +1636,8 @@ cube_coord_llur(PG_FUNCTION_ARGS)
else
{
/*
- * Return zero if coordinate is out of bound. That reproduces logic of
- * how cubes with low dimension number are expanded during GiST
+ * Return zero if coordinate is out of bound. That reproduces logic
+ * of how cubes with low dimension number are expanded during GiST
* indexing.
*/
result = 0.0;
@@ -1754,6 +1775,13 @@ cube_c_f8(PG_FUNCTION_ARGS)
int size;
int i;
+ if (DIM(cube) + 1 > CUBE_MAX_DIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("can't extend cube"),
+ errdetail("A cube cannot have more than %d dimensions.",
+ CUBE_MAX_DIM)));
+
if (IS_POINT(cube))
{
size = POINT_SIZE((DIM(cube) + 1));
@@ -1795,6 +1823,13 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
int size;
int i;
+ if (DIM(cube) + 1 > CUBE_MAX_DIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("can't extend cube"),
+ errdetail("A cube cannot have more than %d dimensions.",
+ CUBE_MAX_DIM)));
+
if (IS_POINT(cube) && (x1 == x2))
{
size = POINT_SIZE((DIM(cube) + 1));
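
The repeated ereport() blocks above enforce CUBE_MAX_DIM (100) in every construction path: building a cube from one or two arrays, taking a subset, and extending an existing cube by one dimension. A short SQL sketch of the boundary, using the same generate_series() trick as the new regression tests:

    -- 100 dimensions is the maximum, so this succeeds
    SELECT cube(array(SELECT 0 FROM generate_series(1, 100)));
    -- 101 dimensions fails with "array is too long"
    SELECT cube(array(SELECT 0 FROM generate_series(1, 101)));
    -- extending an already-maximal cube fails with "can't extend cube"
    SELECT cube(cube(array(SELECT 0 FROM generate_series(1, 100))), 0);
    -- per the (coord - 1) / 2 logic in g_cube_distance/cube_coord_llur,
    -- coordinate 3 should be the lower bound of the 2nd dimension (here 2)
    SELECT '(1,2),(3,4)'::cube ~> 3;
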
diff --git a/contrib/cube/cubeparse.y b/contrib/cube/cubeparse.y
index 1b65fa967c0..deb2efdc0da 100644
--- a/contrib/cube/cubeparse.y
+++ b/contrib/cube/cubeparse.y
@@ -7,7 +7,7 @@
#include "postgres.h"
#include "cubedata.h"
-#include "utils/builtins.h"
+#include "utils/float.h"
/* All grammar constructs return strings */
#define YYSTYPE char *
diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out
index 6378db3004e..5b89cb1a26b 100644
--- a/contrib/cube/expected/cube.out
+++ b/contrib/cube/expected/cube.out
@@ -62,90 +62,6 @@ SELECT '-1.0'::cube AS cube;
(-1)
(1 row)
-SELECT '1e27'::cube AS cube;
- cube
----------
- (1e+27)
-(1 row)
-
-SELECT '-1e27'::cube AS cube;
- cube
-----------
- (-1e+27)
-(1 row)
-
-SELECT '1.0e27'::cube AS cube;
- cube
----------
- (1e+27)
-(1 row)
-
-SELECT '-1.0e27'::cube AS cube;
- cube
-----------
- (-1e+27)
-(1 row)
-
-SELECT '1e+27'::cube AS cube;
- cube
----------
- (1e+27)
-(1 row)
-
-SELECT '-1e+27'::cube AS cube;
- cube
-----------
- (-1e+27)
-(1 row)
-
-SELECT '1.0e+27'::cube AS cube;
- cube
----------
- (1e+27)
-(1 row)
-
-SELECT '-1.0e+27'::cube AS cube;
- cube
-----------
- (-1e+27)
-(1 row)
-
-SELECT '1e-7'::cube AS cube;
- cube
----------
- (1e-07)
-(1 row)
-
-SELECT '-1e-7'::cube AS cube;
- cube
-----------
- (-1e-07)
-(1 row)
-
-SELECT '1.0e-7'::cube AS cube;
- cube
----------
- (1e-07)
-(1 row)
-
-SELECT '-1.0e-7'::cube AS cube;
- cube
-----------
- (-1e-07)
-(1 row)
-
-SELECT '1e-300'::cube AS cube;
- cube
-----------
- (1e-300)
-(1 row)
-
-SELECT '-1e-300'::cube AS cube;
- cube
------------
- (-1e-300)
-(1 row)
-
SELECT 'infinity'::cube AS cube;
cube
------------
@@ -164,40 +80,22 @@ SELECT 'NaN'::cube AS cube;
(NaN)
(1 row)
-SELECT '1234567890123456'::cube AS cube;
- cube
-------------------------
- (1.23456789012346e+15)
-(1 row)
-
-SELECT '+1234567890123456'::cube AS cube;
- cube
-------------------------
- (1.23456789012346e+15)
-(1 row)
-
-SELECT '-1234567890123456'::cube AS cube;
- cube
--------------------------
- (-1.23456789012346e+15)
-(1 row)
-
SELECT '.1234567890123456'::cube AS cube;
- cube
----------------------
- (0.123456789012346)
+ cube
+----------------------
+ (0.1234567890123456)
(1 row)
SELECT '+.1234567890123456'::cube AS cube;
- cube
----------------------
- (0.123456789012346)
+ cube
+----------------------
+ (0.1234567890123456)
(1 row)
SELECT '-.1234567890123456'::cube AS cube;
- cube
-----------------------
- (-0.123456789012346)
+ cube
+-----------------------
+ (-0.1234567890123456)
(1 row)
-- simple lists (points)
@@ -520,6 +418,17 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
+-- test for limits: this should pass
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,100)));
+ cube_subset
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ (6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6)
+(1 row)
+
+-- and this should fail
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,101)));
+ERROR: array is too long
+DETAIL: A cube cannot have more than 100 dimensions.
--
-- Test point processing
--
@@ -592,6 +501,7 @@ SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
+-- create too big cube from literal
select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
ERROR: invalid input syntax for cube
LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0...
@@ -602,6 +512,34 @@ ERROR: invalid input syntax for cube
LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0...
^
DETAIL: A cube cannot have more than 100 dimensions.
+-- from an array
+select cube(array(SELECT 0 as a FROM generate_series(1,101)));
+ERROR: array is too long
+DETAIL: A cube cannot have more than 100 dimensions.
+select cube(array(SELECT 0 as a FROM generate_series(1,101)),array(SELECT 0 as a FROM generate_series(1,101)));
+ERROR: can't extend cube
+DETAIL: A cube cannot have more than 100 dimensions.
+-- extend cube beyond limit
+-- this should work
+select cube(array(SELECT 0 as a FROM generate_series(1,100)));
+ cube
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+(1 row)
+
+select cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100)));
+ cube
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+(1 row)
+
+-- this should fail
+select cube(cube(array(SELECT 0 as a FROM generate_series(1,100))), 0);
+ERROR: can't extend cube
+DETAIL: A cube cannot have more than 100 dimensions.
+select cube(cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))), 0, 0);
+ERROR: can't extend cube
+DETAIL: A cube cannot have more than 100 dimensions.
--
-- testing the operators
--
@@ -1005,9 +943,9 @@ SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
(1 row)
SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
- cube_distance
-------------------
- 140.762210837994
+ cube_distance
+--------------------
+ 140.76221083799445
(1 row)
-- Test of cube function (text to cube)
@@ -1418,8 +1356,9 @@ SELECT cube_size('(42,137)'::cube);
0
(1 row)
--- Test of distances
+-- Test of distances (euclidean distance may not be bit-exact)
--
+SET extra_float_digits = 0;
SELECT cube_distance('(1,1)'::cube, '(4,5)'::cube);
cube_distance
---------------
@@ -1432,6 +1371,7 @@ SELECT '(1,1)'::cube <-> '(4,5)'::cube as d_e;
5
(1 row)
+RESET extra_float_digits;
SELECT distance_chebyshev('(1,1)'::cube, '(4,5)'::cube);
distance_chebyshev
--------------------
@@ -1619,6 +1559,7 @@ RESET enable_bitmapscan;
INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some corner cases
SET enable_seqscan = false;
-- Test different metrics
+SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
c | dist
-------------------------+------------------
@@ -1629,6 +1570,7 @@ SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c
(1444, 403),(1346, 344) | 846
(5 rows)
+RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
c | dist
-------------------------+------
@@ -1813,6 +1755,7 @@ SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upp
-- Same queries with sequential scan (should give the same results as above)
RESET enable_seqscan;
SET enable_indexscan = OFF;
+SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
c | dist
-------------------------+------------------
@@ -1823,6 +1766,7 @@ SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c
(1444, 403),(1346, 344) | 846
(5 rows)
+RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
c | dist
-------------------------+------
diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out
deleted file mode 100644
index 75fe405c497..00000000000
--- a/contrib/cube/expected/cube_2.out
+++ /dev/null
@@ -1,2006 +0,0 @@
---
--- Test cube datatype
---
-CREATE EXTENSION cube;
--- Check whether any of our opclasses fail amvalidate
-SELECT amname, opcname
-FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod
-WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid);
- amname | opcname
---------+---------
-(0 rows)
-
---
--- testing the input and output functions
---
--- Any number (a one-dimensional point)
-SELECT '1'::cube AS cube;
- cube
-------
- (1)
-(1 row)
-
-SELECT '-1'::cube AS cube;
- cube
-------
- (-1)
-(1 row)
-
-SELECT '1.'::cube AS cube;
- cube
-------
- (1)
-(1 row)
-
-SELECT '-1.'::cube AS cube;
- cube
-------
- (-1)
-(1 row)
-
-SELECT '.1'::cube AS cube;
- cube
--------
- (0.1)
-(1 row)
-
-SELECT '-.1'::cube AS cube;
- cube
---------
- (-0.1)
-(1 row)
-
-SELECT '1.0'::cube AS cube;
- cube
-------
- (1)
-(1 row)
-
-SELECT '-1.0'::cube AS cube;
- cube
-------
- (-1)
-(1 row)
-
-SELECT '1e27'::cube AS cube;
- cube
-----------
- (1e+027)
-(1 row)
-
-SELECT '-1e27'::cube AS cube;
- cube
------------
- (-1e+027)
-(1 row)
-
-SELECT '1.0e27'::cube AS cube;
- cube
-----------
- (1e+027)
-(1 row)
-
-SELECT '-1.0e27'::cube AS cube;
- cube
------------
- (-1e+027)
-(1 row)
-
-SELECT '1e+27'::cube AS cube;
- cube
-----------
- (1e+027)
-(1 row)
-
-SELECT '-1e+27'::cube AS cube;
- cube
------------
- (-1e+027)
-(1 row)
-
-SELECT '1.0e+27'::cube AS cube;
- cube
-----------
- (1e+027)
-(1 row)
-
-SELECT '-1.0e+27'::cube AS cube;
- cube
------------
- (-1e+027)
-(1 row)
-
-SELECT '1e-7'::cube AS cube;
- cube
-----------
- (1e-007)
-(1 row)
-
-SELECT '-1e-7'::cube AS cube;
- cube
------------
- (-1e-007)
-(1 row)
-
-SELECT '1.0e-7'::cube AS cube;
- cube
-----------
- (1e-007)
-(1 row)
-
-SELECT '-1.0e-7'::cube AS cube;
- cube
------------
- (-1e-007)
-(1 row)
-
-SELECT '1e-300'::cube AS cube;
- cube
-----------
- (1e-300)
-(1 row)
-
-SELECT '-1e-300'::cube AS cube;
- cube
------------
- (-1e-300)
-(1 row)
-
-SELECT 'infinity'::cube AS cube;
- cube
-------------
- (Infinity)
-(1 row)
-
-SELECT '-infinity'::cube AS cube;
- cube
--------------
- (-Infinity)
-(1 row)
-
-SELECT 'NaN'::cube AS cube;
- cube
--------
- (NaN)
-(1 row)
-
-SELECT '1234567890123456'::cube AS cube;
- cube
--------------------------
- (1.23456789012346e+015)
-(1 row)
-
-SELECT '+1234567890123456'::cube AS cube;
- cube
--------------------------
- (1.23456789012346e+015)
-(1 row)
-
-SELECT '-1234567890123456'::cube AS cube;
- cube
---------------------------
- (-1.23456789012346e+015)
-(1 row)
-
-SELECT '.1234567890123456'::cube AS cube;
- cube
----------------------
- (0.123456789012346)
-(1 row)
-
-SELECT '+.1234567890123456'::cube AS cube;
- cube
----------------------
- (0.123456789012346)
-(1 row)
-
-SELECT '-.1234567890123456'::cube AS cube;
- cube
-----------------------
- (-0.123456789012346)
-(1 row)
-
--- simple lists (points)
-SELECT '()'::cube AS cube;
- cube
-------
- ()
-(1 row)
-
-SELECT '1,2'::cube AS cube;
- cube
---------
- (1, 2)
-(1 row)
-
-SELECT '(1,2)'::cube AS cube;
- cube
---------
- (1, 2)
-(1 row)
-
-SELECT '1,2,3,4,5'::cube AS cube;
- cube
------------------
- (1, 2, 3, 4, 5)
-(1 row)
-
-SELECT '(1,2,3,4,5)'::cube AS cube;
- cube
------------------
- (1, 2, 3, 4, 5)
-(1 row)
-
--- double lists (cubes)
-SELECT '(),()'::cube AS cube;
- cube
-------
- ()
-(1 row)
-
-SELECT '(0),(0)'::cube AS cube;
- cube
-------
- (0)
-(1 row)
-
-SELECT '(0),(1)'::cube AS cube;
- cube
----------
- (0),(1)
-(1 row)
-
-SELECT '[(0),(0)]'::cube AS cube;
- cube
-------
- (0)
-(1 row)
-
-SELECT '[(0),(1)]'::cube AS cube;
- cube
----------
- (0),(1)
-(1 row)
-
-SELECT '(0,0,0,0),(0,0,0,0)'::cube AS cube;
- cube
---------------
- (0, 0, 0, 0)
-(1 row)
-
-SELECT '(0,0,0,0),(1,0,0,0)'::cube AS cube;
- cube
----------------------------
- (0, 0, 0, 0),(1, 0, 0, 0)
-(1 row)
-
-SELECT '[(0,0,0,0),(0,0,0,0)]'::cube AS cube;
- cube
---------------
- (0, 0, 0, 0)
-(1 row)
-
-SELECT '[(0,0,0,0),(1,0,0,0)]'::cube AS cube;
- cube
----------------------------
- (0, 0, 0, 0),(1, 0, 0, 0)
-(1 row)
-
--- invalid input: parse errors
-SELECT ''::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT ''::cube AS cube;
- ^
-DETAIL: syntax error at end of input
-SELECT 'ABC'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT 'ABC'::cube AS cube;
- ^
-DETAIL: syntax error at or near "A"
-SELECT '[]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[]'::cube AS cube;
- ^
-DETAIL: syntax error at or near "]"
-SELECT '[()]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[()]'::cube AS cube;
- ^
-DETAIL: syntax error at or near "]"
-SELECT '[(1)]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1)]'::cube AS cube;
- ^
-DETAIL: syntax error at or near "]"
-SELECT '[(1),]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1),]'::cube AS cube;
- ^
-DETAIL: syntax error at or near "]"
-SELECT '[(1),2]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1),2]'::cube AS cube;
- ^
-DETAIL: syntax error at or near "2"
-SELECT '[(1),(2),(3)]'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1),(2),(3)]'::cube AS cube;
- ^
-DETAIL: syntax error at or near ","
-SELECT '1,'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1,'::cube AS cube;
- ^
-DETAIL: syntax error at end of input
-SELECT '1,2,'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1,2,'::cube AS cube;
- ^
-DETAIL: syntax error at end of input
-SELECT '1,,2'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1,,2'::cube AS cube;
- ^
-DETAIL: syntax error at or near ","
-SELECT '(1,)'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,)'::cube AS cube;
- ^
-DETAIL: syntax error at or near ")"
-SELECT '(1,2,)'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2,)'::cube AS cube;
- ^
-DETAIL: syntax error at or near ")"
-SELECT '(1,,2)'::cube AS cube;
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,,2)'::cube AS cube;
- ^
-DETAIL: syntax error at or near ","
--- invalid input: semantic errors and trailing garbage
-SELECT '[(1),(2)],'::cube AS cube; -- 0
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1),(2)],'::cube AS cube;
- ^
-DETAIL: syntax error at or near ","
-SELECT '[(1,2,3),(2,3)]'::cube AS cube; -- 1
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1,2,3),(2,3)]'::cube AS cube;
- ^
-DETAIL: Different point dimensions in (1,2,3) and (2,3).
-SELECT '[(1,2),(1,2,3)]'::cube AS cube; -- 1
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '[(1,2),(1,2,3)]'::cube AS cube;
- ^
-DETAIL: Different point dimensions in (1,2) and (1,2,3).
-SELECT '(1),(2),'::cube AS cube; -- 2
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1),(2),'::cube AS cube;
- ^
-DETAIL: syntax error at or near ","
-SELECT '(1,2,3),(2,3)'::cube AS cube; -- 3
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2,3),(2,3)'::cube AS cube;
- ^
-DETAIL: Different point dimensions in (1,2,3) and (2,3).
-SELECT '(1,2),(1,2,3)'::cube AS cube; -- 3
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2),(1,2,3)'::cube AS cube;
- ^
-DETAIL: Different point dimensions in (1,2) and (1,2,3).
-SELECT '(1,2,3)ab'::cube AS cube; -- 4
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2,3)ab'::cube AS cube;
- ^
-DETAIL: syntax error at or near "a"
-SELECT '(1,2,3)a'::cube AS cube; -- 5
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2,3)a'::cube AS cube;
- ^
-DETAIL: syntax error at or near "a"
-SELECT '(1,2)('::cube AS cube; -- 5
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '(1,2)('::cube AS cube;
- ^
-DETAIL: syntax error at or near "("
-SELECT '1,2ab'::cube AS cube; -- 6
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1,2ab'::cube AS cube;
- ^
-DETAIL: syntax error at or near "a"
-SELECT '1 e7'::cube AS cube; -- 6
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1 e7'::cube AS cube;
- ^
-DETAIL: syntax error at or near "e"
-SELECT '1,2a'::cube AS cube; -- 7
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1,2a'::cube AS cube;
- ^
-DETAIL: syntax error at or near "a"
-SELECT '1..2'::cube AS cube; -- 7
-ERROR: invalid input syntax for cube
-LINE 1: SELECT '1..2'::cube AS cube;
- ^
-DETAIL: syntax error at or near ".2"
-SELECT '-1e-700'::cube AS cube; -- out of range
-ERROR: "-1e-700" is out of range for type double precision
-LINE 1: SELECT '-1e-700'::cube AS cube;
- ^
---
--- Testing building cubes from float8 values
---
-SELECT cube(0::float8);
- cube
-------
- (0)
-(1 row)
-
-SELECT cube(1::float8);
- cube
-------
- (1)
-(1 row)
-
-SELECT cube(1,2);
- cube
----------
- (1),(2)
-(1 row)
-
-SELECT cube(cube(1,2),3);
- cube
----------------
- (1, 3),(2, 3)
-(1 row)
-
-SELECT cube(cube(1,2),3,4);
- cube
----------------
- (1, 3),(2, 4)
-(1 row)
-
-SELECT cube(cube(cube(1,2),3,4),5);
- cube
----------------------
- (1, 3, 5),(2, 4, 5)
-(1 row)
-
-SELECT cube(cube(cube(1,2),3,4),5,6);
- cube
----------------------
- (1, 3, 5),(2, 4, 6)
-(1 row)
-
---
--- Test that the text -> cube cast was installed.
---
-SELECT '(0)'::text::cube;
- cube
-------
- (0)
-(1 row)
-
---
--- Test the float[] -> cube cast
---
-SELECT cube('{0,1,2}'::float[], '{3,4,5}'::float[]);
- cube
----------------------
- (0, 1, 2),(3, 4, 5)
-(1 row)
-
-SELECT cube('{0,1,2}'::float[], '{3}'::float[]);
-ERROR: UR and LL arrays must be of same length
-SELECT cube(NULL::float[], '{3}'::float[]);
- cube
-------
-
-(1 row)
-
-SELECT cube('{0,1,2}'::float[]);
- cube
------------
- (0, 1, 2)
-(1 row)
-
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
- cube_subset
----------------------------
- (5, 3, 1, 1),(8, 7, 6, 6)
-(1 row)
-
-SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
- cube_subset
---------------
- (5, 3, 1, 1)
-(1 row)
-
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
-ERROR: Index out of bounds
-SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
-ERROR: Index out of bounds
---
--- Test point processing
---
-SELECT cube('(1,2),(1,2)'); -- cube_in
- cube
---------
- (1, 2)
-(1 row)
-
-SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
- cube
------------
- (0, 1, 2)
-(1 row)
-
-SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
- cube
---------------
- (5, 6, 7, 8)
-(1 row)
-
-SELECT cube(1.37); -- cube_f8
- cube
---------
- (1.37)
-(1 row)
-
-SELECT cube(1.37, 1.37); -- cube_f8_f8
- cube
---------
- (1.37)
-(1 row)
-
-SELECT cube(cube(1,1), 42); -- cube_c_f8
- cube
----------
- (1, 42)
-(1 row)
-
-SELECT cube(cube(1,2), 42); -- cube_c_f8
- cube
------------------
- (1, 42),(2, 42)
-(1 row)
-
-SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
- cube
----------
- (1, 42)
-(1 row)
-
-SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
- cube
------------------
- (1, 42),(1, 24)
-(1 row)
-
-SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
- cube
------------------
- (1, 42),(2, 42)
-(1 row)
-
-SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
- cube
------------------
- (1, 42),(2, 24)
-(1 row)
-
---
--- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
---
-select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
-ERROR: invalid input syntax for cube
-LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0...
- ^
-DETAIL: A cube cannot have more than 100 dimensions.
-select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
-ERROR: invalid input syntax for cube
-LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0...
- ^
-DETAIL: A cube cannot have more than 100 dimensions.
---
--- testing the operators
---
--- equality/inequality:
---
-SELECT '24, 33.20'::cube = '24, 33.20'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '24, 33.20'::cube != '24, 33.20'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '24, 33.20'::cube = '24, 33.21'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '24, 33.20'::cube != '24, 33.21'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
--- "lower than" / "greater than"
--- (these operators are not useful for anything but ordering)
---
-SELECT '1'::cube > '2'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::cube < '2'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1,1'::cube > '1,2'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1,1'::cube < '1,2'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube > '(2,0),(3,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube < '(2,0),(3,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
--- "overlap"
---
-SELECT '1'::cube && '1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::cube && '2'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1,1,1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1,1),(2,2,2)]'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1),(2,2)]'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(2,1,1),(2,2,2)]'::cube AS bool;
- bool
-------
- f
-(1 row)
-
--- "contained in" (the left operand is the cube entirely enclosed by
--- the right operand):
---
-SELECT '0'::cube <@ '0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0,0'::cube <@ '0,0,0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0'::cube <@ '0,0,1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0,0'::cube <@ '0,0,1'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1,0,0'::cube <@ '0,0,1'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(1,0,0),(0,0,1)'::cube <@ '(1,0,0),(0,0,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1),(1,1,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1,-1),(1,1,1,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::cube <@ '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::cube <@ '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1'::cube <@ '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube <@ '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-2),(1)'::cube <@ '(-1),(1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(-2),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
--- "contains" (the left operand is the cube that entirely encloses the
--- right operand)
---
-SELECT '0'::cube @> '0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0,0'::cube @> '0,0,0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0,1'::cube @> '0,0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0,0,1'::cube @> '0,0,0'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0,0,1'::cube @> '1,0,0'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(1,0,0),(0,0,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1,-1,-1),(1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1,-1,-1,-1),(1,1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube @> '0'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube @> '1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube @> '-1'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube @> '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1,-1),(1,1)'::cube @> '(-1),(1)'::cube AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '(-1),(1)'::cube @> '(-2),(1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '(-1,-1),(1,1)'::cube @> '(-2),(1)'::cube AS bool;
- bool
-------
- f
-(1 row)
-
--- Test of distance function
---
-SELECT cube_distance('(0)'::cube,'(2,2,2,2)'::cube);
- cube_distance
----------------
- 4
-(1 row)
-
-SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
- cube_distance
----------------
- 0.5
-(1 row)
-
-SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
- cube_distance
----------------
- 0
-(1 row)
-
-SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
- cube_distance
----------------
- 190
-(1 row)
-
-SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
- cube_distance
-------------------
- 140.762210837994
-(1 row)
-
--- Test of cube function (text to cube)
---
-SELECT cube('(1,1.2)'::text);
- cube
-----------
- (1, 1.2)
-(1 row)
-
-SELECT cube(NULL);
- cube
-------
-
-(1 row)
-
--- Test of cube_dim function (dimensions stored in cube)
---
-SELECT cube_dim('(0)'::cube);
- cube_dim
-----------
- 1
-(1 row)
-
-SELECT cube_dim('(0,0)'::cube);
- cube_dim
-----------
- 2
-(1 row)
-
-SELECT cube_dim('(0,0,0)'::cube);
- cube_dim
-----------
- 3
-(1 row)
-
-SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
- cube_dim
-----------
- 3
-(1 row)
-
-SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
- cube_dim
-----------
- 5
-(1 row)
-
--- Test of cube_ll_coord function (retrieves LL coordinate values)
---
-SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
- cube_ll_coord
----------------
- -1
-(1 row)
-
-SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
- cube_ll_coord
----------------
- -2
-(1 row)
-
-SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
- cube_ll_coord
----------------
- 0
-(1 row)
-
-SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
- cube_ll_coord
----------------
- 1
-(1 row)
-
-SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
- cube_ll_coord
----------------
- 2
-(1 row)
-
-SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
- cube_ll_coord
----------------
- 0
-(1 row)
-
-SELECT cube_ll_coord('(42,137)'::cube, 1);
- cube_ll_coord
----------------
- 42
-(1 row)
-
-SELECT cube_ll_coord('(42,137)'::cube, 2);
- cube_ll_coord
----------------
- 137
-(1 row)
-
-SELECT cube_ll_coord('(42,137)'::cube, 3);
- cube_ll_coord
----------------
- 0
-(1 row)
-
--- Test of cube_ur_coord function (retrieves UR coordinate values)
---
-SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
- cube_ur_coord
----------------
- 2
-(1 row)
-
-SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
- cube_ur_coord
----------------
- 1
-(1 row)
-
-SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
- cube_ur_coord
----------------
- 0
-(1 row)
-
-SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
- cube_ur_coord
----------------
- 1
-(1 row)
-
-SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
- cube_ur_coord
----------------
- 2
-(1 row)
-
-SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
- cube_ur_coord
----------------
- 0
-(1 row)
-
-SELECT cube_ur_coord('(42,137)'::cube, 1);
- cube_ur_coord
----------------
- 42
-(1 row)
-
-SELECT cube_ur_coord('(42,137)'::cube, 2);
- cube_ur_coord
----------------
- 137
-(1 row)
-
-SELECT cube_ur_coord('(42,137)'::cube, 3);
- cube_ur_coord
----------------
- 0
-(1 row)
-
--- Test of cube_is_point
---
-SELECT cube_is_point('(0)'::cube);
- cube_is_point
----------------
- t
-(1 row)
-
-SELECT cube_is_point('(0,1,2)'::cube);
- cube_is_point
----------------
- t
-(1 row)
-
-SELECT cube_is_point('(0,1,2),(0,1,2)'::cube);
- cube_is_point
----------------
- t
-(1 row)
-
-SELECT cube_is_point('(0,1,2),(-1,1,2)'::cube);
- cube_is_point
----------------
- f
-(1 row)
-
-SELECT cube_is_point('(0,1,2),(0,-1,2)'::cube);
- cube_is_point
----------------
- f
-(1 row)
-
-SELECT cube_is_point('(0,1,2),(0,1,-2)'::cube);
- cube_is_point
----------------
- f
-(1 row)
-
--- Test of cube_enlarge (enlarging and shrinking cubes)
---
-SELECT cube_enlarge('(0)'::cube, 0, 0);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, 0, 1);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, 0, 2);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(2),(-2)'::cube, 0, 4);
- cube_enlarge
---------------
- (-2),(2)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, 1, 0);
- cube_enlarge
---------------
- (-1),(1)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, 1, 1);
- cube_enlarge
---------------
- (-1),(1)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, 1, 2);
- cube_enlarge
------------------
- (-1, -1),(1, 1)
-(1 row)
-
-SELECT cube_enlarge('(2),(-2)'::cube, 1, 4);
- cube_enlarge
--------------------------------
- (-3, -1, -1, -1),(3, 1, 1, 1)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, -1, 0);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, -1, 1);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(0)'::cube, -1, 2);
- cube_enlarge
---------------
- (0)
-(1 row)
-
-SELECT cube_enlarge('(2),(-2)'::cube, -1, 4);
- cube_enlarge
---------------
- (-1),(1)
-(1 row)
-
-SELECT cube_enlarge('(0,0,0)'::cube, 1, 0);
- cube_enlarge
-------------------------
- (-1, -1, -1),(1, 1, 1)
-(1 row)
-
-SELECT cube_enlarge('(0,0,0)'::cube, 1, 2);
- cube_enlarge
-------------------------
- (-1, -1, -1),(1, 1, 1)
-(1 row)
-
-SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 1, 2);
- cube_enlarge
------------------
- (-4, -3),(3, 8)
-(1 row)
-
-SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 3, 2);
- cube_enlarge
-------------------
- (-6, -5),(5, 10)
-(1 row)
-
-SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -1, 2);
- cube_enlarge
------------------
- (-2, -1),(1, 6)
-(1 row)
-
-SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
- cube_enlarge
----------------------
- (-0.5, 1),(-0.5, 4)
-(1 row)
-
-SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
- cube_enlarge
---------------
- (42, 0, 0)
-(1 row)
-
-SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
- cube_enlarge
---------------
- (42, 0, 0)
-(1 row)
-
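
cube_enlarge(c, r, n) grows c by radius r in at least n dimensions, padding
missing dimensions with zero when it extends them; a negative r shrinks, and
a dimension that would invert collapses to its midpoint instead, which is why
the last two queries both return (42, 0, 0). A sketch of the midpoint case:

    -- shrinking the 5-wide first dimension of '(2,-2),(-3,7)' by 3 per side
    -- would invert it, so it collapses to its midpoint, -0.5
    SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);  -- (-0.5, 1),(-0.5, 4)
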
--- Test of cube_union (MBR for two cubes)
---
-SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
- cube_union
-----------------------
- (1, 2, 0),(8, 9, 10)
-(1 row)
-
-SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
- cube_union
----------------------------
- (1, 2, 0, 0),(4, 2, 0, 0)
-(1 row)
-
-SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
- cube_union
----------------
- (1, 2),(4, 2)
-(1 row)
-
-SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
- cube_union
-------------
- (1, 2)
-(1 row)
-
-SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
- cube_union
-------------
- (1, 2, 0)
-(1 row)
-
--- Test of cube_inter
---
-SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
- cube_inter
------------------
- (3, 4),(10, 11)
-(1 row)
-
-SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
- cube_inter
----------------
- (3, 4),(6, 5)
-(1 row)
-
-SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
- cube_inter
--------------------
- (13, 14),(10, 11)
-(1 row)
-
-SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
- cube_inter
-------------------
- (3, 14),(10, 11)
-(1 row)
-
-SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
- cube_inter
-------------
- (10, 11)
-(1 row)
-
-SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
- cube_inter
-------------
- (1, 2, 3)
-(1 row)
-
-SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
- cube_inter
----------------------
- (5, 6, 3),(1, 2, 3)
-(1 row)
-
--- Test of cube_size
---
-SELECT cube_size('(4,8),(15,16)'::cube);
- cube_size
------------
- 88
-(1 row)
-
-SELECT cube_size('(42,137)'::cube);
- cube_size
------------
- 0
-(1 row)
-
--- Test of distances
---
-SELECT cube_distance('(1,1)'::cube, '(4,5)'::cube);
- cube_distance
----------------
- 5
-(1 row)
-
-SELECT '(1,1)'::cube <-> '(4,5)'::cube as d_e;
- d_e
------
- 5
-(1 row)
-
-SELECT distance_chebyshev('(1,1)'::cube, '(4,5)'::cube);
- distance_chebyshev
---------------------
- 4
-(1 row)
-
-SELECT '(1,1)'::cube <=> '(4,5)'::cube as d_c;
- d_c
------
- 4
-(1 row)
-
-SELECT distance_taxicab('(1,1)'::cube, '(4,5)'::cube);
- distance_taxicab
-------------------
- 7
-(1 row)
-
-SELECT '(1,1)'::cube <#> '(4,5)'::cube as d_t;
- d_t
------
- 7
-(1 row)
-
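
The three operators pair with the functions just above: <-> is Euclidean
distance, <=> is Chebyshev (maximum per-coordinate difference), and <#> is
taxicab (sum of per-coordinate differences). For the pair used here the
arithmetic works out as follows:

    -- Euclidean: sqrt((4-1)^2 + (5-1)^2) = sqrt(25) = 5
    -- Chebyshev: max(|4-1|, |5-1|)       = 4
    -- taxicab:   |4-1| + |5-1|           = 7
    SELECT sqrt((4-1)^2 + (5-1)^2)      AS d_e,
           greatest(abs(4-1), abs(5-1)) AS d_c,
           abs(4-1) + abs(5-1)          AS d_t;
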
--- zero for overlapping
-SELECT cube_distance('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
- cube_distance
----------------
- 0
-(1 row)
-
-SELECT distance_chebyshev('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
- distance_chebyshev
---------------------
- 0
-(1 row)
-
-SELECT distance_taxicab('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube);
- distance_taxicab
-------------------
- 0
-(1 row)
-
--- coordinate access
-SELECT cube(array[10,20,30], array[40,50,60])->1;
- ?column?
-----------
- 10
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])->1;
- ?column?
-----------
- 40
-(1 row)
-
-SELECT cube(array[10,20,30], array[40,50,60])->6;
- ?column?
-----------
- 60
-(1 row)
-
-SELECT cube(array[10,20,30], array[40,50,60])->0;
-ERROR: cube index 0 is out of bounds
-SELECT cube(array[10,20,30], array[40,50,60])->7;
-ERROR: cube index 7 is out of bounds
-SELECT cube(array[10,20,30], array[40,50,60])->-1;
-ERROR: cube index -1 is out of bounds
-SELECT cube(array[10,20,30], array[40,50,60])->-6;
-ERROR: cube index -6 is out of bounds
-SELECT cube(array[10,20,30])->3;
- ?column?
-----------
- 30
-(1 row)
-
-SELECT cube(array[10,20,30])->6;
- ?column?
-----------
- 30
-(1 row)
-
-SELECT cube(array[10,20,30])->-6;
-ERROR: cube index -6 is out of bounds
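
The -> operator reads coordinates in storage order: indexes 1..n return the
corner given first, n+1..2n the corner given second, and for a point cube
both halves return the same values (hence ->3 and ->6 both giving 30 above);
anything outside that range errors out. For example:

    -- corners are returned as stored, with no lower/upper normalization
    SELECT cube(array[40,50,60], array[10,20,30]) -> 1 AS first_corner_dim1,  -- 40
           cube(array[40,50,60], array[10,20,30]) -> 4 AS second_corner_dim1; -- 10
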
--- "normalized" coordinate access
-SELECT cube(array[10,20,30], array[40,50,60])~>1;
- ?column?
-----------
- 10
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])~>1;
- ?column?
-----------
- 10
-(1 row)
-
-SELECT cube(array[10,20,30], array[40,50,60])~>2;
- ?column?
-----------
- 40
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])~>2;
- ?column?
-----------
- 40
-(1 row)
-
-SELECT cube(array[10,20,30], array[40,50,60])~>3;
- ?column?
-----------
- 20
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])~>3;
- ?column?
-----------
- 20
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])~>0;
-ERROR: zero cube index is not defined
-SELECT cube(array[40,50,60], array[10,20,30])~>4;
- ?column?
-----------
- 50
-(1 row)
-
-SELECT cube(array[40,50,60], array[10,20,30])~>(-1);
- ?column?
-----------
- -10
-(1 row)
-
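
The ~> "normalized" accessor, by contrast, is what the KNN-GiST sort support
relies on: index 2k-1 returns the lower bound of dimension k and 2k the upper
bound, computed as min/max rather than taken in storage order, and a negated
index returns the negated value so that a descending ordering can be
expressed as an ascending one. A sketch consistent with the output above:

    -- corners stored "backwards" still normalize: lower is 10, upper is 40
    SELECT cube(array[40,50,60], array[10,20,30]) ~> 1    AS lower_dim1,  -- 10
           cube(array[40,50,60], array[10,20,30]) ~> 2    AS upper_dim1,  -- 40
           cube(array[40,50,60], array[10,20,30]) ~> (-1) AS neg_lower;   -- -10
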
--- Load some example data and build the index
---
-CREATE TABLE test_cube (c cube);
-\copy test_cube from 'data/test_cube.data'
-CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
- c
---------------------------
- (337, 455),(240, 359)
- (759, 187),(662, 163)
- (1444, 403),(1346, 344)
- (1594, 1043),(1517, 971)
- (2424, 160),(2424, 81)
-(5 rows)
-
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
- c
---------------------------
- (337, 455),(240, 359)
- (759, 187),(662, 163)
- (1444, 403),(1346, 344)
- (1594, 1043),(1517, 971)
- (2424, 160),(2424, 81)
-(5 rows)
-
--- Test index-only scans
-SET enable_bitmapscan = false;
-EXPLAIN (COSTS OFF)
-SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c;
- QUERY PLAN
---------------------------------------------------------
- Sort
- Sort Key: c
- -> Index Only Scan using test_cube_ix on test_cube
- Index Cond: (c <@ '(3000, 1000),(0, 0)'::cube)
-(4 rows)
-
-SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c;
- c
--------------------------
- (337, 455),(240, 359)
- (759, 187),(662, 163)
- (1444, 403),(1346, 344)
- (2424, 160),(2424, 81)
-(4 rows)
-
-RESET enable_bitmapscan;
--- Test kNN
-INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some corner cases
-SET enable_seqscan = false;
--- Test different metrics
-SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------------------
- (337, 455),(240, 359) | 0
- (1, 1) | 140.007142674936
- (759, 187),(662, 163) | 162
- (948, 1201),(907, 1156) | 772.000647668122
- (1444, 403),(1346, 344) | 846
-(5 rows)
-
-SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------
- (337, 455),(240, 359) | 0
- (1, 1) | 99
- (759, 187),(662, 163) | 162
- (948, 1201),(907, 1156) | 656
- (1444, 403),(1346, 344) | 846
-(5 rows)
-
-SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------
- (337, 455),(240, 359) | 0
- (759, 187),(662, 163) | 162
- (1, 1) | 198
- (1444, 403),(1346, 344) | 846
- (369, 1457),(278, 1409) | 909
-(5 rows)
-
--- Test sorting by coordinates
-SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound
- ?column? | c
-----------+---------------------------
- 0 | (0, 100000)
- 1 | (1, 1)
- 3 | (54, 38679),(3, 38602)
- 15 | (83, 10271),(15, 10265)
- 64 | (122, 46832),(64, 46762)
- 92 | (167, 17214),(92, 17184)
- 107 | (161, 24465),(107, 24374)
- 120 | (162, 26040),(120, 25963)
- 138 | (154, 4019),(138, 3990)
- 175 | (259, 1850),(175, 1820)
- 179 | (207, 40886),(179, 40879)
- 204 | (288, 49588),(204, 49571)
- 226 | (270, 32616),(226, 32607)
- 235 | (318, 31489),(235, 31404)
- 240 | (337, 455),(240, 359)
-(15 rows)
-
-SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound
- ?column? | c
-----------+---------------------------
- 0 | (0, 100000)
- 1 | (1, 1)
- 54 | (54, 38679),(3, 38602)
- 83 | (83, 10271),(15, 10265)
- 122 | (122, 46832),(64, 46762)
- 154 | (154, 4019),(138, 3990)
- 161 | (161, 24465),(107, 24374)
- 162 | (162, 26040),(120, 25963)
- 167 | (167, 17214),(92, 17184)
- 207 | (207, 40886),(179, 40879)
- 259 | (259, 1850),(175, 1820)
- 270 | (270, 29508),(264, 29440)
- 270 | (270, 32616),(226, 32607)
- 288 | (288, 49588),(204, 49571)
- 318 | (318, 31489),(235, 31404)
-(15 rows)
-
-SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound
- ?column? | c
-----------+---------------------------
- 0 | (100000)
- 1 | (1, 1)
- 6 | (30333, 50),(30273, 6)
- 43 | (43301, 75),(43227, 43)
- 51 | (19650, 142),(19630, 51)
- 81 | (2424, 160),(2424, 81)
- 108 | (3449, 171),(3354, 108)
- 109 | (18037, 155),(17941, 109)
- 114 | (28511, 208),(28479, 114)
- 118 | (19946, 217),(19941, 118)
- 139 | (16906, 191),(16816, 139)
- 163 | (759, 187),(662, 163)
- 181 | (22684, 266),(22656, 181)
- 213 | (24423, 255),(24360, 213)
- 222 | (45989, 249),(45910, 222)
-(15 rows)
-
-SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound
- ?column? | c
-----------+---------------------------
- 0 | (100000)
- 1 | (1, 1)
- 50 | (30333, 50),(30273, 6)
- 75 | (43301, 75),(43227, 43)
- 142 | (19650, 142),(19630, 51)
- 155 | (18037, 155),(17941, 109)
- 160 | (2424, 160),(2424, 81)
- 171 | (3449, 171),(3354, 108)
- 187 | (759, 187),(662, 163)
- 191 | (16906, 191),(16816, 139)
- 208 | (28511, 208),(28479, 114)
- 217 | (19946, 217),(19941, 118)
- 249 | (45989, 249),(45910, 222)
- 255 | (24423, 255),(24360, 213)
- 266 | (22684, 266),(22656, 181)
-(15 rows)
-
-SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound
- ?column? | c
-----------+-------------------------------
- -100000 | (100000)
- -49951 | (50027, 49230),(49951, 49214)
- -49937 | (49980, 35004),(49937, 34963)
- -49927 | (49985, 6436),(49927, 6338)
- -49908 | (49999, 27218),(49908, 27176)
- -49905 | (49954, 1340),(49905, 1294)
- -49902 | (49944, 25163),(49902, 25153)
- -49898 | (49981, 34876),(49898, 34786)
- -49897 | (49957, 43390),(49897, 43384)
- -49848 | (49853, 18504),(49848, 18503)
- -49818 | (49902, 41752),(49818, 41746)
- -49810 | (49907, 30225),(49810, 30158)
- -49808 | (49843, 5175),(49808, 5145)
- -49805 | (49887, 24274),(49805, 24184)
- -49798 | (49847, 7128),(49798, 7067)
-(15 rows)
-
-SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound
- ?column? | c
-----------+-------------------------------
- -100000 | (100000)
- -50027 | (50027, 49230),(49951, 49214)
- -49999 | (49999, 27218),(49908, 27176)
- -49985 | (49985, 6436),(49927, 6338)
- -49981 | (49981, 34876),(49898, 34786)
- -49980 | (49980, 35004),(49937, 34963)
- -49957 | (49957, 43390),(49897, 43384)
- -49954 | (49954, 1340),(49905, 1294)
- -49944 | (49944, 25163),(49902, 25153)
- -49907 | (49907, 30225),(49810, 30158)
- -49902 | (49902, 41752),(49818, 41746)
- -49887 | (49887, 24274),(49805, 24184)
- -49853 | (49853, 18504),(49848, 18503)
- -49847 | (49847, 7128),(49798, 7067)
- -49843 | (49843, 5175),(49808, 5145)
-(15 rows)
-
-SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound
- ?column? | c
-----------+-------------------------------
- -100000 | (0, 100000)
- -49992 | (30746, 50040),(30727, 49992)
- -49987 | (36311, 50073),(36258, 49987)
- -49934 | (3531, 49962),(3463, 49934)
- -49915 | (17954, 49975),(17865, 49915)
- -49914 | (2168, 50012),(2108, 49914)
- -49913 | (31287, 49923),(31236, 49913)
- -49885 | (21551, 49983),(21492, 49885)
- -49878 | (43925, 49912),(43888, 49878)
- -49849 | (19128, 49932),(19112, 49849)
- -49844 | (38266, 49852),(38233, 49844)
- -49836 | (14913, 49873),(14849, 49836)
- -49834 | (37595, 49849),(37581, 49834)
- -49830 | (46151, 49848),(46058, 49830)
- -49818 | (29261, 49910),(29247, 49818)
-(15 rows)
-
-SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound
- ?column? | c
-----------+-------------------------------
- -100000 | (0, 100000)
- -50073 | (36311, 50073),(36258, 49987)
- -50040 | (30746, 50040),(30727, 49992)
- -50012 | (2168, 50012),(2108, 49914)
- -49983 | (21551, 49983),(21492, 49885)
- -49975 | (17954, 49975),(17865, 49915)
- -49962 | (3531, 49962),(3463, 49934)
- -49932 | (19128, 49932),(19112, 49849)
- -49923 | (31287, 49923),(31236, 49913)
- -49912 | (43925, 49912),(43888, 49878)
- -49910 | (29261, 49910),(29247, 49818)
- -49873 | (14913, 49873),(14849, 49836)
- -49858 | (20007, 49858),(19921, 49778)
- -49852 | (38266, 49852),(38233, 49844)
- -49849 | (37595, 49849),(37581, 49834)
-(15 rows)
-
--- Same queries with sequential scan (should give the same results as above)
-RESET enable_seqscan;
-SET enable_indexscan = OFF;
-SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------------------
- (337, 455),(240, 359) | 0
- (1, 1) | 140.007142674936
- (759, 187),(662, 163) | 162
- (948, 1201),(907, 1156) | 772.000647668122
- (1444, 403),(1346, 344) | 846
-(5 rows)
-
-SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------
- (337, 455),(240, 359) | 0
- (1, 1) | 99
- (759, 187),(662, 163) | 162
- (948, 1201),(907, 1156) | 656
- (1444, 403),(1346, 344) | 846
-(5 rows)
-
-SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
- c | dist
--------------------------+------
- (337, 455),(240, 359) | 0
- (759, 187),(662, 163) | 162
- (1, 1) | 198
- (1444, 403),(1346, 344) | 846
- (369, 1457),(278, 1409) | 909
-(5 rows)
-
-SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound
- ?column? | c
-----------+---------------------------
- 0 | (0, 100000)
- 1 | (1, 1)
- 3 | (54, 38679),(3, 38602)
- 15 | (83, 10271),(15, 10265)
- 64 | (122, 46832),(64, 46762)
- 92 | (167, 17214),(92, 17184)
- 107 | (161, 24465),(107, 24374)
- 120 | (162, 26040),(120, 25963)
- 138 | (154, 4019),(138, 3990)
- 175 | (259, 1850),(175, 1820)
- 179 | (207, 40886),(179, 40879)
- 204 | (288, 49588),(204, 49571)
- 226 | (270, 32616),(226, 32607)
- 235 | (318, 31489),(235, 31404)
- 240 | (337, 455),(240, 359)
-(15 rows)
-
-SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound
- ?column? | c
-----------+---------------------------
- 0 | (0, 100000)
- 1 | (1, 1)
- 54 | (54, 38679),(3, 38602)
- 83 | (83, 10271),(15, 10265)
- 122 | (122, 46832),(64, 46762)
- 154 | (154, 4019),(138, 3990)
- 161 | (161, 24465),(107, 24374)
- 162 | (162, 26040),(120, 25963)
- 167 | (167, 17214),(92, 17184)
- 207 | (207, 40886),(179, 40879)
- 259 | (259, 1850),(175, 1820)
- 270 | (270, 29508),(264, 29440)
- 270 | (270, 32616),(226, 32607)
- 288 | (288, 49588),(204, 49571)
- 318 | (318, 31489),(235, 31404)
-(15 rows)
-
-SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound
- ?column? | c
-----------+---------------------------
- 0 | (100000)
- 1 | (1, 1)
- 6 | (30333, 50),(30273, 6)
- 43 | (43301, 75),(43227, 43)
- 51 | (19650, 142),(19630, 51)
- 81 | (2424, 160),(2424, 81)
- 108 | (3449, 171),(3354, 108)
- 109 | (18037, 155),(17941, 109)
- 114 | (28511, 208),(28479, 114)
- 118 | (19946, 217),(19941, 118)
- 139 | (16906, 191),(16816, 139)
- 163 | (759, 187),(662, 163)
- 181 | (22684, 266),(22656, 181)
- 213 | (24423, 255),(24360, 213)
- 222 | (45989, 249),(45910, 222)
-(15 rows)
-
-SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound
- ?column? | c
-----------+---------------------------
- 0 | (100000)
- 1 | (1, 1)
- 50 | (30333, 50),(30273, 6)
- 75 | (43301, 75),(43227, 43)
- 142 | (19650, 142),(19630, 51)
- 155 | (18037, 155),(17941, 109)
- 160 | (2424, 160),(2424, 81)
- 171 | (3449, 171),(3354, 108)
- 187 | (759, 187),(662, 163)
- 191 | (16906, 191),(16816, 139)
- 208 | (28511, 208),(28479, 114)
- 217 | (19946, 217),(19941, 118)
- 249 | (45989, 249),(45910, 222)
- 255 | (24423, 255),(24360, 213)
- 266 | (22684, 266),(22656, 181)
-(15 rows)
-
-SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound
- ?column? | c
-----------+-------------------------------
- -100000 | (100000)
- -49951 | (50027, 49230),(49951, 49214)
- -49937 | (49980, 35004),(49937, 34963)
- -49927 | (49985, 6436),(49927, 6338)
- -49908 | (49999, 27218),(49908, 27176)
- -49905 | (49954, 1340),(49905, 1294)
- -49902 | (49944, 25163),(49902, 25153)
- -49898 | (49981, 34876),(49898, 34786)
- -49897 | (49957, 43390),(49897, 43384)
- -49848 | (49853, 18504),(49848, 18503)
- -49818 | (49902, 41752),(49818, 41746)
- -49810 | (49907, 30225),(49810, 30158)
- -49808 | (49843, 5175),(49808, 5145)
- -49805 | (49887, 24274),(49805, 24184)
- -49798 | (49847, 7128),(49798, 7067)
-(15 rows)
-
-SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound
- ?column? | c
-----------+-------------------------------
- -100000 | (100000)
- -50027 | (50027, 49230),(49951, 49214)
- -49999 | (49999, 27218),(49908, 27176)
- -49985 | (49985, 6436),(49927, 6338)
- -49981 | (49981, 34876),(49898, 34786)
- -49980 | (49980, 35004),(49937, 34963)
- -49957 | (49957, 43390),(49897, 43384)
- -49954 | (49954, 1340),(49905, 1294)
- -49944 | (49944, 25163),(49902, 25153)
- -49907 | (49907, 30225),(49810, 30158)
- -49902 | (49902, 41752),(49818, 41746)
- -49887 | (49887, 24274),(49805, 24184)
- -49853 | (49853, 18504),(49848, 18503)
- -49847 | (49847, 7128),(49798, 7067)
- -49843 | (49843, 5175),(49808, 5145)
-(15 rows)
-
-SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound
- ?column? | c
-----------+-------------------------------
- -100000 | (0, 100000)
- -49992 | (30746, 50040),(30727, 49992)
- -49987 | (36311, 50073),(36258, 49987)
- -49934 | (3531, 49962),(3463, 49934)
- -49915 | (17954, 49975),(17865, 49915)
- -49914 | (2168, 50012),(2108, 49914)
- -49913 | (31287, 49923),(31236, 49913)
- -49885 | (21551, 49983),(21492, 49885)
- -49878 | (43925, 49912),(43888, 49878)
- -49849 | (19128, 49932),(19112, 49849)
- -49844 | (38266, 49852),(38233, 49844)
- -49836 | (14913, 49873),(14849, 49836)
- -49834 | (37595, 49849),(37581, 49834)
- -49830 | (46151, 49848),(46058, 49830)
- -49818 | (29261, 49910),(29247, 49818)
-(15 rows)
-
-SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound
- ?column? | c
-----------+-------------------------------
- -100000 | (0, 100000)
- -50073 | (36311, 50073),(36258, 49987)
- -50040 | (30746, 50040),(30727, 49992)
- -50012 | (2168, 50012),(2108, 49914)
- -49983 | (21551, 49983),(21492, 49885)
- -49975 | (17954, 49975),(17865, 49915)
- -49962 | (3531, 49962),(3463, 49934)
- -49932 | (19128, 49932),(19112, 49849)
- -49923 | (31287, 49923),(31236, 49913)
- -49912 | (43925, 49912),(43888, 49878)
- -49910 | (29261, 49910),(29247, 49818)
- -49873 | (14913, 49873),(14849, 49836)
- -49858 | (20007, 49858),(19921, 49778)
- -49852 | (38266, 49852),(38233, 49844)
- -49849 | (37595, 49849),(37581, 49834)
-(15 rows)
-
-RESET enable_indexscan;
diff --git a/contrib/cube/expected/cube_sci.out b/contrib/cube/expected/cube_sci.out
new file mode 100644
index 00000000000..488499ac8ea
--- /dev/null
+++ b/contrib/cube/expected/cube_sci.out
@@ -0,0 +1,106 @@
+---
+--- Testing cube output in scientific notation. This was put into a separate
+--- test because it has platform-dependent output.
+---
+SELECT '1e27'::cube AS cube;
+ cube
+---------
+ (1e+27)
+(1 row)
+
+SELECT '-1e27'::cube AS cube;
+ cube
+----------
+ (-1e+27)
+(1 row)
+
+SELECT '1.0e27'::cube AS cube;
+ cube
+---------
+ (1e+27)
+(1 row)
+
+SELECT '-1.0e27'::cube AS cube;
+ cube
+----------
+ (-1e+27)
+(1 row)
+
+SELECT '1e+27'::cube AS cube;
+ cube
+---------
+ (1e+27)
+(1 row)
+
+SELECT '-1e+27'::cube AS cube;
+ cube
+----------
+ (-1e+27)
+(1 row)
+
+SELECT '1.0e+27'::cube AS cube;
+ cube
+---------
+ (1e+27)
+(1 row)
+
+SELECT '-1.0e+27'::cube AS cube;
+ cube
+----------
+ (-1e+27)
+(1 row)
+
+SELECT '1e-7'::cube AS cube;
+ cube
+---------
+ (1e-07)
+(1 row)
+
+SELECT '-1e-7'::cube AS cube;
+ cube
+----------
+ (-1e-07)
+(1 row)
+
+SELECT '1.0e-7'::cube AS cube;
+ cube
+---------
+ (1e-07)
+(1 row)
+
+SELECT '-1.0e-7'::cube AS cube;
+ cube
+----------
+ (-1e-07)
+(1 row)
+
+SELECT '1e-300'::cube AS cube;
+ cube
+----------
+ (1e-300)
+(1 row)
+
+SELECT '-1e-300'::cube AS cube;
+ cube
+-----------
+ (-1e-300)
+(1 row)
+
+SELECT '1234567890123456'::cube AS cube;
+ cube
+-------------------------
+ (1.234567890123456e+15)
+(1 row)
+
+SELECT '+1234567890123456'::cube AS cube;
+ cube
+-------------------------
+ (1.234567890123456e+15)
+(1 row)
+
+SELECT '-1234567890123456'::cube AS cube;
+ cube
+--------------------------
+ (-1.234567890123456e+15)
+(1 row)
+
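
All of these literals are float8 underneath, so spellings that parse to the
same double compare equal regardless of which notation the output picks; the
platform dependence is only in the text form. For instance:

    -- both spellings parse to the same float8 value
    SELECT '1e27'::cube = '1000000000000000000000000000'::cube AS same_value;  -- t
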
diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql
index f599e7f7c03..7f8b2e39799 100644
--- a/contrib/cube/sql/cube.sql
+++ b/contrib/cube/sql/cube.sql
@@ -22,26 +22,9 @@ SELECT '.1'::cube AS cube;
SELECT '-.1'::cube AS cube;
SELECT '1.0'::cube AS cube;
SELECT '-1.0'::cube AS cube;
-SELECT '1e27'::cube AS cube;
-SELECT '-1e27'::cube AS cube;
-SELECT '1.0e27'::cube AS cube;
-SELECT '-1.0e27'::cube AS cube;
-SELECT '1e+27'::cube AS cube;
-SELECT '-1e+27'::cube AS cube;
-SELECT '1.0e+27'::cube AS cube;
-SELECT '-1.0e+27'::cube AS cube;
-SELECT '1e-7'::cube AS cube;
-SELECT '-1e-7'::cube AS cube;
-SELECT '1.0e-7'::cube AS cube;
-SELECT '-1.0e-7'::cube AS cube;
-SELECT '1e-300'::cube AS cube;
-SELECT '-1e-300'::cube AS cube;
SELECT 'infinity'::cube AS cube;
SELECT '-infinity'::cube AS cube;
SELECT 'NaN'::cube AS cube;
-SELECT '1234567890123456'::cube AS cube;
-SELECT '+1234567890123456'::cube AS cube;
-SELECT '-1234567890123456'::cube AS cube;
SELECT '.1234567890123456'::cube AS cube;
SELECT '+.1234567890123456'::cube AS cube;
SELECT '-.1234567890123456'::cube AS cube;
@@ -125,6 +108,12 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+-- test for limits: this should pass
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,100)));
+-- and this should fail
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,101)));
+
+
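
The array(SELECT ... FROM generate_series(...)) idiom is just a compact way
to build an index array of a given length, so this pair of queries brackets
the CUBE_MAX_DIM limit of 100. The same trick probes the constructor limit
directly:

    -- exactly at the 100-dimension limit: accepted
    SELECT cube_dim(cube(array(SELECT 0 FROM generate_series(1, 100))));  -- 100
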
--
-- Test point processing
@@ -144,9 +133,21 @@ SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
-
+-- create too big cube from literal
select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube;
+-- from an array
+select cube(array(SELECT 0 as a FROM generate_series(1,101)));
+select cube(array(SELECT 0 as a FROM generate_series(1,101)),array(SELECT 0 as a FROM generate_series(1,101)));
+
+-- extend cube beyond limit
+-- this should work
+select cube(array(SELECT 0 as a FROM generate_series(1,100)));
+select cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100)));
+-- this should fail
+select cube(cube(array(SELECT 0 as a FROM generate_series(1,100))), 0);
+select cube(cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))), 0, 0);
+
--
-- testing the operators
@@ -335,10 +336,12 @@ SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
SELECT cube_size('(4,8),(15,16)'::cube);
SELECT cube_size('(42,137)'::cube);
--- Test of distances
+-- Test of distances (euclidean distance may not be bit-exact)
--
+SET extra_float_digits = 0;
SELECT cube_distance('(1,1)'::cube, '(4,5)'::cube);
SELECT '(1,1)'::cube <-> '(4,5)'::cube as d_e;
+RESET extra_float_digits;
SELECT distance_chebyshev('(1,1)'::cube, '(4,5)'::cube);
SELECT '(1,1)'::cube <=> '(4,5)'::cube as d_c;
SELECT distance_taxicab('(1,1)'::cube, '(4,5)'::cube);
@@ -394,7 +397,9 @@ INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some c
SET enable_seqscan = false;
-- Test different metrics
+SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
+RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
@@ -411,7 +416,9 @@ SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upp
-- Same queries with sequential scan (should give the same results as above)
RESET enable_seqscan;
SET enable_indexscan = OFF;
+SET extra_float_digits = 0;
SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5;
+RESET extra_float_digits;
SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5;
SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound
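
The extra_float_digits bracketing above exists because, of the three metrics,
only the Euclidean distance yields an irrational result on these inputs;
pinning extra_float_digits to 0 trims float8 output to a digit count that is
stable across platforms, and the immediate RESET keeps the rest of the file
on the default. Roughly (exact digit counts vary by server version):

    SET extra_float_digits = 0;
    SELECT '(0,0)'::cube <-> '(1,1)'::cube;  -- 1.41421356237309 or similar
    RESET extra_float_digits;
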
diff --git a/contrib/cube/sql/cube_sci.sql b/contrib/cube/sql/cube_sci.sql
new file mode 100644
index 00000000000..35a540779a8
--- /dev/null
+++ b/contrib/cube/sql/cube_sci.sql
@@ -0,0 +1,22 @@
+---
+--- Testing cube output in scientific notation. This was put into a separate
+--- test because it has platform-dependent output.
+---
+
+SELECT '1e27'::cube AS cube;
+SELECT '-1e27'::cube AS cube;
+SELECT '1.0e27'::cube AS cube;
+SELECT '-1.0e27'::cube AS cube;
+SELECT '1e+27'::cube AS cube;
+SELECT '-1e+27'::cube AS cube;
+SELECT '1.0e+27'::cube AS cube;
+SELECT '-1.0e+27'::cube AS cube;
+SELECT '1e-7'::cube AS cube;
+SELECT '-1e-7'::cube AS cube;
+SELECT '1.0e-7'::cube AS cube;
+SELECT '-1.0e-7'::cube AS cube;
+SELECT '1e-300'::cube AS cube;
+SELECT '-1e-300'::cube AS cube;
+SELECT '1234567890123456'::cube AS cube;
+SELECT '+1234567890123456'::cube AS cube;
+SELECT '-1234567890123456'::cube AS cube;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index c6460688486..e432457e2d5 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -9,7 +9,7 @@
* Shridhar Daithankar
*
* contrib/dblink/dblink.c
- * Copyright (c) 2001-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2019, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* Permission to use, copy, modify, and distribute this software and its
@@ -37,7 +37,9 @@
#include "libpq-fe.h"
#include "access/htup_details.h"
+#include "access/relation.h"
#include "access/reloptions.h"
+#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
#include "catalog/pg_foreign_data_wrapper.h"
@@ -58,7 +60,6 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
-#include "utils/tqual.h"
#include "utils/varlena.h"
PG_MODULE_MAGIC;
@@ -88,12 +89,12 @@ typedef struct storeInfo
static Datum dblink_record_internal(FunctionCallInfo fcinfo, bool is_async);
static void prepTuplestoreResult(FunctionCallInfo fcinfo);
static void materializeResult(FunctionCallInfo fcinfo, PGconn *conn,
- PGresult *res);
+ PGresult *res);
static void materializeQueryResult(FunctionCallInfo fcinfo,
- PGconn *conn,
- const char *conname,
- const char *sql,
- bool fail);
+ PGconn *conn,
+ const char *conname,
+ const char *sql,
+ bool fail);
static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql);
static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
static remoteConn *getConnectionByName(const char *name);
@@ -113,14 +114,14 @@ static char *generate_relation_name(Relation rel);
static void dblink_connstr_check(const char *connstr);
static void dblink_security_check(PGconn *conn, remoteConn *rconn);
static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
- bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
+ bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
static char *get_connect_string(const char *servername);
static char *escape_param_str(const char *from);
static void validate_pkattnums(Relation rel,
- int2vector *pkattnums_arg, int32 pknumatts_arg,
- int **pkattnums, int *pknumatts);
+ int2vector *pkattnums_arg, int32 pknumatts_arg,
+ int **pkattnums, int *pknumatts);
static bool is_valid_dblink_option(const PQconninfoOption *options,
- const char *option, Oid context);
+ const char *option, Oid context);
static int applyRemoteGucs(PGconn *conn);
static void restoreLocalGucs(int nestlevel);
@@ -849,7 +850,7 @@ materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res)
* need a tuple descriptor representing one TEXT column to return
* the command status string as our result tuple
*/
- tupdesc = CreateTemplateTupleDesc(1, false);
+ tupdesc = CreateTemplateTupleDesc(1);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
TEXTOID, -1, 0);
ntuples = 1;
@@ -981,13 +982,11 @@ materializeQueryResult(FunctionCallInfo fcinfo,
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
PGresult *volatile res = NULL;
- volatile storeInfo sinfo;
+ volatile storeInfo sinfo = {0};
/* prepTuplestoreResult must have been called previously */
Assert(rsinfo->returnMode == SFRM_Materialize);
- /* initialize storeInfo to empty */
- memset((void *) &sinfo, 0, sizeof(sinfo));
sinfo.fcinfo = fcinfo;
PG_TRY();
@@ -1032,7 +1031,7 @@ materializeQueryResult(FunctionCallInfo fcinfo,
* need a tuple descriptor representing one TEXT column to return
* the command status string as our result tuple
*/
- tupdesc = CreateTemplateTupleDesc(1, false);
+ tupdesc = CreateTemplateTupleDesc(1);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
TEXTOID, -1, 0);
attinmeta = TupleDescGetAttInMetadata(tupdesc);
@@ -1526,7 +1525,7 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
/*
* need a tuple descriptor representing one INT and one TEXT column
*/
- tupdesc = CreateTemplateTupleDesc(2, false);
+ tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "position",
INT4OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "colname",
@@ -1904,7 +1903,7 @@ dblink_get_notify(PG_FUNCTION_ARGS)
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
oldcontext = MemoryContextSwitchTo(per_query_ctx);
- tupdesc = CreateTemplateTupleDesc(DBLINK_NOTIFY_COLS, false);
+ tupdesc = CreateTemplateTupleDesc(DBLINK_NOTIFY_COLS);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "notify_name",
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "be_pid",
@@ -2048,7 +2047,7 @@ get_pkey_attnames(Relation rel, int16 *indnkeyatts)
tupdesc = rel->rd_att;
/* Prepare to scan pg_index for entries having indrelid = this rel. */
- indexRelation = heap_open(IndexRelationId, AccessShareLock);
+ indexRelation = table_open(IndexRelationId, AccessShareLock);
ScanKeyInit(&skey,
Anum_pg_index_indrelid,
BTEqualStrategyNumber, F_OIDEQ,
@@ -2077,7 +2076,7 @@ get_pkey_attnames(Relation rel, int16 *indnkeyatts)
}
systable_endscan(scan);
- heap_close(indexRelation, AccessShareLock);
+ table_close(indexRelation, AccessShareLock);
return result;
}
@@ -2501,7 +2500,7 @@ get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclMode aclmode)
AclResult aclresult;
relvar = makeRangeVarFromNameList(textToQualifiedNameList(relname_text));
- rel = heap_openrv(relvar, lockmode);
+ rel = table_openrv(relvar, lockmode);
aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
aclmode);
diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out
index dfd49b937e8..6ceabb453c0 100644
--- a/contrib/dblink/expected/dblink.out
+++ b/contrib/dblink/expected/dblink.out
@@ -1154,7 +1154,7 @@ FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
SELECT *
FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
-ERROR: invalid input syntax for integer: "not an int"
+ERROR: invalid input syntax for type integer: "not an int"
-- Make sure that the local settings have retained their values in spite
-- of shenanigans on the connection.
SHOW datestyle;
diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c
index 56ede37089e..628b9769c32 100644
--- a/contrib/dict_int/dict_int.c
+++ b/contrib/dict_int/dict_int.c
@@ -3,7 +3,7 @@
* dict_int.c
* Text search dictionary for integers
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/dict_int/dict_int.c
diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c
index a79ece240ce..509e14aee07 100644
--- a/contrib/dict_xsyn/dict_xsyn.c
+++ b/contrib/dict_xsyn/dict_xsyn.c
@@ -3,7 +3,7 @@
* dict_xsyn.c
* Extended synonym dictionary
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/dict_xsyn/dict_xsyn.c
diff --git a/contrib/earthdistance/expected/earthdistance.out b/contrib/earthdistance/expected/earthdistance.out
index 89022491cb6..26a843c3faa 100644
--- a/contrib/earthdistance/expected/earthdistance.out
+++ b/contrib/earthdistance/expected/earthdistance.out
@@ -882,11 +882,12 @@ SELECT earth_box(ll_to_earth(90,180),
--
-- Test the recommended constraints.
--
-SELECT is_point(ll_to_earth(0,0));
-ERROR: function is_point(earth) does not exist
-LINE 1: SELECT is_point(ll_to_earth(0,0));
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT cube_is_point(ll_to_earth(0,0));
+ cube_is_point
+---------------
+ t
+(1 row)
+
SELECT cube_dim(ll_to_earth(0,0)) <= 3;
?column?
----------
@@ -900,11 +901,12 @@ SELECT abs(cube_distance(ll_to_earth(0,0), '(0)'::cube) / earth() - 1) <
t
(1 row)
-SELECT is_point(ll_to_earth(30,60));
-ERROR: function is_point(earth) does not exist
-LINE 1: SELECT is_point(ll_to_earth(30,60));
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT cube_is_point(ll_to_earth(30,60));
+ cube_is_point
+---------------
+ t
+(1 row)
+
SELECT cube_dim(ll_to_earth(30,60)) <= 3;
?column?
----------
@@ -918,11 +920,12 @@ SELECT abs(cube_distance(ll_to_earth(30,60), '(0)'::cube) / earth() - 1) <
t
(1 row)
-SELECT is_point(ll_to_earth(60,90));
-ERROR: function is_point(earth) does not exist
-LINE 1: SELECT is_point(ll_to_earth(60,90));
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT cube_is_point(ll_to_earth(60,90));
+ cube_is_point
+---------------
+ t
+(1 row)
+
SELECT cube_dim(ll_to_earth(60,90)) <= 3;
?column?
----------
@@ -936,11 +939,12 @@ SELECT abs(cube_distance(ll_to_earth(60,90), '(0)'::cube) / earth() - 1) <
t
(1 row)
-SELECT is_point(ll_to_earth(-30,-90));
-ERROR: function is_point(earth) does not exist
-LINE 1: SELECT is_point(ll_to_earth(-30,-90));
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT cube_is_point(ll_to_earth(-30,-90));
+ cube_is_point
+---------------
+ t
+(1 row)
+
SELECT cube_dim(ll_to_earth(-30,-90)) <= 3;
?column?
----------
@@ -985,7 +989,7 @@ HINT: You can drop extension cube instead.
create table foo (f1 cube, f2 int);
drop extension cube; -- fail, foo.f1 requires it
ERROR: cannot drop extension cube because other objects depend on it
-DETAIL: table foo column f1 depends on type cube
+DETAIL: column f1 of table foo depends on type cube
HINT: Use DROP ... CASCADE to drop the dependent objects too.
drop table foo;
drop extension cube;
@@ -1039,15 +1043,15 @@ create extension cube with schema c;
create table foo (f1 c.cube, f2 int);
drop extension cube; -- fail, foo.f1 requires it
ERROR: cannot drop extension cube because other objects depend on it
-DETAIL: table foo column f1 depends on type c.cube
+DETAIL: column f1 of table foo depends on type c.cube
HINT: Use DROP ... CASCADE to drop the dependent objects too.
drop schema c; -- fail, cube requires it
ERROR: cannot drop schema c because other objects depend on it
DETAIL: extension cube depends on schema c
-table foo column f1 depends on type c.cube
+column f1 of table foo depends on type c.cube
HINT: Use DROP ... CASCADE to drop the dependent objects too.
drop extension cube cascade;
-NOTICE: drop cascades to table foo column f1
+NOTICE: drop cascades to column f1 of table foo
\d foo
Table "public.foo"
Column | Type | Collation | Nullable | Default
diff --git a/contrib/earthdistance/sql/earthdistance.sql b/contrib/earthdistance/sql/earthdistance.sql
index 860450276f6..41455612175 100644
--- a/contrib/earthdistance/sql/earthdistance.sql
+++ b/contrib/earthdistance/sql/earthdistance.sql
@@ -282,19 +282,19 @@ SELECT earth_box(ll_to_earth(90,180),
-- Test the recommended constraints.
--
-SELECT is_point(ll_to_earth(0,0));
+SELECT cube_is_point(ll_to_earth(0,0));
SELECT cube_dim(ll_to_earth(0,0)) <= 3;
SELECT abs(cube_distance(ll_to_earth(0,0), '(0)'::cube) / earth() - 1) <
'10e-12'::float8;
-SELECT is_point(ll_to_earth(30,60));
+SELECT cube_is_point(ll_to_earth(30,60));
SELECT cube_dim(ll_to_earth(30,60)) <= 3;
SELECT abs(cube_distance(ll_to_earth(30,60), '(0)'::cube) / earth() - 1) <
'10e-12'::float8;
-SELECT is_point(ll_to_earth(60,90));
+SELECT cube_is_point(ll_to_earth(60,90));
SELECT cube_dim(ll_to_earth(60,90)) <= 3;
SELECT abs(cube_distance(ll_to_earth(60,90), '(0)'::cube) / earth() - 1) <
'10e-12'::float8;
-SELECT is_point(ll_to_earth(-30,-90));
+SELECT cube_is_point(ll_to_earth(-30,-90));
SELECT cube_dim(ll_to_earth(-30,-90)) <= 3;
SELECT abs(cube_distance(ll_to_earth(-30,-90), '(0)'::cube) / earth() - 1) <
'10e-12'::float8;
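
These assertions mirror the constraints that the earth domain places on its
cube values; in spirit they amount to the following (a sketch with a made-up
domain name, not the extension's exact DDL, and the tolerance is the test's):

    CREATE DOMAIN earth_like AS cube
      CONSTRAINT not_point  CHECK (cube_is_point(VALUE))
      CONSTRAINT not_3d     CHECK (cube_dim(VALUE) <= 3)
      CONSTRAINT on_surface CHECK (
        abs(cube_distance(VALUE, '(0)'::cube) / earth() - 1) < '10e-12'::float8);
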
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index 2cf09aecf6e..549821ca84c 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -3,7 +3,7 @@
* file_fdw.c
* foreign-data wrapper for server-side flat files (or programs).
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/file_fdw/file_fdw.c
@@ -18,6 +18,7 @@
#include "access/htup_details.h"
#include "access/reloptions.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_foreign_table.h"
#include "commands/copy.h"
@@ -28,11 +29,10 @@
#include "foreign/foreign.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
-#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
-#include "optimizer/var.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/sampling.h"
@@ -117,49 +117,49 @@ PG_FUNCTION_INFO_V1(file_fdw_validator);
* FDW callback routines
*/
static void fileGetForeignRelSize(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static void fileGetForeignPaths(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static ForeignScan *fileGetForeignPlan(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid,
- ForeignPath *best_path,
- List *tlist,
- List *scan_clauses,
- Plan *outer_plan);
+ RelOptInfo *baserel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses,
+ Plan *outer_plan);
static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
static void fileBeginForeignScan(ForeignScanState *node, int eflags);
static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
static void fileReScanForeignScan(ForeignScanState *node);
static void fileEndForeignScan(ForeignScanState *node);
static bool fileAnalyzeForeignTable(Relation relation,
- AcquireSampleRowsFunc *func,
- BlockNumber *totalpages);
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages);
static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
/*
* Helper functions
*/
static bool is_valid_option(const char *option, Oid context);
static void fileGetOptions(Oid foreigntableid,
- char **filename,
- bool *is_program,
- List **other_options);
+ char **filename,
+ bool *is_program,
+ List **other_options);
static List *get_file_fdw_attribute_options(Oid relid);
static bool check_selective_binary_conversion(RelOptInfo *baserel,
- Oid foreigntableid,
- List **columns);
+ Oid foreigntableid,
+ List **columns);
static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
- FileFdwPlanState *fdw_private);
+ FileFdwPlanState *fdw_private);
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
- FileFdwPlanState *fdw_private,
- Cost *startup_cost, Cost *total_cost);
-static int file_acquire_sample_rows(Relation onerel, int elevel,
- HeapTuple *rows, int targrows,
- double *totalrows, double *totaldeadrows);
+ FileFdwPlanState *fdw_private,
+ Cost *startup_cost, Cost *total_cost);
+static int file_acquire_sample_rows(Relation onerel, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows, double *totaldeadrows);
/*
@@ -360,8 +360,7 @@ fileGetOptions(Oid foreigntableid,
ForeignServer *server;
ForeignDataWrapper *wrapper;
List *options;
- ListCell *lc,
- *prev;
+ ListCell *lc;
/*
* Extract options from FDW objects. We ignore user mappings because
@@ -387,7 +386,6 @@ fileGetOptions(Oid foreigntableid,
*/
*filename = NULL;
*is_program = false;
- prev = NULL;
foreach(lc, options)
{
DefElem *def = (DefElem *) lfirst(lc);
@@ -395,17 +393,16 @@ fileGetOptions(Oid foreigntableid,
if (strcmp(def->defname, "filename") == 0)
{
*filename = defGetString(def);
- options = list_delete_cell(options, lc, prev);
+ options = foreach_delete_current(options, lc);
break;
}
else if (strcmp(def->defname, "program") == 0)
{
*filename = defGetString(def);
*is_program = true;
- options = list_delete_cell(options, lc, prev);
+ options = foreach_delete_current(options, lc);
break;
}
- prev = lc;
}
/*
@@ -438,7 +435,7 @@ get_file_fdw_attribute_options(Oid relid)
List *options = NIL;
- rel = heap_open(relid, AccessShareLock);
+ rel = table_open(relid, AccessShareLock);
tupleDesc = RelationGetDescr(rel);
natts = tupleDesc->natts;
@@ -480,7 +477,7 @@ get_file_fdw_attribute_options(Oid relid)
}
}
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
/*
* Return DefElem only when some column(s) have force_not_null /
@@ -556,6 +553,10 @@ fileGetForeignPaths(PlannerInfo *root,
* Create a ForeignPath node and add it as only possible path. We use the
* fdw_private list of the path to carry the convert_selectively option;
* it will be propagated into the fdw_private list of the Plan node.
+ *
+ * We don't support pushing join clauses into the quals of this path, but
+ * it could still have required parameterization due to LATERAL refs in
+ * its tlist.
*/
add_path(baserel, (Path *)
create_foreignscan_path(root, baserel,
@@ -564,7 +565,7 @@ fileGetForeignPaths(PlannerInfo *root,
startup_cost,
total_cost,
NIL, /* no pathkeys */
- NULL, /* no outer rel either */
+ baserel->lateral_relids,
NULL, /* no extra plan */
coptions));
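
Passing baserel->lateral_relids here lets the planner parameterize the path
when the scan's target list carries a LATERAL reference, as in this sketch
(table and column names are hypothetical):

    SELECT x.a, f.val
    FROM   local_tbl x,
           LATERAL (SELECT x.a + t.b AS val FROM file_fdw_tbl t) f;
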
@@ -727,8 +728,7 @@ fileIterateForeignScan(ForeignScanState *node)
*/
ExecClearTuple(slot);
found = NextCopyFrom(festate->cstate, NULL,
- slot->tts_values, slot->tts_isnull,
- NULL);
+ slot->tts_values, slot->tts_isnull);
if (found)
ExecStoreVirtualTuple(slot);
@@ -892,7 +892,7 @@ check_selective_binary_conversion(RelOptInfo *baserel,
}
/* Convert attribute numbers to column names. */
- rel = heap_open(foreigntableid, AccessShareLock);
+ rel = table_open(foreigntableid, AccessShareLock);
tupleDesc = RelationGetDescr(rel);
while ((attnum = bms_first_member(attrs_used)) >= 0)
@@ -919,6 +919,13 @@ check_selective_binary_conversion(RelOptInfo *baserel,
/* Skip dropped attributes (probably shouldn't see any here). */
if (attr->attisdropped)
continue;
+
+ /*
+ * Skip generated columns (COPY won't accept them in the column
+ * list)
+ */
+ if (attr->attgenerated)
+ continue;
*columns = lappend(*columns, makeString(pstrdup(attname)));
}
}
@@ -934,7 +941,7 @@ check_selective_binary_conversion(RelOptInfo *baserel,
numattrs++;
}
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
/* If there's a whole-row reference, fail: we need all the columns. */
if (has_wholerow)
@@ -1148,7 +1155,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
MemoryContextReset(tupcontext);
MemoryContextSwitchTo(tupcontext);
- found = NextCopyFrom(cstate, NULL, values, nulls, NULL);
+ found = NextCopyFrom(cstate, NULL, values, nulls);
MemoryContextSwitchTo(oldcontext);
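
The fileGetOptions() hunk above drops the manually maintained prev pointer in favor of foreach_delete_current(), the idiom that goes with the array-based List representation. A minimal sketch of the pattern, assuming the post-rewrite pg_list.h API (strip_filename_option is a hypothetical helper, not part of the patch):

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"

/* Hypothetical helper: remove any "filename" DefElem from an options list. */
static List *
strip_filename_option(List *options)
{
	ListCell   *lc;

	foreach(lc, options)
	{
		DefElem    *def = (DefElem *) lfirst(lc);

		/*
		 * foreach_delete_current() deletes the cell the loop is currently
		 * positioned on and fixes up the foreach state, so no prev-cell
		 * bookkeeping is needed (or possible) anymore.
		 */
		if (strcmp(def->defname, "filename") == 0)
			options = foreach_delete_current(options, lc);
	}
	return options;
}
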
diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source
index a5e79a4549a..45b728eeb3d 100644
--- a/contrib/file_fdw/input/file_fdw.source
+++ b/contrib/file_fdw/input/file_fdw.source
@@ -189,6 +189,12 @@ SELECT tableoid::regclass, * FROM p1;
SELECT tableoid::regclass, * FROM p2;
DROP TABLE pt;
+-- generated column tests
+CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server
+OPTIONS (format 'csv', filename '@abs_srcdir@/data/list1.csv', delimiter ',');
+SELECT a, c FROM gft1;
+DROP FOREIGN TABLE gft1;
+
-- privilege tests
SET ROLE regress_file_fdw_superuser;
SELECT * FROM agg_text ORDER BY a;
diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source
index 853c9f9b28b..52b4d5f1df7 100644
--- a/contrib/file_fdw/output/file_fdw.source
+++ b/contrib/file_fdw/output/file_fdw.source
@@ -375,6 +375,17 @@ SELECT tableoid::regclass, * FROM p2;
(3 rows)
DROP TABLE pt;
+-- generated column tests
+CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server
+OPTIONS (format 'csv', filename '@abs_srcdir@/data/list1.csv', delimiter ',');
+SELECT a, c FROM gft1;
+ a | c
+---+--------
+ 1 | _null_
+ 1 | _null_
+(2 rows)
+
+DROP FOREIGN TABLE gft1;
-- privilege tests
SET ROLE regress_file_fdw_superuser;
SELECT * FROM agg_text ORDER BY a;
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index 16e4c66167b..f2f16bc883a 100644
--- a/contrib/fuzzystrmatch/dmetaphone.c
+++ b/contrib/fuzzystrmatch/dmetaphone.c
@@ -52,7 +52,7 @@
/***************************** COPYRIGHT NOTICES ***********************
Most of this code is directly from the Text::DoubleMetaphone perl module
-version 0.05 available from http://www.cpan.org.
+version 0.05 available from https://www.cpan.org/.
It bears this copyright notice:
@@ -191,7 +191,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
(v = (t*)repalloc((v),((n)*sizeof(t))))
/*
- * Don't do pfree - it seems to cause a segv sometimes - which might have just
+ * Don't do pfree - it seems to cause a SIGSEGV sometimes - which might have just
* been caused by reloading the module in development.
* So we rely on context cleanup - Tom Lane says pfree shouldn't be necessary
* in a case like this.
diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c
index 05774658dc8..b8992f7c3cb 100644
--- a/contrib/fuzzystrmatch/fuzzystrmatch.c
+++ b/contrib/fuzzystrmatch/fuzzystrmatch.c
@@ -6,7 +6,7 @@
* Joe Conway
*
* contrib/fuzzystrmatch/fuzzystrmatch.c
- * Copyright (c) 2001-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2019, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* metaphone()
diff --git a/contrib/hstore/Makefile b/contrib/hstore/Makefile
index ab7fef39793..b29d02b1372 100644
--- a/contrib/hstore/Makefile
+++ b/contrib/hstore/Makefile
@@ -5,12 +5,16 @@ OBJS = hstore_io.o hstore_op.o hstore_gist.o hstore_gin.o hstore_compat.o \
$(WIN32RES)
EXTENSION = hstore
-DATA = hstore--1.4.sql hstore--1.4--1.5.sql \
+DATA = hstore--1.4.sql \
+ hstore--1.5--1.6.sql \
+ hstore--1.4--1.5.sql \
hstore--1.3--1.4.sql hstore--1.2--1.3.sql \
hstore--1.1--1.2.sql hstore--1.0--1.1.sql \
hstore--unpackaged--1.0.sql
PGFILEDESC = "hstore - key/value pair data type"
+HEADERS = hstore.h
+
REGRESS = hstore
ifdef USE_PGXS
diff --git a/contrib/hstore/expected/hstore.out b/contrib/hstore/expected/hstore.out
index f0d421602d5..4f1db01b3eb 100644
--- a/contrib/hstore/expected/hstore.out
+++ b/contrib/hstore/expected/hstore.out
@@ -1515,3 +1515,15 @@ select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_jso
{"f1":"rec2","f2":{"b": false, "c": "null", "d": -12345, "e": "012345.6", "f": -1.234, "g": 0.345e-4, "a key": 2}}]
(1 row)
+-- Check the hstore_hash() and hstore_hash_extended() function explicitly.
+SELECT v as value, hstore_hash(v)::bit(32) as standard,
+ hstore_hash_extended(v, 0)::bit(32) as extended0,
+ hstore_hash_extended(v, 1)::bit(32) as extended1
+FROM (VALUES (NULL::hstore), (''), ('"a key" =>1'), ('c => null'),
+ ('e => 012345'), ('g => 2.345e+4')) x(v)
+WHERE hstore_hash(v)::bit(32) != hstore_hash_extended(v, 0)::bit(32)
+ OR hstore_hash(v)::bit(32) = hstore_hash_extended(v, 1)::bit(32);
+ value | standard | extended0 | extended1
+-------+----------+-----------+-----------
+(0 rows)
+
diff --git a/contrib/hstore/hstore--1.5--1.6.sql b/contrib/hstore/hstore--1.5--1.6.sql
new file mode 100644
index 00000000000..c5a2bae02ff
--- /dev/null
+++ b/contrib/hstore/hstore--1.5--1.6.sql
@@ -0,0 +1,12 @@
+/* contrib/hstore/hstore--1.5--1.6.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION hstore UPDATE TO '1.6'" to load this file. \quit
+
+CREATE FUNCTION hstore_hash_extended(hstore, int8)
+RETURNS int8
+AS 'MODULE_PATHNAME','hstore_hash_extended'
+LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE;
+
+ALTER OPERATOR FAMILY hash_hstore_ops USING hash ADD
+ FUNCTION 2 hstore_hash_extended(hstore, int8);
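
The upgrade script above only adds the catalog plumbing; the C function it references lives in hstore_op.c, which is not shown in this excerpt. A plausible shape for it, mirroring how hstore_hash() wraps hash_any() (a sketch under that assumption, not the committed code):

#include "postgres.h"

#include "access/hash.h"
#include "hstore/hstore.h"

PG_FUNCTION_INFO_V1(hstore_hash_extended);

Datum
hstore_hash_extended(PG_FUNCTION_ARGS)
{
	HStore	   *hs = PG_GETARG_HSTORE_P(0);
	uint64		seed = PG_GETARG_INT64(1);
	Datum		hval;

	/* Hash the same bytes as hstore_hash(), folding in the caller's seed. */
	hval = hash_any_extended((unsigned char *) VARDATA(hs),
							 VARSIZE(hs) - VARHDRSZ,
							 seed);

	PG_FREE_IF_COPY(hs, 0);
	PG_RETURN_DATUM(hval);
}

With seed 0, hash_any_extended() is defined to match hash_any() in its low 32 bits, which is exactly the property the new regression test pins down.
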
diff --git a/contrib/hstore/hstore.control b/contrib/hstore/hstore.control
index 8a719475b82..93688cdd83c 100644
--- a/contrib/hstore/hstore.control
+++ b/contrib/hstore/hstore.control
@@ -1,5 +1,5 @@
# hstore extension
comment = 'data type for storing sets of (key, value) pairs'
-default_version = '1.5'
+default_version = '1.6'
module_pathname = '$libdir/hstore'
relocatable = true
diff --git a/contrib/hstore/hstore_compat.c b/contrib/hstore/hstore_compat.c
index b95ce9b4aaa..1d4e7484e4d 100644
--- a/contrib/hstore/hstore_compat.c
+++ b/contrib/hstore/hstore_compat.c
@@ -238,34 +238,35 @@ hstoreUpgrade(Datum orig)
HStore *hs = (HStore *) PG_DETOAST_DATUM(orig);
int valid_new;
int valid_old;
- bool writable;
/* Return immediately if no conversion needed */
- if ((hs->size_ & HS_FLAG_NEWVERSION) ||
- hs->size_ == 0 ||
+ if (hs->size_ & HS_FLAG_NEWVERSION)
+ return hs;
+
+ /* Do we have a writable copy? If not, make one. */
+ if ((void *) hs == (void *) DatumGetPointer(orig))
+ hs = (HStore *) PG_DETOAST_DATUM_COPY(orig);
+
+ if (hs->size_ == 0 ||
(VARSIZE(hs) < 32768 && HSE_ISFIRST((ARRPTR(hs)[0]))))
+ {
+ HS_SETCOUNT(hs, HS_COUNT(hs));
+ HS_FIXSIZE(hs, HS_COUNT(hs));
return hs;
+ }
valid_new = hstoreValidNewFormat(hs);
valid_old = hstoreValidOldFormat(hs);
- /* Do we have a writable copy? */
- writable = ((void *) hs != (void *) DatumGetPointer(orig));
if (!valid_old || hs->size_ == 0)
{
if (valid_new)
{
/*
- * force the "new version" flag and the correct varlena length,
- * but only if we have a writable copy already (which we almost
- * always will, since short new-format values won't come through
- * here)
+ * force the "new version" flag and the correct varlena length.
*/
- if (writable)
- {
- HS_SETCOUNT(hs, HS_COUNT(hs));
- HS_FIXSIZE(hs, HS_COUNT(hs));
- }
+ HS_SETCOUNT(hs, HS_COUNT(hs));
+ HS_FIXSIZE(hs, HS_COUNT(hs));
return hs;
}
else
@@ -302,15 +303,10 @@ hstoreUpgrade(Datum orig)
elog(WARNING, "ambiguous hstore value resolved as hstore-new");
/*
- * force the "new version" flag and the correct varlena length, but
- * only if we have a writable copy already (which we almost always
- * will, since short new-format values won't come through here)
+ * force the "new version" flag and the correct varlena length.
*/
- if (writable)
- {
- HS_SETCOUNT(hs, HS_COUNT(hs));
- HS_FIXSIZE(hs, HS_COUNT(hs));
- }
+ HS_SETCOUNT(hs, HS_COUNT(hs));
+ HS_FIXSIZE(hs, HS_COUNT(hs));
return hs;
#else
elog(WARNING, "ambiguous hstore value resolved as hstore-old");
@@ -318,13 +314,8 @@ hstoreUpgrade(Datum orig)
}
/*
- * must have an old-style value. Overwrite it in place as a new-style one,
- * making sure we have a writable copy first.
+ * must have an old-style value. Overwrite it in place as a new-style one.
*/
-
- if (!writable)
- hs = (HStore *) PG_DETOAST_DATUM_COPY(orig);
-
{
int count = hs->size_;
HEntry *new_entries = ARRPTR(hs);
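
The hstoreUpgrade() rework above moves the make-a-writable-copy step to the top, because every branch that follows may now scribble on the value. The underlying rule, condensed into a sketch (detoast_writable is an illustrative name):

#include "postgres.h"

#include "fmgr.h"

/*
 * Illustrative only: PG_DETOAST_DATUM() returns the original datum's bytes
 * when no decompression or copy was needed, and those bytes may be shared.
 * Compare pointers to detect that case and copy before modifying in place.
 */
static struct varlena *
detoast_writable(Datum orig)
{
	struct varlena *v = PG_DETOAST_DATUM(orig);

	if ((void *) v == (void *) DatumGetPointer(orig))
		v = PG_DETOAST_DATUM_COPY(orig);	/* private, safe to overwrite */
	return v;
}
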
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index 6d24d2f468a..6a885f2926f 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -19,9 +19,6 @@
typedef char BITVEC[SIGLEN];
typedef char *BITVECP;
-#define SIGPTR(x) ( (BITVECP) ARR_DATA_PTR(x) )
-
-
#define LOOPBYTE \
for(i=0;i<SIGLEN;i++)
diff --git a/contrib/hstore/sql/hstore.sql b/contrib/hstore/sql/hstore.sql
--- a/contrib/hstore/sql/hstore.sql
+++ b/contrib/hstore/sql/hstore.sql
insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'),
('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
select json_agg(q) from test_json_agg q;
select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
+
+-- Check the hstore_hash() and hstore_hash_extended() function explicitly.
+SELECT v as value, hstore_hash(v)::bit(32) as standard,
+ hstore_hash_extended(v, 0)::bit(32) as extended0,
+ hstore_hash_extended(v, 1)::bit(32) as extended1
+FROM (VALUES (NULL::hstore), (''), ('"a key" =>1'), ('c => null'),
+ ('e => 012345'), ('g => 2.345e+4')) x(v)
+WHERE hstore_hash(v)::bit(32) != hstore_hash_extended(v, 0)::bit(32)
+ OR hstore_hash(v)::bit(32) = hstore_hash_extended(v, 1)::bit(32);
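
The WHERE clause above is written so that the query returns no rows exactly when both halves of the extended-hash contract hold: a zero seed reproduces the legacy 32-bit hash, and a nonzero seed perturbs it. Restated in C against the generic hash support (a sketch; check_hash_contract is an illustrative name):

#include "postgres.h"

#include "access/hash.h"

/* Illustrative restatement of what the regression query asserts. */
static void
check_hash_contract(const unsigned char *key, int len)
{
	uint32		h = DatumGetUInt32(hash_any(key, len));
	uint64		h0 = DatumGetUInt64(hash_any_extended(key, len, 0));
	uint64		h1 = DatumGetUInt64(hash_any_extended(key, len, 1));

	/* seed 0: low 32 bits must reproduce the legacy hash */
	Assert(h == (uint32) h0);

	/*
	 * nonzero seed: should change the value (a collision is theoretically
	 * possible, but the test data avoids it)
	 */
	Assert((uint32) h0 != (uint32) h1);
}
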
diff --git a/contrib/hstore_plperl/Makefile b/contrib/hstore_plperl/Makefile
index f63cba27456..5076e21e0ee 100644
--- a/contrib/hstore_plperl/Makefile
+++ b/contrib/hstore_plperl/Makefile
@@ -4,7 +4,6 @@ MODULE_big = hstore_plperl
OBJS = hstore_plperl.o $(WIN32RES)
PGFILEDESC = "hstore_plperl - hstore transform for plperl"
-PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl -I$(top_srcdir)/contrib/hstore
EXTENSION = hstore_plperl hstore_plperlu
DATA = hstore_plperl--1.0.sql hstore_plperlu--1.0.sql
@@ -13,10 +12,12 @@ REGRESS = hstore_plperl hstore_plperlu create_transform
EXTRA_INSTALL = contrib/hstore
ifdef USE_PGXS
+PG_CPPFLAGS = -I$(includedir_server)/extension
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
+PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl -I$(top_srcdir)/contrib
subdir = contrib/hstore_plperl
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
@@ -34,8 +35,5 @@ rpathdir = $(perl_archlibexp)/CORE
SHLIB_LINK += $(perl_embed_ldflags)
endif
-# As with plperl we need to make sure that the CORE directory is included
-# last, probably because it sometimes contains some header files with names
-# that clash with some of ours, or with some that we include, notably on
-# Windows.
-override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) -I$(perl_archlibexp)/CORE
+# As with plperl we need to include the perl_includespec directory last.
+override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) $(perl_includespec)
diff --git a/contrib/hstore_plperl/expected/hstore_plperl.out b/contrib/hstore_plperl/expected/hstore_plperl.out
index 25fc506c23f..1ab09a94cda 100644
--- a/contrib/hstore_plperl/expected/hstore_plperl.out
+++ b/contrib/hstore_plperl/expected/hstore_plperl.out
@@ -41,6 +41,25 @@ SELECT test2arr();
{"\"a\"=>\"1\", \"b\"=>\"boo\", \"c\"=>NULL","\"d\"=>\"2\""}
(1 row)
+-- check error cases
+CREATE OR REPLACE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+return 42;
+$$;
+SELECT test2();
+ERROR: cannot transform non-hash Perl value to hstore
+CONTEXT: PL/Perl function "test2"
+CREATE OR REPLACE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+return [1, 2];
+$$;
+SELECT test2();
+ERROR: cannot transform non-hash Perl value to hstore
+CONTEXT: PL/Perl function "test2"
DROP FUNCTION test2();
DROP FUNCTION test2arr();
DROP EXTENSION hstore_plperl;
diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c
index 6bc3bb37fc3..1316b0500be 100644
--- a/contrib/hstore_plperl/hstore_plperl.c
+++ b/contrib/hstore_plperl/hstore_plperl.c
@@ -1,11 +1,9 @@
#include "postgres.h"
-#undef _
-
#include "fmgr.h"
+#include "hstore/hstore.h"
#include "plperl.h"
#include "plperl_helpers.h"
-#include "hstore.h"
PG_MODULE_MAGIC;
@@ -101,7 +99,8 @@ Datum
plperl_to_hstore(PG_FUNCTION_ARGS)
{
dTHX;
- HV *hv = (HV *) SvRV((SV *) PG_GETARG_POINTER(0));
+ SV *in = (SV *) PG_GETARG_POINTER(0);
+ HV *hv;
HE *he;
int32 buflen;
int32 i;
@@ -109,6 +108,17 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
HStore *out;
Pairs *pairs;
+ /* Dereference references recursively. */
+ while (SvROK(in))
+ in = SvRV(in);
+
+ /* Now we must have a hash. */
+ if (SvTYPE(in) != SVt_PVHV)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ (errmsg("cannot transform non-hash Perl value to hstore"))));
+ hv = (HV *) in;
+
pcount = hv_iterinit(hv);
pairs = palloc(pcount * sizeof(Pairs));
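
The new prologue in plperl_to_hstore() is what makes the error-case tests added above fail cleanly instead of crashing: previously SvRV() was applied unconditionally, so a plain scalar such as 42 was treated as a reference. Condensed into a standalone helper (deref_to_hash is an illustrative name):

#include "postgres.h"

#include "plperl.h"

/* Illustrative condensation of the dereference-then-typecheck prologue. */
static HV *
deref_to_hash(SV *in)
{
	/* Unwrap any chain of references: \%h, \\%h, ... all reach the hash. */
	while (SvROK(in))
		in = SvRV(in);

	/* Anything that is not a hash at this point cannot become an hstore. */
	if (SvTYPE(in) != SVt_PVHV)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot transform non-hash Perl value to hstore")));

	return (HV *) in;
}
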
diff --git a/contrib/hstore_plperl/sql/hstore_plperl.sql b/contrib/hstore_plperl/sql/hstore_plperl.sql
index 9398aedfbbd..ad1db7eae17 100644
--- a/contrib/hstore_plperl/sql/hstore_plperl.sql
+++ b/contrib/hstore_plperl/sql/hstore_plperl.sql
@@ -31,6 +31,25 @@ $$;
SELECT test2arr();
+-- check error cases
+CREATE OR REPLACE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+return 42;
+$$;
+
+SELECT test2();
+
+CREATE OR REPLACE FUNCTION test2() RETURNS hstore
+LANGUAGE plperl
+TRANSFORM FOR TYPE hstore
+AS $$
+return [1, 2];
+$$;
+
+SELECT test2();
+
DROP FUNCTION test2();
DROP FUNCTION test2arr();
diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile
index b81735ab910..6877e7a072c 100644
--- a/contrib/hstore_plpython/Makefile
+++ b/contrib/hstore_plpython/Makefile
@@ -4,19 +4,21 @@ MODULE_big = hstore_plpython$(python_majorversion)
OBJS = hstore_plpython.o $(WIN32RES)
PGFILEDESC = "hstore_plpython - hstore transform for plpython"
-PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/hstore -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"'
-
EXTENSION = hstore_plpythonu hstore_plpython2u hstore_plpython3u
DATA = hstore_plpythonu--1.0.sql hstore_plpython2u--1.0.sql hstore_plpython3u--1.0.sql
REGRESS = hstore_plpython
REGRESS_PLPYTHON3_MANGLE := $(REGRESS)
+PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"'
+
ifdef USE_PGXS
+PG_CPPFLAGS += -I$(includedir_server)/extension
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
+PG_CPPFLAGS += -I$(top_srcdir)/src/pl/plpython -I$(top_srcdir)/contrib
subdir = contrib/hstore_plpython
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
index 218e6612b14..93705f0d54c 100644
--- a/contrib/hstore_plpython/hstore_plpython.c
+++ b/contrib/hstore_plpython/hstore_plpython.c
@@ -3,7 +3,7 @@
#include "fmgr.h"
#include "plpython.h"
#include "plpy_typeio.h"
-#include "hstore.h"
+#include "hstore/hstore.h"
PG_MODULE_MAGIC;
@@ -128,9 +128,9 @@ Datum
plpython_to_hstore(PG_FUNCTION_ARGS)
{
PyObject *dict;
- volatile PyObject *items_v = NULL;
- int32 pcount;
- HStore *out;
+ PyObject *volatile items;
+ Py_ssize_t pcount;
+ HStore *volatile out;
dict = (PyObject *) PG_GETARG_POINTER(0);
if (!PyMapping_Check(dict))
@@ -139,14 +139,13 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
errmsg("not a Python mapping")));
pcount = PyMapping_Size(dict);
- items_v = PyMapping_Items(dict);
+ items = PyMapping_Items(dict);
PG_TRY();
{
int32 buflen;
- int32 i;
+ Py_ssize_t i;
Pairs *pairs;
- PyObject *items = (PyObject *) items_v;
pairs = palloc(pcount * sizeof(*pairs));
@@ -177,17 +176,18 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
pairs[i].isnull = false;
}
}
- Py_DECREF(items_v);
pcount = hstoreUniquePairs(pairs, pcount, &buflen);
out = hstorePairs(pairs, pcount, buflen);
}
PG_CATCH();
{
- Py_DECREF(items_v);
+ Py_DECREF(items);
PG_RE_THROW();
}
PG_END_TRY();
+ Py_DECREF(items);
+
PG_RETURN_POINTER(out);
}
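
One change above is easy to misread as noise: `volatile PyObject *items_v` became `PyObject *volatile items`. The first form qualifies the pointed-to object; only the second makes the pointer variable itself volatile, which is what PG_TRY() requires for locals assigned before the sigsetjmp() and used after PG_CATCH(). A minimal sketch of the corrected pattern (the demo function is illustrative):

#include "postgres.h"

#include "plpython.h"

static void
cleanup_on_error_demo(PyObject *dict)
{
	/*
	 * volatile binds to the pointer here, so the value assigned below is
	 * still well-defined if a longjmp lands us in PG_CATCH().
	 */
	PyObject   *volatile items = NULL;

	items = PyMapping_Items(dict);
	PG_TRY();
	{
		/* ... conversion work that may ereport() ... */
	}
	PG_CATCH();
	{
		Py_XDECREF(items);		/* reference is reliably released */
		PG_RE_THROW();
	}
	PG_END_TRY();

	Py_XDECREF(items);
}
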
diff --git a/contrib/intarray/_int.h b/contrib/intarray/_int.h
index b689eb7dedf..f03fdf9add9 100644
--- a/contrib/intarray/_int.h
+++ b/contrib/intarray/_int.h
@@ -85,12 +85,6 @@ typedef struct
#define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )
-/*
- * types for functions
- */
-typedef ArrayType *(*formarray) (ArrayType *, ArrayType *);
-typedef void (*formfloat) (ArrayType *, float *);
-
/*
* useful functions
*/
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index 911d18023b9..e5a8011daf8 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -12,6 +12,17 @@
#define GETENTRY(vec,pos) ((ArrayType *) DatumGetPointer((vec)->vector[(pos)].key))
+/*
+ * Control the maximum sparseness of compressed keys.
+ *
+ * The upper safe bound for this limit is half the maximum allocatable array
+ * size. A lower bound would give more guarantees that pathological data
+ * wouldn't eat excessive CPU and memory, but at the expense of breaking
+ * possibly working (after a fashion) indexes.
+ */
+#define MAXNUMELTS (Min((MaxAllocSize / sizeof(Datum)),((MaxAllocSize - ARR_OVERHEAD_NONULLS(1)) / sizeof(int)))/2)
+/* or: #define MAXNUMELTS 1000000 */
+
/*
** GiST support methods
*/
@@ -85,8 +96,13 @@ g_int_consistent(PG_FUNCTION_ARGS)
retval = inner_int_contains(query,
(ArrayType *) DatumGetPointer(entry->key));
else
- retval = inner_int_overlap((ArrayType *) DatumGetPointer(entry->key),
- query);
+ {
+ /*
+ * Unfortunately, because empty arrays could be anywhere in
+ * the index, we must search the whole tree.
+ */
+ retval = true;
+ }
break;
default:
retval = false;
@@ -141,11 +157,13 @@ g_int_compress(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval;
ArrayType *r;
- int len;
+ int len,
+ lenr;
int *dr;
int i,
- min,
+ j,
cand;
+ int64 min;
if (entry->leafkey)
{
@@ -186,23 +204,65 @@ g_int_compress(PG_FUNCTION_ARGS)
dr = ARRPTR(r);
- for (i = len - 1; i >= 0; i--)
- dr[2 * i] = dr[2 * i + 1] = dr[i];
+ /*
+ * "len" at this point is the number of ranges we will construct.
+ * "lenr" is the number of ranges we must eventually remove by
+ * merging; we must be careful to remove no more than this number.
+ */
+ lenr = len - MAXNUMRANGE;
+
+ /*
+ * Initially assume we can merge consecutive ints into a range, but we
+ * must count every value removed and stop when lenr runs out.
+ */
+ for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
+ {
+ int r_end = dr[i];
+ int r_start = r_end;
- len *= 2;
+ while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
+ --r_start, --i, --lenr;
+ dr[2 * j] = r_start;
+ dr[2 * j + 1] = r_end;
+ }
+ /* just copy the rest, if any, as trivial ranges */
+ for (; i >= 0; i--, j--)
+ dr[2 * j] = dr[2 * j + 1] = dr[i];
+
+ if (++j)
+ {
+ /*
+ * shunt everything down to start at the right place
+ */
+ memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
+ }
+
+ /*
+ * make "len" be number of array elements, not ranges
+ */
+ len = 2 * (len - j);
cand = 1;
while (len > MAXNUMRANGE * 2)
{
- min = INT_MAX;
+ min = PG_INT64_MAX;
for (i = 2; i < len; i += 2)
- if (min > (dr[i] - dr[i - 1]))
+ if (min > ((int64) dr[i] - (int64) dr[i - 1]))
{
- min = (dr[i] - dr[i - 1]);
+ min = ((int64) dr[i] - (int64) dr[i - 1]);
cand = i;
}
memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
len -= 2;
}
+
+ /*
+ * check sparseness of result
+ */
+ lenr = internal_size(dr, len);
+ if (lenr < 0 || lenr > MAXNUMELTS)
+ ereport(ERROR,
+ (errmsg("data is too sparse, recreate index using gist__intbig_ops opclass instead")));
+
r = resize_intArrayType(r, len);
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
@@ -260,6 +320,9 @@ g_int_decompress(PG_FUNCTION_ARGS)
din = ARRPTR(in);
lenr = internal_size(din, lenin);
+ if (lenr < 0 || lenr > MAXNUMELTS)
+ ereport(ERROR,
+ (errmsg("compressed array is too big, recreate index using gist__intbig_ops opclass instead")));
r = new_intArrayType(lenr);
dr = ARRPTR(r);
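
Two of the _int_gist.c changes above are overflow repairs: `min` is widened to int64 and each `dr[i] - dr[i - 1]` operand is cast before subtracting, because with boundaries near INT_MAX and INT_MIN the 32-bit difference overflows (undefined behavior that in practice wraps negative). A self-contained illustration in plain C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int32_t		hi = INT32_MAX;
	int32_t		lo = INT32_MIN;

	/*
	 * hi - lo in 32-bit arithmetic is signed overflow: undefined behavior,
	 * typically wrapping to -1.  Widening each operand first is safe.
	 */
	int64_t		gap = (int64_t) hi - (int64_t) lo;

	printf("true gap: %" PRId64 "\n", gap);	/* prints 4294967295 */
	return 0;
}
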
diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c
index 4c3f60c1dd4..aebffae66c2 100644
--- a/contrib/intarray/_int_selfuncs.c
+++ b/contrib/intarray/_int_selfuncs.c
@@ -3,7 +3,7 @@
* _int_selfuncs.c
* Functions for selectivity estimation of intarray operators
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -35,7 +35,7 @@ PG_FUNCTION_INFO_V1(_int_matchsel);
static Selectivity int_query_opr_selec(ITEM *item, Datum *values, float4 *freqs,
- int nmncelems, float4 minfreq);
+ int nmncelems, float4 minfreq);
static int compare_val_int4(const void *a, const void *b);
/*
@@ -43,7 +43,7 @@ static int compare_val_int4(const void *a, const void *b);
*
* The default array selectivity operators for the @>, && and @< operators
* work fine for integer arrays. However, if we tried to just use arraycontsel
- * and arracontjoinsel directly as the cost estimator functions for our
+ * and arraycontjoinsel directly as the cost estimator functions for our
* operators, they would not work as intended, because they look at the
* operator's OID. Our operators behave exactly like the built-in anyarray
* versions, but we must tell the cost estimator functions which built-in
diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c
index ee8fb64a478..fde8d15e2c2 100644
--- a/contrib/intarray/_int_tool.c
+++ b/contrib/intarray/_int_tool.c
@@ -3,6 +3,8 @@
*/
#include "postgres.h"
+#include <limits.h>
+
#include "catalog/pg_type.h"
#include "_int.h"
@@ -220,7 +222,17 @@ ArrayType *
new_intArrayType(int num)
{
ArrayType *r;
- int nbytes = ARR_OVERHEAD_NONULLS(1) + sizeof(int) * num;
+ int nbytes;
+
+ /* if no elements, return a zero-dimensional array */
+ if (num <= 0)
+ {
+ Assert(num == 0);
+ r = construct_empty_array(INT4OID);
+ return r;
+ }
+
+ nbytes = ARR_OVERHEAD_NONULLS(1) + sizeof(int) * num;
r = (ArrayType *) palloc0(nbytes);
@@ -237,19 +249,22 @@ new_intArrayType(int num)
ArrayType *
resize_intArrayType(ArrayType *a, int num)
{
- int nbytes = ARR_DATA_OFFSET(a) + sizeof(int) * num;
+ int nbytes;
int i;
/* if no elements, return a zero-dimensional array */
- if (num == 0)
+ if (num <= 0)
{
- ARR_NDIM(a) = 0;
+ Assert(num == 0);
+ a = construct_empty_array(INT4OID);
return a;
}
if (num == ARRNELEMS(a))
return a;
+ nbytes = ARR_DATA_OFFSET(a) + sizeof(int) * num;
+
a = (ArrayType *) repalloc(a, nbytes);
SET_VARSIZE(a, nbytes);
@@ -277,16 +292,18 @@ copy_intArrayType(ArrayType *a)
int
internal_size(int *a, int len)
{
- int i,
- size = 0;
+ int i;
+ int64 size = 0;
for (i = 0; i < len; i += 2)
{
if (!i || a[i] != a[i - 1]) /* do not count repeated range */
- size += a[i + 1] - a[i] + 1;
+ size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
}
- return size;
+ if (size > (int64) INT_MAX || size < (int64) INT_MIN)
+ return -1; /* overflow */
+ return (int) size;
}
/* unique-ify elements of r in-place ... r must be sorted already */
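
The `num <= 0` branches above exist so that an empty intersection yields a genuine zero-dimensional array rather than a one-dimensional array whose ndim was patched to 0; only the former compares equal to '{}'::int[], as the new `&` regression tests verify. A sketch of the allocation helper after the change (new_int4_array is an illustrative name; the real function is new_intArrayType):

#include "postgres.h"

#include "catalog/pg_type.h"
#include "utils/array.h"

static ArrayType *
new_int4_array(int num)
{
	ArrayType  *r;
	int			nbytes;

	/* if no elements, return a canonical zero-dimensional empty array */
	if (num <= 0)
		return construct_empty_array(INT4OID);

	nbytes = ARR_OVERHEAD_NONULLS(1) + sizeof(int32) * num;
	r = (ArrayType *) palloc0(nbytes);

	SET_VARSIZE(r, nbytes);
	ARR_NDIM(r) = 1;
	r->dataoffset = 0;			/* marker for no nulls bitmap */
	ARR_ELEMTYPE(r) = INT4OID;
	ARR_DIMS(r)[0] = num;
	ARR_LBOUND(r)[0] = 1;
	return r;
}
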
diff --git a/contrib/intarray/_intbig_gist.c b/contrib/intarray/_intbig_gist.c
index de7bc82a234..2a20abecc6c 100644
--- a/contrib/intarray/_intbig_gist.c
+++ b/contrib/intarray/_intbig_gist.c
@@ -5,6 +5,7 @@
#include "access/gist.h"
#include "access/stratnum.h"
+#include "port/pg_bitutils.h"
#include "_int.h"
@@ -19,27 +20,6 @@ PG_FUNCTION_INFO_V1(g_intbig_penalty);
PG_FUNCTION_INFO_V1(g_intbig_picksplit);
PG_FUNCTION_INFO_V1(g_intbig_union);
PG_FUNCTION_INFO_V1(g_intbig_same);
-
-/* Number of one-bits in an unsigned byte */
-static const uint8 number_of_ones[256] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
-};
-
PG_FUNCTION_INFO_V1(_intbig_in);
PG_FUNCTION_INFO_V1(_intbig_out);
@@ -207,12 +187,7 @@ g_intbig_compress(PG_FUNCTION_ARGS)
static int32
sizebitvec(BITVECP sign)
{
- int32 size = 0,
- i;
-
- LOOPBYTE
- size += number_of_ones[(unsigned char) sign[i]];
- return size;
+ return pg_popcount(sign, SIGLEN);
}
static int
@@ -225,7 +200,8 @@ hemdistsign(BITVECP a, BITVECP b)
LOOPBYTE
{
diff = (unsigned char) (a[i] ^ b[i]);
- dist += number_of_ones[diff];
+ /* Using the popcount functions here isn't likely to win */
+ dist += pg_number_of_ones[diff];
}
return dist;
}
@@ -591,7 +567,13 @@ g_intbig_consistent(PG_FUNCTION_ARGS)
}
}
else
- retval = _intbig_overlap((GISTTYPE *) DatumGetPointer(entry->key), query);
+ {
+ /*
+ * Unfortunately, because empty arrays could be anywhere in
+ * the index, we must search the whole tree.
+ */
+ retval = true;
+ }
break;
default:
retval = false;
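
The deleted number_of_ones table is not gone from the tree: it survives centrally as pg_number_of_ones in src/port. The split above is deliberate: whole-signature counting goes through pg_popcount(), which can use hardware popcount instructions over the buffer, while the single-byte XOR distance keeps the table lookup, where a function call per byte would cost more than it saves. A sketch of both uses, assuming the port/pg_bitutils.h interfaces shown in the diff (SIG_LEN is an illustrative constant):

#include "postgres.h"

#include "port/pg_bitutils.h"

#define SIG_LEN		252			/* illustrative signature size in bytes */

/* Whole-buffer weight: one call, hardware-assisted where available. */
static int32
signature_weight(const char *sign)
{
	return (int32) pg_popcount(sign, SIG_LEN);
}

/* Per-byte Hamming distance: the 256-entry table wins at this grain. */
static int
signature_distance(const char *a, const char *b)
{
	int			dist = 0;
	int			i;

	for (i = 0; i < SIG_LEN; i++)
		dist += pg_number_of_ones[(unsigned char) (a[i] ^ b[i])];
	return dist;
}
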
diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl
index f3262df05b2..d2c678bb53c 100755
--- a/contrib/intarray/bench/create_test.pl
+++ b/contrib/intarray/bench/create_test.pl
@@ -83,4 +83,5 @@ sub copytable
while (<$fff>) { print; }
close $fff;
print "\\.\n";
+ return;
}
diff --git a/contrib/intarray/expected/_int.out b/contrib/intarray/expected/_int.out
index 0a5dd463acb..c92a56524a3 100644
--- a/contrib/intarray/expected/_int.out
+++ b/contrib/intarray/expected/_int.out
@@ -151,6 +151,30 @@ SELECT '{-1,3,1}'::int[] & '{1,2}';
{1}
(1 row)
+SELECT '{1}'::int[] & '{2}'::int[];
+ ?column?
+----------
+ {}
+(1 row)
+
+SELECT array_dims('{1}'::int[] & '{2}'::int[]);
+ array_dims
+------------
+
+(1 row)
+
+SELECT ('{1}'::int[] & '{2}'::int[]) = '{}'::int[];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT ('{}'::int[] & '{}'::int[]) = '{}'::int[];
+ ?column?
+----------
+ t
+(1 row)
+
--test query_int
SELECT '1'::query_int;
query_int
@@ -407,6 +431,18 @@ SELECT count(*) from test__int WHERE a @> '{20,23}';
12
(1 row)
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+ count
+-------
+ 10
+(1 row)
+
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
+ count
+-------
+ 1
+(1 row)
+
SELECT count(*) from test__int WHERE a @@ '50&68';
count
-------
@@ -425,6 +461,19 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
21
(1 row)
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+ count
+-------
+ 6566
+(1 row)
+
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+ count
+-------
+ 6343
+(1 row)
+
+SET enable_seqscan = off; -- not all of these would use index by default
CREATE INDEX text_idx on test__int using gist ( a gist__int_ops );
SELECT count(*) from test__int WHERE a && '{23,50}';
count
@@ -456,6 +505,18 @@ SELECT count(*) from test__int WHERE a @> '{20,23}';
12
(1 row)
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+ count
+-------
+ 10
+(1 row)
+
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
+ count
+-------
+ 1
+(1 row)
+
SELECT count(*) from test__int WHERE a @@ '50&68';
count
-------
@@ -474,6 +535,18 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
21
(1 row)
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+ count
+-------
+ 6566
+(1 row)
+
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+ count
+-------
+ 6343
+(1 row)
+
DROP INDEX text_idx;
CREATE INDEX text_idx on test__int using gist ( a gist__intbig_ops );
SELECT count(*) from test__int WHERE a && '{23,50}';
@@ -506,6 +579,18 @@ SELECT count(*) from test__int WHERE a @> '{20,23}';
12
(1 row)
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+ count
+-------
+ 10
+(1 row)
+
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
+ count
+-------
+ 1
+(1 row)
+
SELECT count(*) from test__int WHERE a @@ '50&68';
count
-------
@@ -524,6 +609,18 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
21
(1 row)
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+ count
+-------
+ 6566
+(1 row)
+
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+ count
+-------
+ 6343
+(1 row)
+
DROP INDEX text_idx;
CREATE INDEX text_idx on test__int using gin ( a gin__int_ops );
SELECT count(*) from test__int WHERE a && '{23,50}';
@@ -556,6 +653,18 @@ SELECT count(*) from test__int WHERE a @> '{20,23}';
12
(1 row)
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+ count
+-------
+ 10
+(1 row)
+
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
+ count
+-------
+ 1
+(1 row)
+
SELECT count(*) from test__int WHERE a @@ '50&68';
count
-------
@@ -574,3 +683,16 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
21
(1 row)
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+ count
+-------
+ 6566
+(1 row)
+
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+ count
+-------
+ 6343
+(1 row)
+
+RESET enable_seqscan;
diff --git a/contrib/intarray/sql/_int.sql b/contrib/intarray/sql/_int.sql
index 44e1a729b4f..6ca7e3cca7e 100644
--- a/contrib/intarray/sql/_int.sql
+++ b/contrib/intarray/sql/_int.sql
@@ -30,6 +30,10 @@ SELECT '{123,623,445}'::int[] | 1623;
SELECT '{123,623,445}'::int[] | '{1623,623}';
SELECT '{123,623,445}'::int[] & '{1623,623}';
SELECT '{-1,3,1}'::int[] & '{1,2}';
+SELECT '{1}'::int[] & '{2}'::int[];
+SELECT array_dims('{1}'::int[] & '{2}'::int[]);
+SELECT ('{1}'::int[] & '{2}'::int[]) = '{}'::int[];
+SELECT ('{}'::int[] & '{}'::int[]) = '{}'::int[];
--test query_int
@@ -81,9 +85,15 @@ SELECT count(*) from test__int WHERE a @@ '23|50';
SELECT count(*) from test__int WHERE a @> '{23,50}';
SELECT count(*) from test__int WHERE a @@ '23&50';
SELECT count(*) from test__int WHERE a @> '{20,23}';
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
SELECT count(*) from test__int WHERE a @@ '50&68';
SELECT count(*) from test__int WHERE a @> '{20,23}' or a @> '{50,68}';
SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+
+SET enable_seqscan = off; -- not all of these would use index by default
CREATE INDEX text_idx on test__int using gist ( a gist__int_ops );
@@ -92,9 +102,13 @@ SELECT count(*) from test__int WHERE a @@ '23|50';
SELECT count(*) from test__int WHERE a @> '{23,50}';
SELECT count(*) from test__int WHERE a @@ '23&50';
SELECT count(*) from test__int WHERE a @> '{20,23}';
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
SELECT count(*) from test__int WHERE a @@ '50&68';
SELECT count(*) from test__int WHERE a @> '{20,23}' or a @> '{50,68}';
SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
DROP INDEX text_idx;
CREATE INDEX text_idx on test__int using gist ( a gist__intbig_ops );
@@ -104,9 +118,13 @@ SELECT count(*) from test__int WHERE a @@ '23|50';
SELECT count(*) from test__int WHERE a @> '{23,50}';
SELECT count(*) from test__int WHERE a @@ '23&50';
SELECT count(*) from test__int WHERE a @> '{20,23}';
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
SELECT count(*) from test__int WHERE a @@ '50&68';
SELECT count(*) from test__int WHERE a @> '{20,23}' or a @> '{50,68}';
SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
DROP INDEX text_idx;
CREATE INDEX text_idx on test__int using gin ( a gin__int_ops );
@@ -116,6 +134,12 @@ SELECT count(*) from test__int WHERE a @@ '23|50';
SELECT count(*) from test__int WHERE a @> '{23,50}';
SELECT count(*) from test__int WHERE a @@ '23&50';
SELECT count(*) from test__int WHERE a @> '{20,23}';
+SELECT count(*) from test__int WHERE a <@ '{73,23,20}';
+SELECT count(*) from test__int WHERE a = '{73,23,20}';
SELECT count(*) from test__int WHERE a @@ '50&68';
SELECT count(*) from test__int WHERE a @> '{20,23}' or a @> '{50,68}';
SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
+SELECT count(*) from test__int WHERE a @@ '20 | !21';
+SELECT count(*) from test__int WHERE a @@ '!20 & !21';
+
+RESET enable_seqscan;
diff --git a/contrib/isn/Makefile b/contrib/isn/Makefile
index ab6b175f9a7..c3600dac300 100644
--- a/contrib/isn/Makefile
+++ b/contrib/isn/Makefile
@@ -7,6 +7,9 @@ DATA = isn--1.1.sql isn--1.1--1.2.sql \
isn--1.0--1.1.sql isn--unpackaged--1.0.sql
PGFILEDESC = "isn - data types for international product numbering standards"
+# the other .h files are data tables, we don't install those
+HEADERS_isn = isn.h
+
REGRESS = isn
ifdef USE_PGXS
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 897d83e0ca3..0c2cac7d52b 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -4,7 +4,7 @@
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
* Author: German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/isn/isn.c
diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h
index 29632d85185..d572149d2af 100644
--- a/contrib/isn/isn.h
+++ b/contrib/isn/isn.h
@@ -4,7 +4,7 @@
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
* Author: German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/isn/isn.h
diff --git a/contrib/jsonb_plperl/Makefile b/contrib/jsonb_plperl/Makefile
index eb6d1deb7df..b43c8ed97bd 100644
--- a/contrib/jsonb_plperl/Makefile
+++ b/contrib/jsonb_plperl/Makefile
@@ -35,8 +35,5 @@ rpathdir = $(perl_archlibexp)/CORE
SHLIB_LINK += $(perl_embed_ldflags)
endif
-# As with plperl we need to make sure that the CORE directory is included
-# last, probably because it sometimes contains some header files with names
-# that clash with some of ours, or with some that we include, notably on
-# Windows.
-override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) -I$(perl_archlibexp)/CORE
+# As with plperl we need to include the perl_includespec directory last.
+override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) $(perl_includespec)
diff --git a/contrib/jsonb_plperl/expected/jsonb_plperl.out b/contrib/jsonb_plperl/expected/jsonb_plperl.out
index 99a2e8e135d..5a73485ac06 100644
--- a/contrib/jsonb_plperl/expected/jsonb_plperl.out
+++ b/contrib/jsonb_plperl/expected/jsonb_plperl.out
@@ -39,6 +39,20 @@ SELECT testSVToJsonb();
1
(1 row)
+CREATE FUNCTION testUVToJsonb() RETURNS jsonb
+LANGUAGE plperl
+TRANSFORM FOR TYPE jsonb
+as $$
+$val = ~0;
+return $val;
+$$;
+-- this might produce either 18446744073709551615 or 4294967295
+SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
-- this revealed a bug in the original implementation
CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb
LANGUAGE plperl
@@ -52,16 +66,39 @@ SELECT testRegexpResultToJsonb();
0
(1 row)
-CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb
+-- this revealed a different bug
+CREATE FUNCTION testTextToJsonbObject(text) RETURNS jsonb
+LANGUAGE plperl
+TRANSFORM FOR TYPE jsonb
+AS $$
+my $x = shift;
+return {a => $x};
+$$;
+SELECT testTextToJsonbObject('abc');
+ testtexttojsonbobject
+-----------------------
+ {"a": "abc"}
+(1 row)
+
+SELECT testTextToJsonbObject(NULL);
+ testtexttojsonbobject
+-----------------------
+ {"a": null}
+(1 row)
+
+CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb
LANGUAGE plperl
TRANSFORM FOR TYPE jsonb
AS $$
+# can't use Data::Dumper, but let's at least check for unexpected ref type
+die 'unexpected '.(ref($_[0]) || 'not a').' reference'
+ if ref($_[0]) ne $_[1];
return $_[0];
$$;
-SELECT roundtrip('null');
- roundtrip
------------
- null
+SELECT roundtrip('null') is null;
+ ?column?
+----------
+ t
(1 row)
SELECT roundtrip('1');
@@ -71,7 +108,7 @@ SELECT roundtrip('1');
(1 row)
SELECT roundtrip('1E+131071');
-ERROR: cannot convert infinite value to jsonb
+ERROR: cannot convert infinity to jsonb
CONTEXT: PL/Perl function "roundtrip"
SELECT roundtrip('-1');
roundtrip
@@ -115,91 +152,97 @@ SELECT roundtrip('false');
0
(1 row)
-SELECT roundtrip('[]');
+SELECT roundtrip('[]', 'ARRAY');
roundtrip
-----------
[]
(1 row)
-SELECT roundtrip('[null, null]');
+SELECT roundtrip('[null, null]', 'ARRAY');
roundtrip
--------------
[null, null]
(1 row)
-SELECT roundtrip('[1, 2, 3]');
+SELECT roundtrip('[1, 2, 3]', 'ARRAY');
roundtrip
-----------
[1, 2, 3]
(1 row)
-SELECT roundtrip('[-1, 2, -3]');
+SELECT roundtrip('[-1, 2, -3]', 'ARRAY');
roundtrip
-------------
[-1, 2, -3]
(1 row)
-SELECT roundtrip('[1.2, 2.3, 3.4]');
+SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY');
roundtrip
-----------------
[1.2, 2.3, 3.4]
(1 row)
-SELECT roundtrip('[-1.2, 2.3, -3.4]');
+SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY');
roundtrip
-------------------
[-1.2, 2.3, -3.4]
(1 row)
-SELECT roundtrip('["string1", "string2"]');
+SELECT roundtrip('["string1", "string2"]', 'ARRAY');
roundtrip
------------------------
["string1", "string2"]
(1 row)
-SELECT roundtrip('{}');
+SELECT roundtrip('[["string1", "string2"]]', 'ARRAY');
+ roundtrip
+--------------------------
+ [["string1", "string2"]]
+(1 row)
+
+SELECT roundtrip('{}', 'HASH');
roundtrip
-----------
{}
(1 row)
-SELECT roundtrip('{"1": null}');
+SELECT roundtrip('{"1": null}', 'HASH');
roundtrip
-------------
{"1": null}
(1 row)
-SELECT roundtrip('{"1": 1}');
+SELECT roundtrip('{"1": 1}', 'HASH');
roundtrip
-----------
{"1": 1}
(1 row)
-SELECT roundtrip('{"1": -1}');
+SELECT roundtrip('{"1": -1}', 'HASH');
roundtrip
-----------
{"1": -1}
(1 row)
-SELECT roundtrip('{"1": 1.1}');
+SELECT roundtrip('{"1": 1.1}', 'HASH');
roundtrip
------------
{"1": 1.1}
(1 row)
-SELECT roundtrip('{"1": -1.1}');
+SELECT roundtrip('{"1": -1.1}', 'HASH');
roundtrip
-------------
{"1": -1.1}
(1 row)
-SELECT roundtrip('{"1": "string1"}');
+SELECT roundtrip('{"1": "string1"}', 'HASH');
roundtrip
------------------
{"1": "string1"}
(1 row)
-SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
+SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH');
roundtrip
---------------------------------
{"1": {"2": [3, 4, 5]}, "2": 3}
@@ -207,4 +250,4 @@ SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
\set VERBOSITY terse \\ -- suppress cascade details
DROP EXTENSION plperl CASCADE;
-NOTICE: drop cascades to 6 other objects
+NOTICE: drop cascades to 8 other objects
diff --git a/contrib/jsonb_plperl/expected/jsonb_plperlu.out b/contrib/jsonb_plperl/expected/jsonb_plperlu.out
index 8053cf6aa80..dff316cf984 100644
--- a/contrib/jsonb_plperl/expected/jsonb_plperlu.out
+++ b/contrib/jsonb_plperl/expected/jsonb_plperlu.out
@@ -39,6 +39,20 @@ SELECT testSVToJsonb();
1
(1 row)
+CREATE FUNCTION testUVToJsonb() RETURNS jsonb
+LANGUAGE plperlu
+TRANSFORM FOR TYPE jsonb
+as $$
+$val = ~0;
+return $val;
+$$;
+-- this might produce either 18446744073709551615 or 4294967295
+SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
-- this revealed a bug in the original implementation
CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb
LANGUAGE plperlu
@@ -52,154 +66,210 @@ SELECT testRegexpResultToJsonb();
0
(1 row)
-CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb
+-- this revealed a different bug
+CREATE FUNCTION testTextToJsonbObject(text) RETURNS jsonb
+LANGUAGE plperlu
+TRANSFORM FOR TYPE jsonb
+AS $$
+my $x = shift;
+return {a => $x};
+$$;
+SELECT testTextToJsonbObject('abc');
+ testtexttojsonbobject
+-----------------------
+ {"a": "abc"}
+(1 row)
+
+SELECT testTextToJsonbObject(NULL);
+ testtexttojsonbobject
+-----------------------
+ {"a": null}
+(1 row)
+
+CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb
LANGUAGE plperlu
TRANSFORM FOR TYPE jsonb
AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
+elog(INFO, Dumper($_[0]));
+die 'unexpected '.(ref($_[0]) || 'not a').' reference'
+ if ref($_[0]) ne $_[1];
return $_[0];
$$;
-SELECT roundtrip('null');
- roundtrip
------------
- null
+SELECT roundtrip('null') is null;
+INFO: $VAR1 = undef;
+ ?column?
+----------
+ t
(1 row)
SELECT roundtrip('1');
+INFO: $VAR1 = '1';
roundtrip
-----------
1
(1 row)
-SELECT roundtrip('1E+131071');
-ERROR: cannot convert infinite value to jsonb
-CONTEXT: PL/Perl function "roundtrip"
+-- skip because Data::Dumper produces a platform-dependent spelling of infinity
+-- SELECT roundtrip('1E+131071');
SELECT roundtrip('-1');
+INFO: $VAR1 = '-1';
roundtrip
-----------
-1
(1 row)
SELECT roundtrip('1.2');
+INFO: $VAR1 = '1.2';
roundtrip
-----------
1.2
(1 row)
SELECT roundtrip('-1.2');
+INFO: $VAR1 = '-1.2';
roundtrip
-----------
-1.2
(1 row)
SELECT roundtrip('"string"');
+INFO: $VAR1 = 'string';
roundtrip
-----------
"string"
(1 row)
SELECT roundtrip('"NaN"');
+INFO: $VAR1 = 'NaN';
roundtrip
-----------
"NaN"
(1 row)
SELECT roundtrip('true');
+INFO: $VAR1 = '1';
roundtrip
-----------
1
(1 row)
SELECT roundtrip('false');
+INFO: $VAR1 = '0';
roundtrip
-----------
0
(1 row)
-SELECT roundtrip('[]');
+SELECT roundtrip('[]', 'ARRAY');
+INFO: $VAR1 = [];
roundtrip
-----------
[]
(1 row)
-SELECT roundtrip('[null, null]');
+SELECT roundtrip('[null, null]', 'ARRAY');
+INFO: $VAR1 = [undef,undef];
roundtrip
--------------
[null, null]
(1 row)
-SELECT roundtrip('[1, 2, 3]');
+SELECT roundtrip('[1, 2, 3]', 'ARRAY');
+INFO: $VAR1 = ['1','2','3'];
roundtrip
-----------
[1, 2, 3]
(1 row)
-SELECT roundtrip('[-1, 2, -3]');
+SELECT roundtrip('[-1, 2, -3]', 'ARRAY');
+INFO: $VAR1 = ['-1','2','-3'];
roundtrip
-------------
[-1, 2, -3]
(1 row)
-SELECT roundtrip('[1.2, 2.3, 3.4]');
+SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY');
+INFO: $VAR1 = ['1.2','2.3','3.4'];
roundtrip
-----------------
[1.2, 2.3, 3.4]
(1 row)
-SELECT roundtrip('[-1.2, 2.3, -3.4]');
+SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY');
+INFO: $VAR1 = ['-1.2','2.3','-3.4'];
roundtrip
-------------------
[-1.2, 2.3, -3.4]
(1 row)
-SELECT roundtrip('["string1", "string2"]');
+SELECT roundtrip('["string1", "string2"]', 'ARRAY');
+INFO: $VAR1 = ['string1','string2'];
roundtrip
------------------------
["string1", "string2"]
(1 row)
-SELECT roundtrip('{}');
+SELECT roundtrip('[["string1", "string2"]]', 'ARRAY');
+INFO: $VAR1 = [['string1','string2']];
+ roundtrip
+--------------------------
+ [["string1", "string2"]]
+(1 row)
+
+SELECT roundtrip('{}', 'HASH');
+INFO: $VAR1 = {};
roundtrip
-----------
{}
(1 row)
-SELECT roundtrip('{"1": null}');
+SELECT roundtrip('{"1": null}', 'HASH');
+INFO: $VAR1 = {'1' => undef};
roundtrip
-------------
{"1": null}
(1 row)
-SELECT roundtrip('{"1": 1}');
+SELECT roundtrip('{"1": 1}', 'HASH');
+INFO: $VAR1 = {'1' => '1'};
roundtrip
-----------
{"1": 1}
(1 row)
-SELECT roundtrip('{"1": -1}');
+SELECT roundtrip('{"1": -1}', 'HASH');
+INFO: $VAR1 = {'1' => '-1'};
roundtrip
-----------
{"1": -1}
(1 row)
-SELECT roundtrip('{"1": 1.1}');
+SELECT roundtrip('{"1": 1.1}', 'HASH');
+INFO: $VAR1 = {'1' => '1.1'};
roundtrip
------------
{"1": 1.1}
(1 row)
-SELECT roundtrip('{"1": -1.1}');
+SELECT roundtrip('{"1": -1.1}', 'HASH');
+INFO: $VAR1 = {'1' => '-1.1'};
roundtrip
-------------
{"1": -1.1}
(1 row)
-SELECT roundtrip('{"1": "string1"}');
+SELECT roundtrip('{"1": "string1"}', 'HASH');
+INFO: $VAR1 = {'1' => 'string1'};
roundtrip
------------------
{"1": "string1"}
(1 row)
-SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
+SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH');
+INFO: $VAR1 = {'1' => {'2' => ['3','4','5']},'2' => '3'};
roundtrip
---------------------------------
{"1": {"2": [3, 4, 5]}, "2": 3}
@@ -207,4 +277,4 @@ SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
\set VERBOSITY terse \\ -- suppress cascade details
DROP EXTENSION plperlu CASCADE;
-NOTICE: drop cascades to 6 other objects
+NOTICE: drop cascades to 8 other objects
diff --git a/contrib/jsonb_plperl/jsonb_plperl.c b/contrib/jsonb_plperl/jsonb_plperl.c
index 837bae2ab50..04b04df953f 100644
--- a/contrib/jsonb_plperl/jsonb_plperl.c
+++ b/contrib/jsonb_plperl/jsonb_plperl.c
@@ -1,11 +1,7 @@
#include "postgres.h"
-#include <float.h>
#include <math.h>
-/* Defined by Perl */
-#undef _
-
#include "fmgr.h"
#include "plperl.h"
#include "plperl_helpers.h"
@@ -18,7 +14,7 @@ static SV *Jsonb_to_SV(JsonbContainer *jsonb);
static JsonbValue *SV_to_JsonbValue(SV *obj, JsonbParseState **ps, bool is_elem);
-static SV *
+static SV *
JsonbValue_to_SV(JsonbValue *jbv)
{
dTHX;
@@ -26,13 +22,14 @@ JsonbValue_to_SV(JsonbValue *jbv)
switch (jbv->type)
{
case jbvBinary:
- return newRV(Jsonb_to_SV(jbv->val.binary.data));
+ return Jsonb_to_SV(jbv->val.binary.data);
case jbvNumeric:
{
char *str = DatumGetCString(DirectFunctionCall1(numeric_out,
NumericGetDatum(jbv->val.numeric)));
SV *result = newSVnv(SvNV(cstr2sv(str)));
+
pfree(str);
return result;
}
@@ -42,6 +39,7 @@ JsonbValue_to_SV(JsonbValue *jbv)
char *str = pnstrdup(jbv->val.string.val,
jbv->val.string.len);
SV *result = cstr2sv(str);
+
pfree(str);
return result;
}
@@ -81,7 +79,7 @@ Jsonb_to_SV(JsonbContainer *jsonb)
(r = JsonbIteratorNext(&it, &tmp, true)) != WJB_DONE)
elog(ERROR, "unexpected jsonb token: %d", r);
- return newRV(JsonbValue_to_SV(&v));
+ return JsonbValue_to_SV(&v);
}
else
{
@@ -93,7 +91,7 @@ Jsonb_to_SV(JsonbContainer *jsonb)
av_push(av, JsonbValue_to_SV(&v));
}
- return (SV *) av;
+ return newRV((SV *) av);
}
case WJB_BEGIN_OBJECT:
@@ -118,7 +116,7 @@ Jsonb_to_SV(JsonbContainer *jsonb)
}
}
- return (SV *) hv;
+ return newRV((SV *) hv);
}
default:
@@ -191,12 +189,29 @@ SV_to_JsonbValue(SV *in, JsonbParseState **jsonb_state, bool is_elem)
case SVt_PVHV:
return HV_to_JsonbValue((HV *) in, jsonb_state);
- case SVt_NULL:
- out.type = jbvNull;
- break;
-
default:
- if (SvIOK(in))
+ if (!SvOK(in))
+ {
+ out.type = jbvNull;
+ }
+ else if (SvUOK(in))
+ {
+ /*
+ * If UV is >=64 bits, we have no better way to make this
+ * happen than converting to text and back. Given the low
+ * usage of UV in Perl code, it's not clear it's worth working
+ * hard to provide alternate code paths.
+ */
+ const char *strval = SvPV_nolen(in);
+
+ out.type = jbvNumeric;
+ out.val.numeric =
+ DatumGetNumeric(DirectFunctionCall3(numeric_in,
+ CStringGetDatum(strval),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1)));
+ }
+ else if (SvIOK(in))
{
IV ival = SvIV(in);
@@ -209,10 +224,22 @@ SV_to_JsonbValue(SV *in, JsonbParseState **jsonb_state, bool is_elem)
{
double nval = SvNV(in);
+ /*
+ * jsonb doesn't allow infinity or NaN (per JSON
+ * specification), but the numeric type that is used for the
+ * storage accepts NaN, so we have to prevent it here
+ * explicitly. We don't really have to check for isinf()
+ * here, as numeric doesn't allow it and it would be caught
+ * later, but it makes for a nicer error message.
+ */
if (isinf(nval))
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- (errmsg("cannot convert infinite value to jsonb"))));
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ (errmsg("cannot convert infinity to jsonb"))));
+ if (isnan(nval))
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ (errmsg("cannot convert NaN to jsonb"))));
out.type = jbvNumeric;
out.val.numeric =
@@ -254,7 +281,7 @@ jsonb_to_plperl(PG_FUNCTION_ARGS)
Jsonb *in = PG_GETARG_JSONB_P(0);
SV *sv = Jsonb_to_SV(&in->root);
- return PointerGetDatum(newRV(sv));
+ return PointerGetDatum(sv);
}
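
The SvUOK branch added above takes the long way around on purpose: SvIV() would truncate a 64-bit UV such as ~0, so the value is rendered to decimal text and parsed back with numeric_in(), preserving full precision (hence the test accepting either 18446744073709551615 or 4294967295, depending on the platform's UV width). Isolated as a helper (uv_to_numeric is an illustrative name):

#include "postgres.h"

#include "fmgr.h"
#include "plperl.h"
#include "utils/fmgrprotos.h"
#include "utils/numeric.h"

/* Illustrative: lossless conversion of an unsigned Perl value to numeric. */
static Numeric
uv_to_numeric(SV *in)
{
	dTHX;
	const char *strval = SvPV_nolen(in);	/* decimal text, no truncation */

	return DatumGetNumeric(DirectFunctionCall3(numeric_in,
											   CStringGetDatum(strval),
											   ObjectIdGetDatum(InvalidOid),
											   Int32GetDatum(-1)));
}
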
diff --git a/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql b/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql
index 99b8644e3b6..aa84b37bef7 100644
--- a/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql
+++ b/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql
@@ -1,19 +1,19 @@
-/* contrib/json_plperl/jsonb_plperl--1.0.sql */
+/* contrib/jsonb_plperl/jsonb_plperlu--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION jsonb_plperlu" to load this file. \quit
-CREATE FUNCTION jsonb_to_plperl(val internal) RETURNS internal
+CREATE FUNCTION jsonb_to_plperlu(val internal) RETURNS internal
LANGUAGE C STRICT IMMUTABLE
-AS 'MODULE_PATHNAME';
+AS 'MODULE_PATHNAME', 'jsonb_to_plperl';
-CREATE FUNCTION plperl_to_jsonb(val internal) RETURNS jsonb
+CREATE FUNCTION plperlu_to_jsonb(val internal) RETURNS jsonb
LANGUAGE C STRICT IMMUTABLE
-AS 'MODULE_PATHNAME';
+AS 'MODULE_PATHNAME', 'plperl_to_jsonb';
CREATE TRANSFORM FOR jsonb LANGUAGE plperlu (
- FROM SQL WITH FUNCTION jsonb_to_plperl(internal),
- TO SQL WITH FUNCTION plperl_to_jsonb(internal)
+ FROM SQL WITH FUNCTION jsonb_to_plperlu(internal),
+ TO SQL WITH FUNCTION plperlu_to_jsonb(internal)
);
COMMENT ON TRANSFORM FOR jsonb LANGUAGE plperlu IS 'transform between jsonb and Perl';
diff --git a/contrib/jsonb_plperl/sql/jsonb_plperl.sql b/contrib/jsonb_plperl/sql/jsonb_plperl.sql
index 8b0a8764afa..a5b2cffe6b7 100644
--- a/contrib/jsonb_plperl/sql/jsonb_plperl.sql
+++ b/contrib/jsonb_plperl/sql/jsonb_plperl.sql
@@ -34,6 +34,18 @@ $$;
SELECT testSVToJsonb();
+CREATE FUNCTION testUVToJsonb() RETURNS jsonb
+LANGUAGE plperl
+TRANSFORM FOR TYPE jsonb
+as $$
+$val = ~0;
+return $val;
+$$;
+
+-- this might produce either 18446744073709551615 or 4294967295
+SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb);
+
+
-- this revealed a bug in the original implementation
CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb
LANGUAGE plperl
@@ -45,15 +57,31 @@ $$;
SELECT testRegexpResultToJsonb();
-CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb
+-- this revealed a different bug
+CREATE FUNCTION testTextToJsonbObject(text) RETURNS jsonb
+LANGUAGE plperl
+TRANSFORM FOR TYPE jsonb
+AS $$
+my $x = shift;
+return {a => $x};
+$$;
+
+SELECT testTextToJsonbObject('abc');
+SELECT testTextToJsonbObject(NULL);
+
+
+CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb
LANGUAGE plperl
TRANSFORM FOR TYPE jsonb
AS $$
+# can't use Data::Dumper, but let's at least check for unexpected ref type
+die 'unexpected '.(ref($_[0]) || 'not a').' reference'
+ if ref($_[0]) ne $_[1];
return $_[0];
$$;
-SELECT roundtrip('null');
+SELECT roundtrip('null') is null;
SELECT roundtrip('1');
SELECT roundtrip('1E+131071');
SELECT roundtrip('-1');
@@ -65,23 +93,24 @@ SELECT roundtrip('"NaN"');
SELECT roundtrip('true');
SELECT roundtrip('false');
-SELECT roundtrip('[]');
-SELECT roundtrip('[null, null]');
-SELECT roundtrip('[1, 2, 3]');
-SELECT roundtrip('[-1, 2, -3]');
-SELECT roundtrip('[1.2, 2.3, 3.4]');
-SELECT roundtrip('[-1.2, 2.3, -3.4]');
-SELECT roundtrip('["string1", "string2"]');
-
-SELECT roundtrip('{}');
-SELECT roundtrip('{"1": null}');
-SELECT roundtrip('{"1": 1}');
-SELECT roundtrip('{"1": -1}');
-SELECT roundtrip('{"1": 1.1}');
-SELECT roundtrip('{"1": -1.1}');
-SELECT roundtrip('{"1": "string1"}');
-
-SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
+SELECT roundtrip('[]', 'ARRAY');
+SELECT roundtrip('[null, null]', 'ARRAY');
+SELECT roundtrip('[1, 2, 3]', 'ARRAY');
+SELECT roundtrip('[-1, 2, -3]', 'ARRAY');
+SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY');
+SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY');
+SELECT roundtrip('["string1", "string2"]', 'ARRAY');
+SELECT roundtrip('[["string1", "string2"]]', 'ARRAY');
+
+SELECT roundtrip('{}', 'HASH');
+SELECT roundtrip('{"1": null}', 'HASH');
+SELECT roundtrip('{"1": 1}', 'HASH');
+SELECT roundtrip('{"1": -1}', 'HASH');
+SELECT roundtrip('{"1": 1.1}', 'HASH');
+SELECT roundtrip('{"1": -1.1}', 'HASH');
+SELECT roundtrip('{"1": "string1"}', 'HASH');
+
+SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH');
\set VERBOSITY terse \\ -- suppress cascade details
diff --git a/contrib/jsonb_plperl/sql/jsonb_plperlu.sql b/contrib/jsonb_plperl/sql/jsonb_plperlu.sql
index 9287f7672f7..c68ef7308a9 100644
--- a/contrib/jsonb_plperl/sql/jsonb_plperlu.sql
+++ b/contrib/jsonb_plperl/sql/jsonb_plperlu.sql
@@ -34,6 +34,18 @@ $$;
SELECT testSVToJsonb();
+CREATE FUNCTION testUVToJsonb() RETURNS jsonb
+LANGUAGE plperlu
+TRANSFORM FOR TYPE jsonb
+as $$
+$val = ~0;
+return $val;
+$$;
+
+-- this might produce either 18446744073709551615 or 4294967295
+SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb);
+
+
-- this revealed a bug in the original implementation
CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb
LANGUAGE plperlu
@@ -45,17 +57,37 @@ $$;
SELECT testRegexpResultToJsonb();
-CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb
+-- this revealed a different bug
+CREATE FUNCTION testTextToJsonbObject(text) RETURNS jsonb
+LANGUAGE plperlu
+TRANSFORM FOR TYPE jsonb
+AS $$
+my $x = shift;
+return {a => $x};
+$$;
+
+SELECT testTextToJsonbObject('abc');
+SELECT testTextToJsonbObject(NULL);
+
+
+CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb
LANGUAGE plperlu
TRANSFORM FOR TYPE jsonb
AS $$
+use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 0;
+elog(INFO, Dumper($_[0]));
+die 'unexpected '.(ref($_[0]) || 'not a').' reference'
+ if ref($_[0]) ne $_[1];
return $_[0];
$$;
-SELECT roundtrip('null');
+SELECT roundtrip('null') is null;
SELECT roundtrip('1');
-SELECT roundtrip('1E+131071');
+-- skip because Data::Dumper produces a platform-dependent spelling of infinity
+-- SELECT roundtrip('1E+131071');
SELECT roundtrip('-1');
SELECT roundtrip('1.2');
SELECT roundtrip('-1.2');
@@ -65,23 +97,24 @@ SELECT roundtrip('"NaN"');
SELECT roundtrip('true');
SELECT roundtrip('false');
-SELECT roundtrip('[]');
-SELECT roundtrip('[null, null]');
-SELECT roundtrip('[1, 2, 3]');
-SELECT roundtrip('[-1, 2, -3]');
-SELECT roundtrip('[1.2, 2.3, 3.4]');
-SELECT roundtrip('[-1.2, 2.3, -3.4]');
-SELECT roundtrip('["string1", "string2"]');
-
-SELECT roundtrip('{}');
-SELECT roundtrip('{"1": null}');
-SELECT roundtrip('{"1": 1}');
-SELECT roundtrip('{"1": -1}');
-SELECT roundtrip('{"1": 1.1}');
-SELECT roundtrip('{"1": -1.1}');
-SELECT roundtrip('{"1": "string1"}');
-
-SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}');
+SELECT roundtrip('[]', 'ARRAY');
+SELECT roundtrip('[null, null]', 'ARRAY');
+SELECT roundtrip('[1, 2, 3]', 'ARRAY');
+SELECT roundtrip('[-1, 2, -3]', 'ARRAY');
+SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY');
+SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY');
+SELECT roundtrip('["string1", "string2"]', 'ARRAY');
+SELECT roundtrip('[["string1", "string2"]]', 'ARRAY');
+
+SELECT roundtrip('{}', 'HASH');
+SELECT roundtrip('{"1": null}', 'HASH');
+SELECT roundtrip('{"1": 1}', 'HASH');
+SELECT roundtrip('{"1": -1}', 'HASH');
+SELECT roundtrip('{"1": 1.1}', 'HASH');
+SELECT roundtrip('{"1": -1.1}', 'HASH');
+SELECT roundtrip('{"1": "string1"}', 'HASH');
+
+SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH');
\set VERBOSITY terse \\ -- suppress cascade details
diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c
index 548826f592f..776cf7c8b9b 100644
--- a/contrib/jsonb_plpython/jsonb_plpython.c
+++ b/contrib/jsonb_plpython/jsonb_plpython.c
@@ -5,6 +5,7 @@
#include "plpy_typeio.h"
#include "utils/jsonb.h"
#include "utils/fmgrprotos.h"
+#include "utils/numeric.h"
PG_MODULE_MAGIC;
@@ -25,7 +26,7 @@ static PyObject *decimal_constructor;
static PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb);
static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj,
- JsonbParseState **jsonb_state, bool is_elem);
+ JsonbParseState **jsonb_state, bool is_elem);
#if PY_MAJOR_VERSION >= 3
typedef PyObject *(*PLyUnicode_FromStringAndSize_t)
@@ -132,7 +133,7 @@ PLyObject_FromJsonbValue(JsonbValue *jsonbValue)
}
/*
- * PLyObject_FromJsonb
+ * PLyObject_FromJsonbContainer
*
* Transform JsonbContainer to PyObject.
*/
@@ -163,56 +164,91 @@ PLyObject_FromJsonbContainer(JsonbContainer *jsonb)
}
else
{
- /* array in v */
+ PyObject *volatile elem = NULL;
+
result = PyList_New(0);
if (!result)
return NULL;
- while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE)
+ PG_TRY();
{
- if (r == WJB_ELEM)
+ while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE)
{
- PyObject *elem = PLyObject_FromJsonbValue(&v);
+ if (r != WJB_ELEM)
+ continue;
+
+ elem = PLyObject_FromJsonbValue(&v);
PyList_Append(result, elem);
Py_XDECREF(elem);
+ elem = NULL;
}
}
+ PG_CATCH();
+ {
+ Py_XDECREF(elem);
+ Py_XDECREF(result);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
}
break;
case WJB_BEGIN_OBJECT:
- result = PyDict_New();
- if (!result)
- return NULL;
-
- while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE)
{
- if (r == WJB_KEY)
- {
- PyObject *key = PLyString_FromJsonbValue(&v);
-
- if (!key)
- return NULL;
+ PyObject *volatile result_v = PyDict_New();
+ PyObject *volatile key = NULL;
+ PyObject *volatile val = NULL;
- r = JsonbIteratorNext(&it, &v, true);
+ if (!result_v)
+ return NULL;
- if (r == WJB_VALUE)
+ PG_TRY();
+ {
+ while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE)
{
- PyObject *value = PLyObject_FromJsonbValue(&v);
+ if (r != WJB_KEY)
+ continue;
+
+ key = PLyString_FromJsonbValue(&v);
+ if (!key)
+ {
+ Py_XDECREF(result_v);
+ result_v = NULL;
+ break;
+ }
+
+ if ((r = JsonbIteratorNext(&it, &v, true)) != WJB_VALUE)
+ elog(ERROR, "unexpected jsonb token: %d", r);
- if (!value)
+ val = PLyObject_FromJsonbValue(&v);
+ if (!val)
{
Py_XDECREF(key);
- return NULL;
+ key = NULL;
+ Py_XDECREF(result_v);
+ result_v = NULL;
+ break;
}
- PyDict_SetItem(result, key, value);
- Py_XDECREF(value);
- }
+ PyDict_SetItem(result_v, key, val);
+ Py_XDECREF(key);
+ key = NULL;
+ Py_XDECREF(val);
+ val = NULL;
+ }
+ }
+ PG_CATCH();
+ {
+ Py_XDECREF(result_v);
Py_XDECREF(key);
+ Py_XDECREF(val);
+ PG_RE_THROW();
}
+ PG_END_TRY();
+
+ result = result_v;
}
break;
@@ -233,20 +269,15 @@ static JsonbValue *
PLyMapping_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state)
{
Py_ssize_t pcount;
- JsonbValue *out = NULL;
-
- /* We need it volatile, since we use it after longjmp */
- volatile PyObject *items_v = NULL;
+ PyObject *volatile items;
+ JsonbValue *volatile out;
pcount = PyMapping_Size(obj);
- items_v = PyMapping_Items(obj);
+ items = PyMapping_Items(obj);
PG_TRY();
{
Py_ssize_t i;
- PyObject *items;
-
- items = (PyObject *) items_v;
pushJsonbValue(jsonb_state, WJB_BEGIN_OBJECT, NULL);
@@ -278,11 +309,13 @@ PLyMapping_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state)
}
PG_CATCH();
{
- Py_DECREF(items_v);
+ Py_DECREF(items);
PG_RE_THROW();
}
PG_END_TRY();
+ Py_DECREF(items);
+
return out;
}
@@ -297,17 +330,30 @@ PLySequence_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state)
{
Py_ssize_t i;
Py_ssize_t pcount;
+ PyObject *volatile value = NULL;
pcount = PySequence_Size(obj);
pushJsonbValue(jsonb_state, WJB_BEGIN_ARRAY, NULL);
- for (i = 0; i < pcount; i++)
+ PG_TRY();
{
- PyObject *value = PySequence_GetItem(obj, i);
+ for (i = 0; i < pcount; i++)
+ {
+ value = PySequence_GetItem(obj, i);
+ Assert(value);
- (void) PLyObject_ToJsonbValue(value, jsonb_state, true);
+ (void) PLyObject_ToJsonbValue(value, jsonb_state, true);
+ Py_XDECREF(value);
+ value = NULL;
+ }
}
+ PG_CATCH();
+ {
+ Py_XDECREF(value);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
return pushJsonbValue(jsonb_state, WJB_END_ARRAY, NULL);
}
@@ -325,8 +371,13 @@ PLyNumber_ToJsonbValue(PyObject *obj, JsonbValue *jbvNum)
PG_TRY();
{
- num = DatumGetNumeric(DirectFunctionCall3(numeric_in,
- CStringGetDatum(str), 0, -1));
+ Datum numd;
+
+ numd = DirectFunctionCall3(numeric_in,
+ CStringGetDatum(str),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1));
+ num = DatumGetNumeric(numd);
}
PG_CATCH();
{
@@ -338,6 +389,16 @@ PLyNumber_ToJsonbValue(PyObject *obj, JsonbValue *jbvNum)
pfree(str);
+ /*
+ * jsonb doesn't allow NaN (per JSON specification), so we have to prevent
+ * it here explicitly. (Infinity is also not allowed in jsonb, but
+ * numeric_in above already catches that.)
+ */
+ if (numeric_is_nan(num))
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ (errmsg("cannot convert NaN to jsonb"))));
+
jbvNum->type = jbvNumeric;
jbvNum->val.numeric = num;
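
Two details in this hunk deserve a note: DirectFunctionCall3() takes real
Datums, hence the ObjectIdGetDatum()/Int32GetDatum() wrappers for
numeric_in()'s typioparam and typmod arguments, and numeric_in() already
rejects "Infinity", so NaN is the only non-finite value left to screen out
by hand. A condensed sketch of the conversion, assuming backend headers and
the exported numeric_is_nan():

    static void
    cstring_to_jsonb_numeric(char *str, JsonbValue *jbv)
    {
        Datum       numd;
        Numeric     num;

        numd = DirectFunctionCall3(numeric_in,
                                   CStringGetDatum(str),
                                   ObjectIdGetDatum(InvalidOid),
                                   Int32GetDatum(-1));
        num = DatumGetNumeric(numd);

        /* jsonb follows the JSON spec, which has no NaN literal */
        if (numeric_is_nan(num))
            ereport(ERROR,
                    (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
                     errmsg("cannot convert NaN to jsonb")));

        jbv->type = jbvNumeric;
        jbv->val.numeric = num;
    }
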
@@ -373,13 +434,13 @@ PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_ele
out->type = jbvNull;
else if (PyString_Check(obj) || PyUnicode_Check(obj))
PLyString_ToJsonbValue(obj, out);
- /*
- * PyNumber_Check() returns true for booleans, so boolean check should come
- * first.
- */
+
+ /*
+ * PyNumber_Check() returns true for booleans, so boolean check should
+ * come first.
+ */
else if (PyBool_Check(obj))
{
- out = palloc(sizeof(JsonbValue));
out->type = jbvBool;
out->val.boolean = (obj == Py_True);
}
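
The restructuring above applies one idiom throughout: any Python reference
that might need releasing from PG_CATCH() is held in a pointer declared
"PyObject *volatile", so its value is still trustworthy after the longjmp
that elog(ERROR) performs, and no reference leaks when a PostgreSQL error
escapes mid-loop. A minimal sketch of the shape, with next_elem() standing
in as a hypothetical producer that may throw:

    static PyObject *
    build_list_safely(void)
    {
        PyObject   *volatile result = PyList_New(0);
        PyObject   *volatile elem = NULL;

        if (!result)
            return NULL;

        PG_TRY();
        {
            /* next_elem() is hypothetical and may elog(ERROR) internally */
            while ((elem = next_elem()) != NULL)
            {
                PyList_Append(result, elem);    /* list takes its own ref */
                Py_XDECREF(elem);
                elem = NULL;
            }
        }
        PG_CATCH();
        {
            Py_XDECREF(elem);       /* release whatever was in flight */
            Py_XDECREF(result);     /* and the partially built list */
            PG_RE_THROW();
        }
        PG_END_TRY();

        return result;
    }
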
diff --git a/contrib/ltree/Makefile b/contrib/ltree/Makefile
index c101603e6cd..416c8da3127 100644
--- a/contrib/ltree/Makefile
+++ b/contrib/ltree/Makefile
@@ -9,6 +9,8 @@ EXTENSION = ltree
DATA = ltree--1.1.sql ltree--1.0--1.1.sql ltree--unpackaged--1.0.sql
PGFILEDESC = "ltree - hierarchical label data type"
+HEADERS = ltree.h
+
REGRESS = ltree
ifdef USE_PGXS
diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c
index 28bf7ad9634..c33ac4eedd2 100644
--- a/contrib/ltree/_ltree_gist.c
+++ b/contrib/ltree/_ltree_gist.c
@@ -9,6 +9,8 @@
#include "access/gist.h"
#include "access/stratnum.h"
+#include "port/pg_bitutils.h"
+
#include "crc32.h"
#include "ltree.h"
@@ -23,26 +25,6 @@ PG_FUNCTION_INFO_V1(_ltree_consistent);
#define GETENTRY(vec,pos) ((ltree_gist *) DatumGetPointer((vec)->vector[(pos)].key))
#define NEXTVAL(x) ( (ltree*)( (char*)(x) + INTALIGN( VARSIZE(x) ) ) )
-/* Number of one-bits in an unsigned byte */
-static const uint8 number_of_ones[256] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
-};
-
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
@@ -209,12 +191,7 @@ _ltree_union(PG_FUNCTION_ARGS)
static int32
sizebitvec(BITVECP sign)
{
- int32 size = 0,
- i;
-
- ALOOPBYTE
- size += number_of_ones[(unsigned char) sign[i]];
- return size;
+ return pg_popcount((const char *) sign, ASIGLEN);
}
static int
@@ -227,7 +204,8 @@ hemdistsign(BITVECP a, BITVECP b)
ALOOPBYTE
{
diff = (unsigned char) (a[i] ^ b[i]);
- dist += number_of_ones[diff];
+ /* Using the popcount functions here isn't likely to win */
+ dist += pg_number_of_ones[diff];
}
return dist;
}
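
The deleted lookup table was one of several private copies scattered around
the tree; src/port/pg_bitutils.c now centralizes both the per-byte table
(pg_number_of_ones) and pg_popcount(), which can use hardware popcount
instructions over a whole buffer. A small sketch of when each is the better
fit, assuming port/pg_bitutils.h:

    /* Whole-buffer weight: pg_popcount() batches the work and can use
     * hardware POPCNT, so it wins for a full signature. */
    static int64
    signature_weight(const char *sign, int len)
    {
        return (int64) pg_popcount(sign, len);
    }

    /* Per-byte distance in a tight XOR loop: here a table lookup avoids a
     * function call per byte, as the hemdistsign() comment above notes. */
    static int
    signature_distance(const unsigned char *a, const unsigned char *b, int len)
    {
        int         dist = 0;

        for (int i = 0; i < len; i++)
            dist += pg_number_of_ones[a[i] ^ b[i]];
        return dist;
    }
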
diff --git a/contrib/ltree/expected/ltree.out b/contrib/ltree/expected/ltree.out
index 3d5737d41b1..82269309056 100644
--- a/contrib/ltree/expected/ltree.out
+++ b/contrib/ltree/expected/ltree.out
@@ -259,6 +259,24 @@ SELECT lca('{1.2.3,1.2.3.4.5.6}');
1.2
(1 row)
+SELECT lca('{1.2.3}');
+ lca
+-----
+ 1.2
+(1 row)
+
+SELECT lca('{1}'), lca('{1}') IS NULL;
+ lca | ?column?
+-----+----------
+ | f
+(1 row)
+
+SELECT lca('{}') IS NULL;
+ ?column?
+----------
+ t
+(1 row)
+
SELECT lca('1.la.2.3','1.2.3.4.5.6');
lca
-----
diff --git a/contrib/ltree/lquery_op.c b/contrib/ltree/lquery_op.c
index b6d2deb1af4..62172d5ea14 100644
--- a/contrib/ltree/lquery_op.c
+++ b/contrib/ltree/lquery_op.c
@@ -50,7 +50,7 @@ getlexeme(char *start, char *end, int *len)
}
bool
- compare_subnode(ltree_level *t, char *qn, int len, int (*cmpptr) (const char *, const char *, size_t), bool anyend)
+compare_subnode(ltree_level *t, char *qn, int len, int (*cmpptr) (const char *, const char *, size_t), bool anyend)
{
char *endt = t->name + t->len;
char *endq = qn + len;
diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h
index e4b8c84fa62..366e58004c7 100644
--- a/contrib/ltree/ltree.h
+++ b/contrib/ltree/ltree.h
@@ -155,13 +155,13 @@ Datum ltree_textadd(PG_FUNCTION_ARGS);
/* Util function */
Datum ltree_in(PG_FUNCTION_ARGS);
-bool ltree_execute(ITEM *curitem, void *checkval,
- bool calcnot, bool (*chkcond) (void *checkval, ITEM *val));
+bool ltree_execute(ITEM *curitem, void *checkval,
+ bool calcnot, bool (*chkcond) (void *checkval, ITEM *val));
int ltree_compare(const ltree *a, const ltree *b);
bool inner_isparent(const ltree *c, const ltree *p);
-bool compare_subnode(ltree_level *t, char *q, int len,
- int (*cmpptr) (const char *, const char *, size_t), bool anyend);
+bool compare_subnode(ltree_level *t, char *q, int len,
+ int (*cmpptr) (const char *, const char *, size_t), bool anyend);
ltree *lca_inner(ltree **a, int len);
int ltree_strncasecmp(const char *a, const char *b, size_t s);
diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c
index d62ca02521b..df61c63180c 100644
--- a/contrib/ltree/ltree_op.c
+++ b/contrib/ltree/ltree_op.c
@@ -45,17 +45,24 @@ ltree_compare(const ltree *a, const ltree *b)
ltree_level *bl = LTREE_FIRST(b);
int an = a->numlevel;
int bn = b->numlevel;
- int res = 0;
while (an > 0 && bn > 0)
{
+ int res;
+
if ((res = memcmp(al->name, bl->name, Min(al->len, bl->len))) == 0)
{
if (al->len != bl->len)
return (al->len - bl->len) * 10 * (an + 1);
}
else
+ {
+ if (res < 0)
+ res = -1;
+ else
+ res = 1;
return res * 10 * (an + 1);
+ }
an--;
bn--;
@@ -146,7 +153,7 @@ inner_isparent(const ltree *c, const ltree *p)
{
if (cl->len != pl->len)
return false;
- if (memcmp(cl->name, pl->name, cl->len))
+ if (memcmp(cl->name, pl->name, cl->len) != 0)
return false;
pn--;
@@ -402,22 +409,34 @@ ltree_textadd(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(r);
}
+/*
+ * Common code for variants of lca(), find longest common ancestor of inputs
+ *
+ * Returns NULL if there is no common ancestor, ie, the longest common
+ * prefix is empty.
+ */
ltree *
lca_inner(ltree **a, int len)
{
int tmp,
- num = ((*a)->numlevel) ? (*a)->numlevel - 1 : 0;
- ltree **ptr = a + 1;
- int i,
- reslen = LTREE_HDRSIZE;
+ num,
+ i,
+ reslen;
+ ltree **ptr;
ltree_level *l1,
*l2;
ltree *res;
-
+ if (len <= 0)
+ return NULL; /* no inputs? */
if ((*a)->numlevel == 0)
- return NULL;
+ return NULL; /* any empty input means NULL result */
+
+ /* num is the length of the longest common ancestor so far */
+ num = (*a)->numlevel - 1;
+ /* Compare each additional input to *a */
+ ptr = a + 1;
while (ptr - a < len)
{
if ((*ptr)->numlevel == 0)
@@ -428,11 +447,12 @@ lca_inner(ltree **a, int len)
{
l1 = LTREE_FIRST(*a);
l2 = LTREE_FIRST(*ptr);
- tmp = num;
+ tmp = Min(num, (*ptr)->numlevel - 1);
num = 0;
- for (i = 0; i < Min(tmp, (*ptr)->numlevel - 1); i++)
+ for (i = 0; i < tmp; i++)
{
- if (l1->len == l2->len && memcmp(l1->name, l2->name, l1->len) == 0)
+ if (l1->len == l2->len &&
+ memcmp(l1->name, l2->name, l1->len) == 0)
num = i + 1;
else
break;
@@ -443,6 +463,8 @@ lca_inner(ltree **a, int len)
ptr++;
}
+ /* Now compute size of result ... */
+ reslen = LTREE_HDRSIZE;
l1 = LTREE_FIRST(*a);
for (i = 0; i < num; i++)
{
@@ -450,6 +472,7 @@ lca_inner(ltree **a, int len)
l1 = LEVEL_NEXT(l1);
}
+ /* ... and construct it by copying from *a */
res = (ltree *) palloc0(reslen);
SET_VARSIZE(res, reslen);
res->numlevel = num;
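
The ltree_compare() change above is subtler than it looks: memcmp() only
guarantees the sign of its result, and its magnitude (often a raw byte
difference) could overflow int once multiplied by 10 * (an + 1).
Normalizing to -1/+1 first makes the scaling safe. Distilled into a helper
(al, bl, and an are the locals from the hunk above):

    /* Reduce a memcmp()-style result to -1, 0, or +1 before scaling */
    static inline int
    sign_of(int cmp)
    {
        return (cmp > 0) - (cmp < 0);
    }

    int         res = sign_of(memcmp(al->name, bl->name,
                                     Min(al->len, bl->len)));

    if (res != 0)
        return res * 10 * (an + 1);     /* scaling a bare sign cannot overflow */
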
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
index 56bf39d145b..054c5d93356 100644
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -166,7 +166,7 @@ pushquery(QPRS_STATE *state, int32 type, int32 val, int32 distance, int32 lenval
}
/*
- * This function is used for query_txt parsing
+ * This function is used for query text parsing
*/
static void
pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
@@ -368,7 +368,7 @@ queryin(char *buf)
state.str = tmp;
}
- /* set user friendly-operand view */
+ /* set user-friendly operand view */
memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen);
pfree(state.op);
diff --git a/contrib/ltree/sql/ltree.sql b/contrib/ltree/sql/ltree.sql
index e9f74909a64..846b04e48ee 100644
--- a/contrib/ltree/sql/ltree.sql
+++ b/contrib/ltree/sql/ltree.sql
@@ -54,6 +54,9 @@ SELECT lca('{la.2.3,1.2.3.4.5.6,""}') IS NULL;
SELECT lca('{la.2.3,1.2.3.4.5.6}') IS NULL;
SELECT lca('{1.la.2.3,1.2.3.4.5.6}');
SELECT lca('{1.2.3,1.2.3.4.5.6}');
+SELECT lca('{1.2.3}');
+SELECT lca('{1}'), lca('{1}') IS NULL;
+SELECT lca('{}') IS NULL;
SELECT lca('1.la.2.3','1.2.3.4.5.6');
SELECT lca('1.2.3','1.2.3.4.5.6');
SELECT lca('1.2.2.3','1.2.3.4.5.6');
diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile
index 7e988c79935..ce2c0cd2e2f 100644
--- a/contrib/ltree_plpython/Makefile
+++ b/contrib/ltree_plpython/Makefile
@@ -4,19 +4,21 @@ MODULE_big = ltree_plpython$(python_majorversion)
OBJS = ltree_plpython.o $(WIN32RES)
PGFILEDESC = "ltree_plpython - ltree transform for plpython"
-PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/ltree -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"'
-
EXTENSION = ltree_plpythonu ltree_plpython2u ltree_plpython3u
DATA = ltree_plpythonu--1.0.sql ltree_plpython2u--1.0.sql ltree_plpython3u--1.0.sql
REGRESS = ltree_plpython
REGRESS_PLPYTHON3_MANGLE := $(REGRESS)
+PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"'
+
ifdef USE_PGXS
+PG_CPPFLAGS += -I$(includedir_server)/extension
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
+PG_CPPFLAGS += -I$(top_srcdir)/src/pl/plpython -I$(top_srcdir)/contrib
subdir = contrib/ltree_plpython
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c
index e88636a0a96..b254aa558d1 100644
--- a/contrib/ltree_plpython/ltree_plpython.c
+++ b/contrib/ltree_plpython/ltree_plpython.c
@@ -2,7 +2,7 @@
#include "fmgr.h"
#include "plpython.h"
-#include "ltree.h"
+#include "ltree/ltree.h"
PG_MODULE_MAGIC;
diff --git a/contrib/oid2name/.gitignore b/contrib/oid2name/.gitignore
index fdefde108dd..0410fb7afad 100644
--- a/contrib/oid2name/.gitignore
+++ b/contrib/oid2name/.gitignore
@@ -1 +1,3 @@
/oid2name
+
+/tmp_check/
diff --git a/contrib/oid2name/Makefile b/contrib/oid2name/Makefile
index 3eef8f60bea..361a80a7a12 100644
--- a/contrib/oid2name/Makefile
+++ b/contrib/oid2name/Makefile
@@ -6,6 +6,8 @@ PGAPPICON = win32
PROGRAM = oid2name
OBJS = oid2name.o $(WIN32RES)
+TAP_TESTS = 1
+
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS_INTERNAL = $(libpq_pgport)
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index 63e360c4c56..fa1e7959e77 100644
--- a/contrib/oid2name/oid2name.c
+++ b/contrib/oid2name/oid2name.c
@@ -11,9 +11,12 @@
#include "catalog/pg_class_d.h"
+#include "common/logging.h"
#include "fe_utils/connect.h"
#include "libpq-fe.h"
#include "pg_getopt.h"
+#include "getopt_long.h"
+
/* an extensible array to keep track of elements to show */
typedef struct
@@ -60,9 +63,30 @@ void sql_exec_dumpalltbspc(PGconn *, struct options *);
void
get_opts(int argc, char **argv, struct options *my_opts)
{
+ static struct option long_options[] = {
+ {"dbname", required_argument, NULL, 'd'},
+ {"host", required_argument, NULL, 'h'},
+ {"host", required_argument, NULL, 'H'}, /* deprecated */
+ {"filenode", required_argument, NULL, 'f'},
+ {"indexes", no_argument, NULL, 'i'},
+ {"oid", required_argument, NULL, 'o'},
+ {"port", required_argument, NULL, 'p'},
+ {"quiet", no_argument, NULL, 'q'},
+ {"tablespaces", no_argument, NULL, 's'},
+ {"system-objects", no_argument, NULL, 'S'},
+ {"table", required_argument, NULL, 't'},
+ {"username", required_argument, NULL, 'U'},
+ {"version", no_argument, NULL, 'V'},
+ {"extended", no_argument, NULL, 'x'},
+ {"help", no_argument, NULL, '?'},
+ {NULL, 0, NULL, 0}
+ };
+
int c;
const char *progname;
+ int optindex;
+ pg_logging_init(argv[0]);
progname = get_progname(argv[0]);
/* set the defaults */
@@ -93,7 +117,7 @@ get_opts(int argc, char **argv, struct options *my_opts)
}
/* get opts */
- while ((c = getopt(argc, argv, "H:p:U:d:t:o:f:qSxish")) != -1)
+ while ((c = getopt_long(argc, argv, "d:f:h:H:io:p:qsSt:U:x", long_options, &optindex)) != -1)
{
switch (c)
{
@@ -102,39 +126,40 @@ get_opts(int argc, char **argv, struct options *my_opts)
my_opts->dbname = pg_strdup(optarg);
break;
- /* specify one tablename to show */
- case 't':
- add_one_elt(optarg, my_opts->tables);
- break;
-
- /* specify one Oid to show */
- case 'o':
- add_one_elt(optarg, my_opts->oids);
- break;
-
/* specify one filenode to show */
case 'f':
add_one_elt(optarg, my_opts->filenodes);
break;
- /* don't show headers */
- case 'q':
- my_opts->quiet = true;
- break;
-
/* host to connect to */
- case 'H':
+ case 'H': /* deprecated */
+ case 'h':
my_opts->hostname = pg_strdup(optarg);
break;
+ /* also display indexes */
+ case 'i':
+ my_opts->indexes = true;
+ break;
+
+ /* specify one Oid to show */
+ case 'o':
+ add_one_elt(optarg, my_opts->oids);
+ break;
+
/* port to connect to on remote host */
case 'p':
my_opts->port = pg_strdup(optarg);
break;
- /* username */
- case 'U':
- my_opts->username = pg_strdup(optarg);
+ /* don't show headers */
+ case 'q':
+ my_opts->quiet = true;
+ break;
+
+ /* dump tablespaces only */
+ case 's':
+ my_opts->tablespaces = true;
break;
/* display system tables */
@@ -142,9 +167,14 @@ get_opts(int argc, char **argv, struct options *my_opts)
my_opts->systables = true;
break;
- /* also display indexes */
- case 'i':
- my_opts->indexes = true;
+ /* specify one tablename to show */
+ case 't':
+ add_one_elt(optarg, my_opts->tables);
+ break;
+
+ /* username */
+ case 'U':
+ my_opts->username = pg_strdup(optarg);
break;
/* display extra columns */
@@ -152,21 +182,19 @@ get_opts(int argc, char **argv, struct options *my_opts)
my_opts->extended = true;
break;
- /* dump tablespaces only */
- case 's':
- my_opts->tablespaces = true;
- break;
-
- case 'h':
- help(progname);
- exit(0);
- break;
-
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit(1);
}
}
+
+ if (optind < argc)
+ {
+ fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
+ progname, argv[optind]);
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ exit(1);
+ }
}
static void
@@ -176,22 +204,24 @@ help(const char *progname)
"Usage:\n"
" %s [OPTION]...\n"
"\nOptions:\n"
- " -d DBNAME database to connect to\n"
- " -f FILENODE show info for table with given file node\n"
- " -H HOSTNAME database server host or socket directory\n"
- " -i show indexes and sequences too\n"
- " -o OID show info for table with given OID\n"
- " -p PORT database server port number\n"
- " -q quiet (don't show headers)\n"
- " -s show all tablespaces\n"
- " -S show system objects too\n"
- " -t TABLE show info for named table\n"
- " -U NAME connect as specified database user\n"
- " -V, --version output version information, then exit\n"
- " -x extended (show additional columns)\n"
- " -?, --help show this help, then exit\n"
+ " -f, --filenode=FILENODE show info for table with given file node\n"
+ " -i, --indexes show indexes and sequences too\n"
+ " -o, --oid=OID show info for table with given OID\n"
+ " -q, --quiet quiet (don't show headers)\n"
+ " -s, --tablespaces show all tablespaces\n"
+ " -S, --system-objects show system objects too\n"
+ " -t, --table=TABLE show info for named table\n"
+ " -V, --version output version information, then exit\n"
+ " -x, --extended extended (show additional columns)\n"
+ " -?, --help show this help, then exit\n"
+ "\nConnection options:\n"
+ " -d, --dbname=DBNAME database to connect to\n"
+ " -h, --host=HOSTNAME database server host or socket directory\n"
+ " -H same as -h, deprecated option\n"
+ " -p, --port=PORT database server port number\n"
+ " -U, --username=USERNAME connect as specified database user\n"
"\nThe default action is to show all database OIDs.\n\n"
- "Report bugs to .\n",
+ "Report bugs to .\n",
progname, progname);
}
@@ -300,8 +330,8 @@ sql_conn(struct options *my_opts)
if (!conn)
{
- fprintf(stderr, "%s: could not connect to database %s\n",
- "oid2name", my_opts->dbname);
+ pg_log_error("could not connect to database %s",
+ my_opts->dbname);
exit(1);
}
@@ -319,8 +349,8 @@ sql_conn(struct options *my_opts)
/* check to see that the backend connection was successfully made */
if (PQstatus(conn) == CONNECTION_BAD)
{
- fprintf(stderr, "%s: could not connect to database %s: %s",
- "oid2name", my_opts->dbname, PQerrorMessage(conn));
+ pg_log_error("could not connect to database %s: %s",
+ my_opts->dbname, PQerrorMessage(conn));
PQfinish(conn);
exit(1);
}
@@ -328,8 +358,8 @@ sql_conn(struct options *my_opts)
res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, "oid2name: could not clear search_path: %s\n",
- PQerrorMessage(conn));
+ pg_log_error("could not clear search_path: %s",
+ PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
exit(-1);
@@ -362,8 +392,8 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
/* check and deal with errors */
if (!res || PQresultStatus(res) > 2)
{
- fprintf(stderr, "oid2name: query failed: %s\n", PQerrorMessage(conn));
- fprintf(stderr, "oid2name: query was: %s\n", todo);
+ pg_log_error("query failed: %s", PQerrorMessage(conn));
+ pg_log_error("query was: %s", todo);
PQclear(res);
PQfinish(conn);
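
The reworked get_opts() is the standard frontend pattern: a struct option
table maps each long name onto its short letter, the optstring lists every
short option (a colon follows those that take an argument), and a final
optind check rejects stray non-option arguments. A stripped-down standalone
sketch (plain <getopt.h> here; PostgreSQL frontends use their own
getopt_long.h wrapper):

    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(int argc, char **argv)
    {
        static struct option long_options[] = {
            {"dbname", required_argument, NULL, 'd'},
            {"quiet", no_argument, NULL, 'q'},
            {NULL, 0, NULL, 0}
        };
        int         c;
        int         optindex;

        while ((c = getopt_long(argc, argv, "d:q",
                                long_options, &optindex)) != -1)
        {
            switch (c)
            {
                case 'd':
                    printf("dbname = %s\n", optarg);
                    break;
                case 'q':
                    break;      /* quiet mode */
                default:
                    fprintf(stderr, "Try --help\n");
                    exit(1);
            }
        }

        /* non-option arguments start at argv[optind] */
        if (optind < argc)
        {
            fprintf(stderr, "too many command-line arguments (first is \"%s\")\n",
                    argv[optind]);
            exit(1);
        }
        return 0;
    }
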
diff --git a/contrib/oid2name/t/001_basic.pl b/contrib/oid2name/t/001_basic.pl
new file mode 100644
index 00000000000..fa2c5743f63
--- /dev/null
+++ b/contrib/oid2name/t/001_basic.pl
@@ -0,0 +1,12 @@
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 8;
+
+#########################################
+# Basic checks
+
+program_help_ok('oid2name');
+program_version_ok('oid2name');
+program_options_handling_ok('oid2name');
diff --git a/contrib/pageinspect/Makefile b/contrib/pageinspect/Makefile
index e5a581f141b..cfe01297fb0 100644
--- a/contrib/pageinspect/Makefile
+++ b/contrib/pageinspect/Makefile
@@ -5,7 +5,7 @@ OBJS = rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o \
brinfuncs.o ginfuncs.o hashfuncs.o $(WIN32RES)
EXTENSION = pageinspect
-DATA = pageinspect--1.6--1.7.sql \
+DATA = pageinspect--1.7--1.8.sql pageinspect--1.6--1.7.sql \
pageinspect--1.5.sql pageinspect--1.5--1.6.sql \
pageinspect--1.4--1.5.sql pageinspect--1.3--1.4.sql \
pageinspect--1.2--1.3.sql pageinspect--1.1--1.2.sql \
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index f4f0dea860f..04140eccb84 100644
--- a/contrib/pageinspect/brinfuncs.c
+++ b/contrib/pageinspect/brinfuncs.c
@@ -2,7 +2,7 @@
* brinfuncs.c
* Functions to investigate BRIN indexes
*
- * Copyright (c) 2014-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2014-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/brinfuncs.c
@@ -41,7 +41,7 @@ typedef struct brin_column_state
static Page verify_brin_page(bytea *raw_page, uint16 type,
- const char *strtype);
+ const char *strtype);
Datum
brin_page_type(PG_FUNCTION_ARGS)
diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c
index 558a8c41f49..8d27c9b0f6f 100644
--- a/contrib/pageinspect/btreefuncs.c
+++ b/contrib/pageinspect/btreefuncs.c
@@ -30,6 +30,7 @@
#include "pageinspect.h"
#include "access/nbtree.h"
+#include "access/relation.h"
#include "catalog/namespace.h"
#include "catalog/pg_am.h"
#include "funcapi.h"
@@ -429,7 +430,7 @@ bt_page_items_bytea(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to use pageinspect functions"))));
+ (errmsg("must be superuser to use raw page functions"))));
if (SRF_IS_FIRSTCALL())
{
@@ -560,10 +561,10 @@ bt_metap(PG_FUNCTION_ARGS)
* Get values of extended metadata if available, use default values
* otherwise.
*/
- if (metad->btm_version == BTREE_VERSION)
+ if (metad->btm_version >= BTREE_NOVAC_VERSION)
{
values[j++] = psprintf("%u", metad->btm_oldest_btpo_xact);
- values[j++] = psprintf("%lf", metad->btm_last_cleanup_num_heap_tuples);
+ values[j++] = psprintf("%f", metad->btm_last_cleanup_num_heap_tuples);
}
else
{
diff --git a/contrib/pageinspect/expected/btree.out b/contrib/pageinspect/expected/btree.out
index 2aaa4df53b1..07c2dcd7714 100644
--- a/contrib/pageinspect/expected/btree.out
+++ b/contrib/pageinspect/expected/btree.out
@@ -5,7 +5,7 @@ CREATE INDEX test1_a_idx ON test1 USING btree (a);
SELECT * FROM bt_metap('test1_a_idx');
-[ RECORD 1 ]-----------+-------
magic | 340322
-version | 3
+version | 4
root | 1
level | 0
fastroot | 1
diff --git a/contrib/pageinspect/expected/page.out b/contrib/pageinspect/expected/page.out
index 5edb6500859..b6aea0124bb 100644
--- a/contrib/pageinspect/expected/page.out
+++ b/contrib/pageinspect/expected/page.out
@@ -82,11 +82,113 @@ SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0));
(1 row)
+-- If we freeze the only tuple on test1, the infomask should
+-- always be the same in all test runs. We show raw flags by
+-- default: HEAP_XMIN_COMMITTED and HEAP_XMIN_INVALID.
+VACUUM FREEZE test1;
+SELECT t_infomask, t_infomask2, raw_flags, combined_flags
+FROM heap_page_items(get_raw_page('test1', 0)),
+ LATERAL heap_tuple_infomask_flags(t_infomask, t_infomask2);
+ t_infomask | t_infomask2 | raw_flags | combined_flags
+------------+-------------+-----------------------------------------------------------+--------------------
+ 2816 | 2 | {HEAP_XMIN_COMMITTED,HEAP_XMIN_INVALID,HEAP_XMAX_INVALID} | {HEAP_XMIN_FROZEN}
+(1 row)
+
+-- output the decoded flag HEAP_XMIN_FROZEN instead
+SELECT t_infomask, t_infomask2, raw_flags, combined_flags
+FROM heap_page_items(get_raw_page('test1', 0)),
+ LATERAL heap_tuple_infomask_flags(t_infomask, t_infomask2);
+ t_infomask | t_infomask2 | raw_flags | combined_flags
+------------+-------------+-----------------------------------------------------------+--------------------
+ 2816 | 2 | {HEAP_XMIN_COMMITTED,HEAP_XMIN_INVALID,HEAP_XMAX_INVALID} | {HEAP_XMIN_FROZEN}
+(1 row)
+
+-- tests for decoding of combined flags
+-- HEAP_XMAX_SHR_LOCK = (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)
+SELECT * FROM heap_tuple_infomask_flags(x'0050'::int, 0);
+ raw_flags | combined_flags
+---------------------------------------------+----------------------
+ {HEAP_XMAX_KEYSHR_LOCK,HEAP_XMAX_EXCL_LOCK} | {HEAP_XMAX_SHR_LOCK}
+(1 row)
+
+-- HEAP_XMIN_FROZEN = (HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID)
+SELECT * FROM heap_tuple_infomask_flags(x'0300'::int, 0);
+ raw_flags | combined_flags
+-----------------------------------------+--------------------
+ {HEAP_XMIN_COMMITTED,HEAP_XMIN_INVALID} | {HEAP_XMIN_FROZEN}
+(1 row)
+
+-- HEAP_MOVED = (HEAP_MOVED_IN | HEAP_MOVED_OFF)
+SELECT * FROM heap_tuple_infomask_flags(x'C000'::int, 0);
+ raw_flags | combined_flags
+--------------------------------+----------------
+ {HEAP_MOVED_OFF,HEAP_MOVED_IN} | {HEAP_MOVED}
+(1 row)
+
+SELECT * FROM heap_tuple_infomask_flags(x'C000'::int, 0);
+ raw_flags | combined_flags
+--------------------------------+----------------
+ {HEAP_MOVED_OFF,HEAP_MOVED_IN} | {HEAP_MOVED}
+(1 row)
+
+-- test all flags of t_infomask and t_infomask2
+SELECT unnest(raw_flags)
+ FROM heap_tuple_infomask_flags(x'FFFF'::int, x'FFFF'::int) ORDER BY 1;
+ unnest
+-----------------------
+ HEAP_COMBOCID
+ HEAP_HASEXTERNAL
+ HEAP_HASNULL
+ HEAP_HASOID_OLD
+ HEAP_HASVARWIDTH
+ HEAP_HOT_UPDATED
+ HEAP_KEYS_UPDATED
+ HEAP_MOVED_IN
+ HEAP_MOVED_OFF
+ HEAP_ONLY_TUPLE
+ HEAP_UPDATED
+ HEAP_XMAX_COMMITTED
+ HEAP_XMAX_EXCL_LOCK
+ HEAP_XMAX_INVALID
+ HEAP_XMAX_IS_MULTI
+ HEAP_XMAX_KEYSHR_LOCK
+ HEAP_XMAX_LOCK_ONLY
+ HEAP_XMIN_COMMITTED
+ HEAP_XMIN_INVALID
+(19 rows)
+
+SELECT unnest(combined_flags)
+ FROM heap_tuple_infomask_flags(x'FFFF'::int, x'FFFF'::int) ORDER BY 1;
+ unnest
+--------------------
+ HEAP_MOVED
+ HEAP_XMAX_SHR_LOCK
+ HEAP_XMIN_FROZEN
+(3 rows)
+
+-- no flags at all
+SELECT * FROM heap_tuple_infomask_flags(0, 0);
+ raw_flags | combined_flags
+-----------+----------------
+ {} | {}
+(1 row)
+
+-- no combined flags
+SELECT * FROM heap_tuple_infomask_flags(x'0010'::int, 0);
+ raw_flags | combined_flags
+-------------------------+----------------
+ {HEAP_XMAX_KEYSHR_LOCK} | {}
+(1 row)
+
DROP TABLE test1;
--- check that using any of these functions with a partitioned table would fail
+-- check that using any of these functions with a partitioned table or index
+-- would fail
create table test_partitioned (a int) partition by range (a);
+create index test_partitioned_index on test_partitioned (a);
select get_raw_page('test_partitioned', 0); -- error about partitioned table
ERROR: cannot get raw page from partitioned table "test_partitioned"
+select get_raw_page('test_partitioned_index', 0); -- error about partitioned index
+ERROR: cannot get raw page from partitioned index "test_partitioned_index"
-- a regular table which is a member of a partition set should work though
create table test_part1 partition of test_partitioned for values from ( 1 ) to (100);
select get_raw_page('test_part1', 0); -- get farther and error about empty table
diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c
index 86e8075845e..4b3c5968184 100644
--- a/contrib/pageinspect/fsmfuncs.c
+++ b/contrib/pageinspect/fsmfuncs.c
@@ -9,7 +9,7 @@
* there's hardly any use case for using these without superuser-rights
* anyway.
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/fsmfuncs.c
diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c
index d42609c5777..229f9f9a2ee 100644
--- a/contrib/pageinspect/ginfuncs.c
+++ b/contrib/pageinspect/ginfuncs.c
@@ -2,7 +2,7 @@
* ginfuncs.c
* Functions to investigate the content of GIN indexes
*
- * Copyright (c) 2014-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2014-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/ginfuncs.c
diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c
index 99b61b8669f..9374c4aabc4 100644
--- a/contrib/pageinspect/hashfuncs.c
+++ b/contrib/pageinspect/hashfuncs.c
@@ -2,7 +2,7 @@
* hashfuncs.c
* Functions to investigate the content of HASH indexes
*
- * Copyright (c) 2017-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2017-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/hashfuncs.c
@@ -18,7 +18,9 @@
#include "catalog/pg_am.h"
#include "funcapi.h"
#include "miscadmin.h"
+#include "utils/array.h"
#include "utils/builtins.h"
+#include "utils/rel.h"
PG_FUNCTION_INFO_V1(hash_page_type);
PG_FUNCTION_INFO_V1(hash_page_stats);
@@ -96,18 +98,22 @@ verify_hash_page(bytea *raw_page, int flags)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("page is not a hash meta page")));
+ break;
case LH_BUCKET_PAGE | LH_OVERFLOW_PAGE:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("page is not a hash bucket or overflow page")));
+ break;
case LH_OVERFLOW_PAGE:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("page is not a hash overflow page")));
+ break;
default:
elog(ERROR,
"hash page of type %08x not in mask %08x",
pagetype, flags);
+ break;
}
}
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index 7438257c5bb..02e2ab9997a 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -15,7 +15,7 @@
* there's hardly any use case for using these without superuser-rights
* anyway.
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/heapfuncs.c
@@ -28,13 +28,29 @@
#include "pageinspect.h"
#include "access/htup_details.h"
+#include "access/relation.h"
#include "funcapi.h"
+#include "catalog/pg_am_d.h"
#include "catalog/pg_type.h"
#include "miscadmin.h"
+#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/rel.h"
+/*
+ * It's no longer possible to create tuples with OIDs, but when pg_upgrade
+ * was used to upgrade from an older version, tuples might still have an
+ * OID. Seems worthwhile to display that.
+ */
+#define HeapTupleHeaderGetOidOld(tup) \
+( \
+ ((tup)->t_infomask & HEAP_HASOID_OLD) ? \
+ *((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) \
+ : \
+ InvalidOid \
+)
+
/*
* bits_to_text
@@ -241,8 +257,8 @@ heap_page_items(PG_FUNCTION_ARGS)
else
nulls[11] = true;
- if (tuphdr->t_infomask & HEAP_HASOID)
- values[12] = HeapTupleHeaderGetOid(tuphdr);
+ if (tuphdr->t_infomask & HEAP_HASOID_OLD)
+ values[12] = HeapTupleHeaderGetOidOld(tuphdr);
else
nulls[12] = true;
}
@@ -298,13 +314,16 @@ tuple_data_split_internal(Oid relid, char *tupdata,
TupleDesc tupdesc;
/* Get tuple descriptor from relation OID */
- rel = relation_open(relid, NoLock);
- tupdesc = CreateTupleDescCopyConstr(rel->rd_att);
- relation_close(rel, NoLock);
+ rel = relation_open(relid, AccessShareLock);
+ tupdesc = RelationGetDescr(rel);
raw_attrs = initArrayResult(BYTEAOID, CurrentMemoryContext, false);
nattrs = tupdesc->natts;
+ if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("only heap AM is supported")));
+
if (nattrs < (t_infomask2 & HEAP_NATTS_MASK))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
@@ -317,7 +336,6 @@ tuple_data_split_internal(Oid relid, char *tupdata,
bytea *attr_data = NULL;
attr = TupleDescAttr(tupdesc, i);
- is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
/*
* Tuple header can specify less attributes than tuple descriptor as
@@ -327,6 +345,8 @@ tuple_data_split_internal(Oid relid, char *tupdata,
*/
if (i >= (t_infomask2 & HEAP_NATTS_MASK))
is_null = true;
+ else
+ is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
if (!is_null)
{
@@ -386,6 +406,8 @@ tuple_data_split_internal(Oid relid, char *tupdata,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("end of tuple reached without looking at all its data")));
+ relation_close(rel, AccessShareLock);
+
return makeArrayResult(raw_attrs, CurrentMemoryContext);
}
@@ -473,3 +495,129 @@ tuple_data_split(PG_FUNCTION_ARGS)
PG_RETURN_ARRAYTYPE_P(res);
}
+
+/*
+ * heap_tuple_infomask_flags
+ *
+ * Decode into a human-readable format t_infomask and t_infomask2 associated
+ * to a tuple. All the flags are described in access/htup_details.h.
+ */
+PG_FUNCTION_INFO_V1(heap_tuple_infomask_flags);
+
+Datum
+heap_tuple_infomask_flags(PG_FUNCTION_ARGS)
+{
+#define HEAP_TUPLE_INFOMASK_COLS 2
+ Datum values[HEAP_TUPLE_INFOMASK_COLS];
+ bool nulls[HEAP_TUPLE_INFOMASK_COLS];
+ uint16 t_infomask = PG_GETARG_INT16(0);
+ uint16 t_infomask2 = PG_GETARG_INT16(1);
+ int cnt = 0;
+ ArrayType *a;
+ int bitcnt;
+ Datum *flags;
+ TupleDesc tupdesc;
+ HeapTuple tuple;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to use raw page functions")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ bitcnt = pg_popcount((const char *) &t_infomask, sizeof(uint16)) +
+ pg_popcount((const char *) &t_infomask2, sizeof(uint16));
+
+ /* Initialize values and NULL flags arrays */
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ /* If no flags, return a set of empty arrays */
+ if (bitcnt <= 0)
+ {
+ values[0] = PointerGetDatum(construct_empty_array(TEXTOID));
+ values[1] = PointerGetDatum(construct_empty_array(TEXTOID));
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
+ }
+
+ /* build set of raw flags */
+ flags = (Datum *) palloc0(sizeof(Datum) * bitcnt);
+
+ /* decode t_infomask */
+ if ((t_infomask & HEAP_HASNULL) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_HASNULL");
+ if ((t_infomask & HEAP_HASVARWIDTH) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_HASVARWIDTH");
+ if ((t_infomask & HEAP_HASEXTERNAL) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_HASEXTERNAL");
+ if ((t_infomask & HEAP_HASOID_OLD) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_HASOID_OLD");
+ if ((t_infomask & HEAP_XMAX_KEYSHR_LOCK) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_KEYSHR_LOCK");
+ if ((t_infomask & HEAP_COMBOCID) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_COMBOCID");
+ if ((t_infomask & HEAP_XMAX_EXCL_LOCK) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_EXCL_LOCK");
+ if ((t_infomask & HEAP_XMAX_LOCK_ONLY) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_LOCK_ONLY");
+ if ((t_infomask & HEAP_XMIN_COMMITTED) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMIN_COMMITTED");
+ if ((t_infomask & HEAP_XMIN_INVALID) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMIN_INVALID");
+ if ((t_infomask & HEAP_XMAX_COMMITTED) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_COMMITTED");
+ if ((t_infomask & HEAP_XMAX_INVALID) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_INVALID");
+ if ((t_infomask & HEAP_XMAX_IS_MULTI) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_IS_MULTI");
+ if ((t_infomask & HEAP_UPDATED) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_UPDATED");
+ if ((t_infomask & HEAP_MOVED_OFF) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_MOVED_OFF");
+ if ((t_infomask & HEAP_MOVED_IN) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_MOVED_IN");
+
+ /* decode t_infomask2 */
+ if ((t_infomask2 & HEAP_KEYS_UPDATED) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_KEYS_UPDATED");
+ if ((t_infomask2 & HEAP_HOT_UPDATED) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_HOT_UPDATED");
+ if ((t_infomask2 & HEAP_ONLY_TUPLE) != 0)
+ flags[cnt++] = CStringGetTextDatum("HEAP_ONLY_TUPLE");
+
+ /* build value */
+ Assert(cnt <= bitcnt);
+ a = construct_array(flags, cnt, TEXTOID, -1, false, 'i');
+ values[0] = PointerGetDatum(a);
+
+ /*
+ * Build set of combined flags. Use the same array as previously; this
+ * keeps the code simple.
+ */
+ cnt = 0;
+ MemSet(flags, 0, sizeof(Datum) * bitcnt);
+
+ /* decode combined masks of t_infomask */
+ if ((t_infomask & HEAP_XMAX_SHR_LOCK) == HEAP_XMAX_SHR_LOCK)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMAX_SHR_LOCK");
+ if ((t_infomask & HEAP_XMIN_FROZEN) == HEAP_XMIN_FROZEN)
+ flags[cnt++] = CStringGetTextDatum("HEAP_XMIN_FROZEN");
+ if ((t_infomask & HEAP_MOVED) == HEAP_MOVED)
+ flags[cnt++] = CStringGetTextDatum("HEAP_MOVED");
+
+ /* Build an empty array if there are no combined flags */
+ if (cnt == 0)
+ a = construct_empty_array(TEXTOID);
+ else
+ a = construct_array(flags, cnt, TEXTOID, -1, false, 'i');
+ pfree(flags);
+ values[1] = PointerGetDatum(a);
+
+ /* Returns the record as Datum */
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
+}
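
heap_tuple_infomask_flags() sizes its scratch array with pg_popcount() over
both masks (an upper bound, since the same array is reused for the combined
flags), collects CStringGetTextDatum() values, and materializes each text[]
result with construct_array(). A minimal sketch of that last step, assuming
a backend memory context:

    #include "postgres.h"
    #include "catalog/pg_type.h"
    #include "utils/array.h"
    #include "utils/builtins.h"

    /* Build a text[] Datum from plain C strings */
    static Datum
    text_array_from_cstrings(const char **strs, int n)
    {
        Datum      *elems = (Datum *) palloc(sizeof(Datum) * n);
        ArrayType  *a;

        for (int i = 0; i < n; i++)
            elems[i] = CStringGetTextDatum(strs[i]);

        /* elmlen -1 (varlena), elmbyval false, elmalign 'i' describe text */
        a = construct_array(elems, n, TEXTOID, -1, false, 'i');
        return PointerGetDatum(a);
    }
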
diff --git a/contrib/pageinspect/pageinspect--1.7--1.8.sql b/contrib/pageinspect/pageinspect--1.7--1.8.sql
new file mode 100644
index 00000000000..2a7c4b35165
--- /dev/null
+++ b/contrib/pageinspect/pageinspect--1.7--1.8.sql
@@ -0,0 +1,16 @@
+/* contrib/pageinspect/pageinspect--1.7--1.8.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.8'" to load this file. \quit
+
+--
+-- heap_tuple_infomask_flags()
+--
+CREATE FUNCTION heap_tuple_infomask_flags(
+ t_infomask integer,
+ t_infomask2 integer,
+ raw_flags OUT text[],
+ combined_flags OUT text[])
+RETURNS record
+AS 'MODULE_PATHNAME', 'heap_tuple_infomask_flags'
+LANGUAGE C STRICT PARALLEL SAFE;
diff --git a/contrib/pageinspect/pageinspect.control b/contrib/pageinspect/pageinspect.control
index dcfc61f22dc..f8cdf526c65 100644
--- a/contrib/pageinspect/pageinspect.control
+++ b/contrib/pageinspect/pageinspect.control
@@ -1,5 +1,5 @@
# pageinspect extension
comment = 'inspect the contents of database pages at a low level'
-default_version = '1.7'
+default_version = '1.8'
module_pathname = '$libdir/pageinspect'
relocatable = true
diff --git a/contrib/pageinspect/pageinspect.h b/contrib/pageinspect/pageinspect.h
index ab7d5d66cd1..0a660ad6254 100644
--- a/contrib/pageinspect/pageinspect.h
+++ b/contrib/pageinspect/pageinspect.h
@@ -3,7 +3,7 @@
* pageinspect.h
* Common functions for pageinspect.
*
- * Copyright (c) 2017-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2017-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/pageinspect.h
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index 72f1d21e1b7..f08f62f72da 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -5,7 +5,7 @@
*
* Access-method specific inspection functions are in separate files.
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/rawpage.c
@@ -18,6 +18,7 @@
#include "pageinspect.h"
#include "access/htup_details.h"
+#include "access/relation.h"
#include "catalog/namespace.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
@@ -32,7 +33,7 @@
PG_MODULE_MAGIC;
static bytea *get_raw_page_internal(text *relname, ForkNumber forknum,
- BlockNumber blkno);
+ BlockNumber blkno);
/*
@@ -102,7 +103,7 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to use raw functions"))));
+ (errmsg("must be superuser to use raw page functions"))));
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
@@ -128,6 +129,11 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from partitioned table \"%s\"",
RelationGetRelationName(rel))));
+ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot get raw page from partitioned index \"%s\"",
+ RelationGetRelationName(rel))));
/*
* Reject attempts to read non-local temporary relations; we would be
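
The new rejection mirrors the partitioned-table case: neither partitioned
relkind has storage, so there is no page zero to hand back. The committed
code keeps two separate error messages; condensed into one guard for
illustration:

    if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
        rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("cannot get raw page from relation \"%s\"",
                        RelationGetRelationName(rel))));
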
diff --git a/contrib/pageinspect/sql/page.sql b/contrib/pageinspect/sql/page.sql
index 8f35830e067..bd049aeb247 100644
--- a/contrib/pageinspect/sql/page.sql
+++ b/contrib/pageinspect/sql/page.sql
@@ -31,11 +31,48 @@ SELECT tuple_data_split('test1'::regclass, t_data, t_infomask, t_infomask2, t_bi
SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0));
+-- If we freeze the only tuple on test1, the infomask should
+-- always be the same in all test runs. We show raw flags by
+-- default: HEAP_XMIN_COMMITTED and HEAP_XMIN_INVALID.
+VACUUM FREEZE test1;
+
+SELECT t_infomask, t_infomask2, raw_flags, combined_flags
+FROM heap_page_items(get_raw_page('test1', 0)),
+ LATERAL heap_tuple_infomask_flags(t_infomask, t_infomask2);
+
+-- output the decoded flag HEAP_XMIN_FROZEN instead
+SELECT t_infomask, t_infomask2, raw_flags, combined_flags
+FROM heap_page_items(get_raw_page('test1', 0)),
+ LATERAL heap_tuple_infomask_flags(t_infomask, t_infomask2);
+
+-- tests for decoding of combined flags
+-- HEAP_XMAX_SHR_LOCK = (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)
+SELECT * FROM heap_tuple_infomask_flags(x'0050'::int, 0);
+-- HEAP_XMIN_FROZEN = (HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID)
+SELECT * FROM heap_tuple_infomask_flags(x'0300'::int, 0);
+-- HEAP_MOVED = (HEAP_MOVED_IN | HEAP_MOVED_OFF)
+SELECT * FROM heap_tuple_infomask_flags(x'C000'::int, 0);
+SELECT * FROM heap_tuple_infomask_flags(x'C000'::int, 0);
+
+-- test all flags of t_infomask and t_infomask2
+SELECT unnest(raw_flags)
+ FROM heap_tuple_infomask_flags(x'FFFF'::int, x'FFFF'::int) ORDER BY 1;
+SELECT unnest(combined_flags)
+ FROM heap_tuple_infomask_flags(x'FFFF'::int, x'FFFF'::int) ORDER BY 1;
+
+-- no flags at all
+SELECT * FROM heap_tuple_infomask_flags(0, 0);
+-- no combined flags
+SELECT * FROM heap_tuple_infomask_flags(x'0010'::int, 0);
+
DROP TABLE test1;
--- check that using any of these functions with a partitioned table would fail
+-- check that using any of these functions with a partitioned table or index
+-- would fail
create table test_partitioned (a int) partition by range (a);
+create index test_partitioned_index on test_partitioned (a);
select get_raw_page('test_partitioned', 0); -- error about partitioned table
+select get_raw_page('test_partitioned_index', 0); -- error about partitioned index
-- a regular table which is a member of a partition set should work though
create table test_part1 partition of test_partitioned for values from ( 1 ) to (100);
diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c
index d3d9ff36761..c3fb5a9c085 100644
--- a/contrib/passwordcheck/passwordcheck.c
+++ b/contrib/passwordcheck/passwordcheck.c
@@ -3,7 +3,7 @@
* passwordcheck.c
*
*
- * Copyright (c) 2009-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2019, PostgreSQL Global Development Group
*
* Author: Laurenz Albe
*
@@ -26,10 +26,14 @@
PG_MODULE_MAGIC;
+/* Saved hook value in case of unload */
+static check_password_hook_type prev_check_password_hook = NULL;
+
/* passwords shorter than this will be rejected */
#define MIN_PWD_LENGTH 8
extern void _PG_init(void);
+extern void _PG_fini(void);
/*
* check_password
@@ -55,6 +59,11 @@ check_password(const char *username,
Datum validuntil_time,
bool validuntil_null)
{
+ if (prev_check_password_hook)
+ prev_check_password_hook(username, shadow_pass,
+ password_type, validuntil_time,
+ validuntil_null);
+
if (password_type != PASSWORD_TYPE_PLAINTEXT)
{
/*
@@ -133,5 +142,16 @@ void
_PG_init(void)
{
/* activate password checks when the module is loaded */
+ prev_check_password_hook = check_password_hook;
check_password_hook = check_password;
}
+
+/*
+ * Module unload function
+ */
+void
+_PG_fini(void)
+{
+ /* uninstall hook */
+ check_password_hook = prev_check_password_hook;
+}
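
The passwordcheck change adopts the usual hook-chaining convention: save the
previous hook pointer in _PG_init(), call it first so stacked modules all
get a chance to run, and put it back in _PG_fini(). The same shape works for
any backend hook; demo_hook and demo_hook_type below are hypothetical
stand-ins:

    typedef void (*demo_hook_type) (const char *arg);
    extern demo_hook_type demo_hook;    /* hypothetical hook variable */

    static demo_hook_type prev_demo_hook = NULL;

    static void
    demo_hook_impl(const char *arg)
    {
        if (prev_demo_hook)
            prev_demo_hook(arg);        /* chain: earlier modules run first */
        /* ... this module's own checks ... */
    }

    void
    _PG_init(void)
    {
        prev_demo_hook = demo_hook;     /* save whatever was installed */
        demo_hook = demo_hook_impl;
    }

    void
    _PG_fini(void)
    {
        demo_hook = prev_demo_hook;     /* restore on unload */
    }
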
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index b410aafa5a9..1bd579fcbb0 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -99,7 +99,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
elog(ERROR, "incorrect number of output arguments");
/* Construct a tuple descriptor for the result rows. */
- tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts, false);
+ tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts);
TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid",
INT4OID, -1, 0);
TupleDescInitEntry(tupledesc, (AttrNumber) 2, "relfilenode",
diff --git a/contrib/pg_freespacemap/pg_freespacemap.c b/contrib/pg_freespacemap/pg_freespacemap.c
index 7d939a7d207..b82cab2d97e 100644
--- a/contrib/pg_freespacemap/pg_freespacemap.c
+++ b/contrib/pg_freespacemap/pg_freespacemap.c
@@ -8,6 +8,7 @@
*/
#include "postgres.h"
+#include "access/relation.h"
#include "funcapi.h"
#include "storage/freespace.h"
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index bb28e237d17..38ae240c551 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -16,7 +16,7 @@
* relevant database in turn. The former keeps running after the
* initial prewarm is complete to update the dump file periodically.
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pg_prewarm/autoprewarm.c
@@ -25,9 +25,10 @@
*/
#include "postgres.h"
+
#include <unistd.h>
-#include "access/heapam.h"
+#include "access/relation.h"
#include "access/xact.h"
#include "catalog/pg_class.h"
#include "catalog/pg_type.h"
@@ -73,9 +74,9 @@ typedef struct AutoPrewarmSharedState
/* Following items are for communication with per-database worker */
dsm_handle block_info_handle;
Oid database;
- int64 prewarm_start_idx;
- int64 prewarm_stop_idx;
- int64 prewarmed_blocks;
+ int prewarm_start_idx;
+ int prewarm_stop_idx;
+ int prewarmed_blocks;
} AutoPrewarmSharedState;
void _PG_init(void);
@@ -86,7 +87,7 @@ PG_FUNCTION_INFO_V1(autoprewarm_start_worker);
PG_FUNCTION_INFO_V1(autoprewarm_dump_now);
static void apw_load_buffers(void);
-static int64 apw_dump_now(bool is_bgworker, bool dump_unlogged);
+static int apw_dump_now(bool is_bgworker, bool dump_unlogged);
static void apw_start_master_worker(void);
static void apw_start_database_worker(void);
static bool apw_init_shmem(void);
@@ -180,8 +181,8 @@ autoprewarm_main(Datum main_arg)
{
LWLockRelease(&apw_state->lock);
ereport(LOG,
- (errmsg("autoprewarm worker is already running under PID %d",
- apw_state->bgworker_pid)));
+ (errmsg("autoprewarm worker is already running under PID %lu",
+ (unsigned long) apw_state->bgworker_pid)));
return;
}
apw_state->bgworker_pid = MyProcPid;
@@ -206,8 +207,6 @@ autoprewarm_main(Datum main_arg)
/* Periodically dump buffers until terminated. */
while (!got_sigterm)
{
- int rc;
-
/* In case of a SIGHUP, just reload the configuration. */
if (got_sighup)
{
@@ -218,10 +217,10 @@ autoprewarm_main(Datum main_arg)
if (autoprewarm_interval <= 0)
{
/* We're only dumping at shutdown, so just wait forever. */
- rc = WaitLatch(&MyProc->procLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH,
- -1L,
- PG_WAIT_EXTENSION);
+ (void) WaitLatch(&MyProc->procLatch,
+ WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
+ -1L,
+ PG_WAIT_EXTENSION);
}
else
{
@@ -247,16 +246,14 @@ autoprewarm_main(Datum main_arg)
}
/* Sleep until the next dump time. */
- rc = WaitLatch(&MyProc->procLatch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- delay_in_ms,
- PG_WAIT_EXTENSION);
+ (void) WaitLatch(&MyProc->procLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ delay_in_ms,
+ PG_WAIT_EXTENSION);
}
- /* Reset the latch, bail out if postmaster died, otherwise loop. */
+ /* Reset the latch, loop. */
ResetLatch(&MyProc->procLatch);
- if (rc & WL_POSTMASTER_DEATH)
- proc_exit(1);
}
/*
@@ -274,7 +271,7 @@ static void
apw_load_buffers(void)
{
FILE *file = NULL;
- int64 num_elements,
+ int num_elements,
i;
BlockInfoRecord *blkinfo;
dsm_segment *seg;
@@ -290,8 +287,8 @@ apw_load_buffers(void)
{
LWLockRelease(&apw_state->lock);
ereport(LOG,
- (errmsg("skipping prewarm because block dump file is being written by PID %d",
- apw_state->pid_using_dumpfile)));
+ (errmsg("skipping prewarm because block dump file is being written by PID %lu",
+ (unsigned long) apw_state->pid_using_dumpfile)));
return;
}
LWLockRelease(&apw_state->lock);
@@ -317,7 +314,7 @@ apw_load_buffers(void)
}
/* First line of the file is a record count. */
- if (fscanf(file, "<<" INT64_FORMAT ">>\n", &num_elements) != 1)
+ if (fscanf(file, "<<%d>>\n", &num_elements) != 1)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read from file \"%s\": %m",
@@ -336,7 +333,7 @@ apw_load_buffers(void)
&blkinfo[i].tablespace, &blkinfo[i].filenode,
&forknum, &blkinfo[i].blocknum) != 5)
ereport(ERROR,
- (errmsg("autoprewarm block dump file is corrupted at line " INT64_FORMAT,
+ (errmsg("autoprewarm block dump file is corrupted at line %d",
i + 1)));
blkinfo[i].forknum = forknum;
}
@@ -355,40 +352,40 @@ apw_load_buffers(void)
/* Get the info position of the first block of the next database. */
while (apw_state->prewarm_start_idx < num_elements)
{
- uint32 i = apw_state->prewarm_start_idx;
- Oid current_db = blkinfo[i].database;
+ int j = apw_state->prewarm_start_idx;
+ Oid current_db = blkinfo[j].database;
/*
- * Advance the prewarm_stop_idx to the first BlockRecordInfo that does
+ * Advance the prewarm_stop_idx to the first BlockInfoRecord that does
* not belong to this database.
*/
- i++;
- while (i < num_elements)
+ j++;
+ while (j < num_elements)
{
- if (current_db != blkinfo[i].database)
+ if (current_db != blkinfo[j].database)
{
/*
- * Combine BlockRecordInfos for global objects with those of
+ * Combine BlockInfoRecords for global objects with those of
* the database.
*/
if (current_db != InvalidOid)
break;
- current_db = blkinfo[i].database;
+ current_db = blkinfo[j].database;
}
- i++;
+ j++;
}
/*
* If we reach this point with current_db == InvalidOid, then only
- * BlockRecordInfos belonging to global objects exist. We can't
+ * BlockInfoRecords belonging to global objects exist. We can't
* prewarm without a database connection, so just bail out.
*/
if (current_db == InvalidOid)
break;
/* Configure stop point and database for next per-database worker. */
- apw_state->prewarm_stop_idx = i;
+ apw_state->prewarm_stop_idx = j;
apw_state->database = current_db;
Assert(apw_state->prewarm_start_idx < apw_state->prewarm_stop_idx);
@@ -415,8 +412,7 @@ apw_load_buffers(void)
/* Report our success. */
ereport(LOG,
- (errmsg("autoprewarm successfully prewarmed " INT64_FORMAT
- " of " INT64_FORMAT " previously-loaded blocks",
+ (errmsg("autoprewarm successfully prewarmed %d of %d previously-loaded blocks",
apw_state->prewarmed_blocks, num_elements)));
}
@@ -427,7 +423,7 @@ apw_load_buffers(void)
void
autoprewarm_database_main(Datum main_arg)
{
- uint32 pos;
+ int pos;
BlockInfoRecord *block_info;
Relation rel = NULL;
BlockNumber nblocks = 0;
@@ -557,13 +553,14 @@ autoprewarm_database_main(Datum main_arg)
* Dump information on blocks in shared buffers. We use a text format here
* so that it's easy to understand and even change the file contents if
* necessary.
+ * Returns the number of blocks dumped.
*/
-static int64
+static int
apw_dump_now(bool is_bgworker, bool dump_unlogged)
{
- uint32 i;
+ int num_blocks;
+ int i;
int ret;
- int64 num_blocks;
BlockInfoRecord *block_info_array;
BufferDesc *bufHdr;
FILE *file;
@@ -580,12 +577,12 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
{
if (!is_bgworker)
ereport(ERROR,
- (errmsg("could not perform block dump because dump file is being used by PID %d",
- apw_state->pid_using_dumpfile)));
+ (errmsg("could not perform block dump because dump file is being used by PID %lu",
+ (unsigned long) apw_state->pid_using_dumpfile)));
ereport(LOG,
- (errmsg("skipping block dump because it is already being performed by PID %d",
- apw_state->pid_using_dumpfile)));
+ (errmsg("skipping block dump because it is already being performed by PID %lu",
+ (unsigned long) apw_state->pid_using_dumpfile)));
return 0;
}
@@ -630,7 +627,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
errmsg("could not open file \"%s\": %m",
transient_dump_file_path)));
- ret = fprintf(file, "<<" INT64_FORMAT ">>\n", num_blocks);
+ ret = fprintf(file, "<<%d>>\n", num_blocks);
if (ret < 0)
{
int save_errno = errno;
@@ -640,7 +637,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
errno = save_errno;
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\" : %m",
+ errmsg("could not write to file \"%s\": %m",
transient_dump_file_path)));
}
@@ -663,7 +660,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
errno = save_errno;
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\" : %m",
+ errmsg("could not write to file \"%s\": %m",
transient_dump_file_path)));
}
}
@@ -683,7 +680,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
errno = save_errno;
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not close file \"%s\" : %m",
+ errmsg("could not close file \"%s\": %m",
transient_dump_file_path)));
}
@@ -691,8 +688,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
apw_state->pid_using_dumpfile = InvalidPid;
ereport(DEBUG1,
- (errmsg("wrote block details for " INT64_FORMAT " blocks",
- num_blocks)));
+ (errmsg("wrote block details for %d blocks", num_blocks)));
return num_blocks;
}
@@ -717,8 +713,8 @@ autoprewarm_start_worker(PG_FUNCTION_ARGS)
if (pid != InvalidPid)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("autoprewarm worker is already running under PID %d",
- pid)));
+ errmsg("autoprewarm worker is already running under PID %lu",
+ (unsigned long) pid)));
apw_start_master_worker();
@@ -727,11 +723,14 @@ autoprewarm_start_worker(PG_FUNCTION_ARGS)
/*
* SQL-callable function to perform an immediate block dump.
+ *
+ * Note: this is declared to return int8, as insurance against some
+ * very distant day when we might make NBuffers wider than int.
*/
Datum
autoprewarm_dump_now(PG_FUNCTION_ARGS)
{
- int64 num_blocks;
+ int num_blocks;
apw_init_shmem();
@@ -741,7 +740,7 @@ autoprewarm_dump_now(PG_FUNCTION_ARGS)
}
PG_END_ENSURE_ERROR_CLEANUP(apw_detach_shmem, 0);
- PG_RETURN_INT64(num_blocks);
+ PG_RETURN_INT64((int64) num_blocks);
}
/*
@@ -841,6 +840,7 @@ apw_start_database_worker(void)
worker.bgw_flags =
BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
worker.bgw_start_time = BgWorkerStart_ConsistentState;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
strcpy(worker.bgw_library_name, "pg_prewarm");
strcpy(worker.bgw_function_name, "autoprewarm_database_main");
strcpy(worker.bgw_name, "autoprewarm worker");
@@ -869,7 +869,7 @@ do { \
return -1; \
else if (a->fld > b->fld) \
return 1; \
-} while(0);
+} while(0)
/*
* apw_compare_blockinfo
@@ -883,8 +883,8 @@ do { \
static int
apw_compare_blockinfo(const void *p, const void *q)
{
- BlockInfoRecord *a = (BlockInfoRecord *) p;
- BlockInfoRecord *b = (BlockInfoRecord *) q;
+ const BlockInfoRecord *a = (const BlockInfoRecord *) p;
+ const BlockInfoRecord *b = (const BlockInfoRecord *) q;
cmp_member_elem(database);
cmp_member_elem(tablespace);
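
The autoprewarm changes above narrow the internal block counter to int while keeping the SQL-level int8 return type, and stop per-database workers from being restarted. A minimal session exercising this surface might look like the sketch below; it assumes pg_prewarm is installed and autoprewarm is loaded via shared_preload_libraries:

    CREATE EXTENSION IF NOT EXISTS pg_prewarm;
    SELECT autoprewarm_start_worker();  -- errors if a worker already runs under some PID
    SELECT autoprewarm_dump_now();      -- returns the number of blocks dumped, as int8
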
diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c
index 3cbb7c2b889..f3deb47a97b 100644
--- a/contrib/pg_prewarm/pg_prewarm.c
+++ b/contrib/pg_prewarm/pg_prewarm.c
@@ -3,7 +3,7 @@
* pg_prewarm.c
* prewarming utilities
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pg_prewarm/pg_prewarm.c
@@ -15,7 +15,7 @@
#include <sys/stat.h>
#include <unistd.h>
-#include "access/heapam.h"
+#include "access/relation.h"
#include "fmgr.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
@@ -36,7 +36,7 @@ typedef enum
PREWARM_BUFFER
} PrewarmType;
-static char blockbuffer[BLCKSZ];
+static PGAlignedBlock blockbuffer;
/*
* pg_prewarm(regclass, mode text, fork text,
@@ -178,7 +178,7 @@ pg_prewarm(PG_FUNCTION_ARGS)
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
- smgrread(rel->rd_smgr, forkNumber, block, blockbuffer);
+ smgrread(rel->rd_smgr, forkNumber, block, blockbuffer.data);
++blocks_done;
}
}
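
The pg_prewarm() hunk above only swaps the raw char buffer for a PGAlignedBlock union, so reads land in suitably aligned memory; the SQL interface is unchanged. A usage sketch (the table name is hypothetical):

    CREATE EXTENSION IF NOT EXISTS pg_prewarm;
    SELECT pg_prewarm('my_table', 'read');    -- goes through the patched smgrread() path
    SELECT pg_prewarm('my_table', 'buffer');  -- loads the blocks into shared_buffers instead
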
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index cb785971a98..031b1b5cd58 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -58,7 +58,6 @@ char *triggerPath; /* where to find the trigger file? */
char *xlogFilePath; /* where we are going to restore to */
char *nextWALFileName; /* the file we need to get from archive */
char *restartWALFileName; /* the file from which we can restart restore */
-char *priorWALFileName; /* the file we need to get from archive */
char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
char restoreCommand[MAXPGPATH]; /* run this to restore */
char exclusiveCleanupFileName[MAXFNAMELEN]; /* the file we need to get
@@ -94,7 +93,6 @@ int restoreCommandType;
#define XLOG_DATA 0
#define XLOG_HISTORY 1
-#define XLOG_BACKUP_LABEL 2
int nextWALFileType;
#define SET_RESTORE_COMMAND(cmd, arg1, arg2) \
@@ -116,7 +114,7 @@ static bool SetWALSegSize(void);
* accessible directory. If you want to make other assumptions,
* such as using a vendor-specific archive and access API, these
* routines are the ones you'll need to change. You're
- * encouraged to submit any changes to pgsql-hackers@postgresql.org
+ * encouraged to submit any changes to pgsql-hackers@lists.postgresql.org
* or personally to the current maintainer. Those changes may be
* folded in to later versions of this program.
*/
@@ -211,15 +209,9 @@ CustomizableNextWALFileReady(void)
}
/*
- * If it's a backup file, return immediately. If it's a regular file
- * return only if it's the right size already.
+ * Return only if it's the right size already.
*/
- if (IsBackupHistoryFileName(nextWALFileName))
- {
- nextWALFileType = XLOG_BACKUP_LABEL;
- return true;
- }
- else if (WalSegSz > 0 && stat_buf.st_size == WalSegSz)
+ if (WalSegSz > 0 && stat_buf.st_size == WalSegSz)
{
#ifdef WIN32
@@ -408,9 +400,7 @@ SetWALSegSize(void)
{
bool ret_val = false;
int fd;
-
- /* malloc this buffer to ensure sufficient alignment: */
- char *buf = (char *) pg_malloc(XLOG_BLCKSZ);
+ PGAlignedXLogBlock buf;
Assert(WalSegSz == -1);
@@ -418,14 +408,13 @@ SetWALSegSize(void)
{
fprintf(stderr, "%s: could not open WAL file \"%s\": %s\n",
progname, WALFilePath, strerror(errno));
- pg_free(buf);
return false;
}
errno = 0;
- if (read(fd, buf, XLOG_BLCKSZ) == XLOG_BLCKSZ)
+ if (read(fd, buf.data, XLOG_BLCKSZ) == XLOG_BLCKSZ)
{
- XLogLongPageHeader longhdr = (XLogLongPageHeader) buf;
+ XLogLongPageHeader longhdr = (XLogLongPageHeader) buf.data;
WalSegSz = longhdr->xlp_seg_size;
@@ -462,7 +451,6 @@ SetWALSegSize(void)
fflush(stderr);
close(fd);
- pg_free(buf);
return ret_val;
}
@@ -622,11 +610,11 @@ usage(void)
printf(" -w MAXWAITTIME max seconds to wait for a file (0=no limit) (default=0)\n");
printf(" -?, --help show this help, then exit\n");
printf("\n"
- "Main intended use as restore_command in recovery.conf:\n"
+ "Main intended use as restore_command in postgresql.conf:\n"
" restore_command = 'pg_standby [OPTION]... ARCHIVELOCATION %%f %%p %%r'\n"
"e.g.\n"
" restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
- printf("\nReport bugs to <pgsql-bugs@postgresql.org>.\n");
+ printf("\nReport bugs to <pgsql-bugs@lists.postgresql.org>.\n");
}
#ifndef WIN32
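
Since the usage text now points at postgresql.conf rather than recovery.conf, configuring pg_standby under the merged recovery model looks roughly like the sketch below; restore_command is a postmaster-context GUC in this release, so the value is picked up at the next server start (the archive path is hypothetical):

    ALTER SYSTEM SET restore_command =
        'pg_standby /mnt/server/archiverdir %f %p %r';
    -- effective after the next postmaster start
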
diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile
index 39b368b70eb..051ce46f0c5 100644
--- a/contrib/pg_stat_statements/Makefile
+++ b/contrib/pg_stat_statements/Makefile
@@ -4,7 +4,8 @@ MODULE_big = pg_stat_statements
OBJS = pg_stat_statements.o $(WIN32RES)
EXTENSION = pg_stat_statements
-DATA = pg_stat_statements--1.4.sql pg_stat_statements--1.4--1.5.sql \
+DATA = pg_stat_statements--1.4.sql pg_stat_statements--1.6--1.7.sql \
+ pg_stat_statements--1.5--1.6.sql pg_stat_statements--1.4--1.5.sql \
pg_stat_statements--1.3--1.4.sql pg_stat_statements--1.2--1.3.sql \
pg_stat_statements--1.1--1.2.sql pg_stat_statements--1.0--1.1.sql \
pg_stat_statements--unpackaged--1.0.sql
diff --git a/contrib/pg_stat_statements/expected/pg_stat_statements.out b/contrib/pg_stat_statements/expected/pg_stat_statements.out
index 5318c3550c7..6787ec1efda 100644
--- a/contrib/pg_stat_statements/expected/pg_stat_statements.out
+++ b/contrib/pg_stat_statements/expected/pg_stat_statements.out
@@ -354,6 +354,93 @@ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
SELECT pg_stat_statements_reset() | 1 | 1
(5 rows)
+--
+-- queries with locking clauses
+--
+CREATE TABLE pgss_a (id integer PRIMARY KEY);
+CREATE TABLE pgss_b (id integer PRIMARY KEY, a_id integer REFERENCES pgss_a);
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+-- control query
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test range tables
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a, pgss_b; -- matches plain "FOR UPDATE"
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test strengths
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test wait policies
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT calls, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+ calls | query
+-------+------------------------------------------------------------------------------------------
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE
+ 2 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED
+ 1 | SELECT pg_stat_statements_reset()
+(11 rows)
+
+DROP TABLE pgss_a, pgss_b CASCADE;
--
-- utility commands
--
@@ -395,4 +482,202 @@ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
SELECT pg_stat_statements_reset() | 1 | 1
(8 rows)
+--
+-- Track user activity and reset the corresponding statistics
+--
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+CREATE ROLE regress_stats_user1;
+CREATE ROLE regress_stats_user2;
+SET ROLE regress_stats_user1;
+SELECT 1 AS "ONE";
+ ONE
+-----
+ 1
+(1 row)
+
+SELECT 1+1 AS "TWO";
+ TWO
+-----
+ 2
+(1 row)
+
+RESET ROLE;
+SET ROLE regress_stats_user2;
+SELECT 1 AS "ONE";
+ ONE
+-----
+ 1
+(1 row)
+
+SELECT 1+1 AS "TWO";
+ TWO
+-----
+ 2
+(1 row)
+
+RESET ROLE;
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+-----------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(10 rows)
+
+--
+-- Don't reset anything if any of the parameters is NULL
+--
+SELECT pg_stat_statements_reset(NULL);
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 1 | 10
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(12 rows)
+
+--
+-- remove query ('SELECT $1+$2 AS "TWO"') executed by regress_stats_user2
+-- in the current_database
+--
+SELECT pg_stat_statements_reset(
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user2'),
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),
+ (SELECT s.queryid FROM pg_stat_statements AS s
+ WHERE s.query = 'SELECT $1+$2 AS "TWO"' LIMIT 1));
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 2 | 22
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(12 rows)
+
+--
+-- remove query ('SELECT $1 AS "ONE"') executed by two users
+--
+SELECT pg_stat_statements_reset(0,0,s.queryid)
+ FROM pg_stat_statements AS s WHERE s.query = 'SELECT $1 AS "ONE"';
+ pg_stat_statements_reset
+--------------------------
+
+
+(2 rows)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset($1,$2,s.queryid) +| 1 | 2
+ FROM pg_stat_statements AS s WHERE s.query = $3 | |
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 3 | 34
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(11 rows)
+
+--
+-- remove query of a user (regress_stats_user1)
+--
+SELECT pg_stat_statements_reset(r.oid)
+ FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user1';
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset($1,$2,s.queryid) +| 1 | 2
+ FROM pg_stat_statements AS s WHERE s.query = $3 | |
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT pg_stat_statements_reset(r.oid) +| 1 | 1
+ FROM pg_roles AS r WHERE r.rolname = $1 | |
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 4 | 45
+ SET ROLE regress_stats_user2 | 1 | 0
+(10 rows)
+
+--
+-- reset all
+--
+SELECT pg_stat_statements_reset(0,0,0);
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------+-------+------
+ SELECT pg_stat_statements_reset(0,0,0) | 1 | 1
+(1 row)
+
+--
+-- cleanup
+--
+DROP ROLE regress_stats_user1;
+DROP ROLE regress_stats_user2;
DROP EXTENSION pg_stat_statements;
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql
new file mode 100644
index 00000000000..4f8c7f7ee8a
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql
@@ -0,0 +1,7 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.6'" to load this file. \quit
+
+-- Execution is now restricted to superusers, fixing an oversight in 1.5.
+REVOKE EXECUTE ON FUNCTION pg_stat_statements_reset() FROM pg_read_all_stats;
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql
new file mode 100644
index 00000000000..6fc3fed4c93
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql
@@ -0,0 +1,22 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.7'" to load this file. \quit
+
+/* First we have to remove the function from the extension */
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements_reset();
+
+/* Then we can drop it */
+DROP FUNCTION pg_stat_statements_reset();
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements_reset(IN userid Oid DEFAULT 0,
+ IN dbid Oid DEFAULT 0,
+ IN queryid bigint DEFAULT 0
+)
+RETURNS void
+AS 'MODULE_PATHNAME', 'pg_stat_statements_reset_1_7'
+LANGUAGE C STRICT PARALLEL SAFE;
+
+-- Don't want this to be available to non-superusers.
+REVOKE ALL ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) FROM PUBLIC;
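
Taken together with the control-file bump below, the expected upgrade path and the widened signature can be sketched as follows; because the function is declared STRICT, a NULL in any position resets nothing, matching the regression tests added above (the role name is hypothetical):

    ALTER EXTENSION pg_stat_statements UPDATE TO '1.7';
    -- the all-default call keeps the old reset-everything behavior
    SELECT pg_stat_statements_reset();
    -- reset only one role's entries in the current database
    SELECT pg_stat_statements_reset(
        (SELECT oid FROM pg_roles WHERE rolname = 'app_user'),
        (SELECT oid FROM pg_database WHERE datname = current_database()),
        0);
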
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 928673498af..221b47298ce 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -48,7 +48,7 @@
* in the file to be read or written while holding only shared lock.
*
*
- * Copyright (c) 2008-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pg_stat_statements/pg_stat_statements.c
@@ -61,7 +61,6 @@
#include <sys/stat.h>
#include <unistd.h>
-#include "access/hash.h"
#include "catalog/pg_authid.h"
#include "executor/instrument.h"
#include "funcapi.h"
@@ -76,7 +75,9 @@
#include "storage/ipc.h"
#include "storage/spin.h"
#include "tcop/utility.h"
+#include "utils/acl.h"
#include "utils/builtins.h"
+#include "utils/hashutils.h"
#include "utils/memutils.h"
PG_MODULE_MAGIC;
@@ -289,6 +290,7 @@ void _PG_init(void);
void _PG_fini(void);
PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
+PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
PG_FUNCTION_INFO_V1(pg_stat_statements);
@@ -298,45 +300,46 @@ static void pgss_shmem_shutdown(int code, Datum arg);
static void pgss_post_parse_analyze(ParseState *pstate, Query *query);
static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction,
- uint64 count, bool execute_once);
+ ScanDirection direction,
+ uint64 count, bool execute_once);
static void pgss_ExecutorFinish(QueryDesc *queryDesc);
static void pgss_ExecutorEnd(QueryDesc *queryDesc);
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
- ProcessUtilityContext context, ParamListInfo params,
- QueryEnvironment *queryEnv,
- DestReceiver *dest, char *completionTag);
+ ProcessUtilityContext context, ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest, char *completionTag);
static uint64 pgss_hash_string(const char *str, int len);
static void pgss_store(const char *query, uint64 queryId,
- int query_location, int query_len,
- double total_time, uint64 rows,
- const BufferUsage *bufusage,
- pgssJumbleState *jstate);
+ int query_location, int query_len,
+ double total_time, uint64 rows,
+ const BufferUsage *bufusage,
+ pgssJumbleState *jstate);
static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
- pgssVersion api_version,
- bool showtext);
+ pgssVersion api_version,
+ bool showtext);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
- int encoding, bool sticky);
+ int encoding, bool sticky);
static void entry_dealloc(void);
static bool qtext_store(const char *query, int query_len,
- Size *query_offset, int *gc_count);
+ Size *query_offset, int *gc_count);
static char *qtext_load_file(Size *buffer_size);
static char *qtext_fetch(Size query_offset, int query_len,
- char *buffer, Size buffer_size);
+ char *buffer, Size buffer_size);
static bool need_gc_qtexts(void);
static void gc_qtexts(void);
-static void entry_reset(void);
+static void entry_reset(Oid userid, Oid dbid, uint64 queryid);
static void AppendJumble(pgssJumbleState *jstate,
- const unsigned char *item, Size size);
+ const unsigned char *item, Size size);
static void JumbleQuery(pgssJumbleState *jstate, Query *query);
static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
+static void JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks);
static void JumbleExpr(pgssJumbleState *jstate, Node *node);
static void RecordConstLocation(pgssJumbleState *jstate, int location);
static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
- int query_loc, int *query_len_p, int encoding);
+ int query_loc, int *query_len_p, int encoding);
static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
- int query_loc);
+ int query_loc);
static int comp_location(const void *a, const void *b);
@@ -623,7 +626,7 @@ pgss_shmem_startup(void)
/*
* Remove the persisted stats file so it's not included in
- * backups/replication slaves, etc. A new file will be written on next
+ * backups/replication standbys, etc. A new file will be written on next
* shutdown.
*
* Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
@@ -641,19 +644,19 @@ pgss_shmem_startup(void)
read_error:
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not read pg_stat_statement file \"%s\": %m",
+ errmsg("could not read file \"%s\": %m",
PGSS_DUMP_FILE)));
goto fail;
data_error:
ereport(LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("ignoring invalid data in pg_stat_statement file \"%s\"",
+ errmsg("ignoring invalid data in file \"%s\"",
PGSS_DUMP_FILE)));
goto fail;
write_error:
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_TEXT_FILE)));
fail:
if (buffer)
@@ -760,7 +763,7 @@ pgss_shmem_shutdown(int code, Datum arg)
error:
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_DUMP_FILE ".tmp")));
if (qbuffer)
free(qbuffer);
@@ -785,7 +788,7 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
Assert(query->queryId == UINT64CONST(0));
/* Safety check... */
- if (!pgss || !pgss_hash)
+ if (!pgss || !pgss_hash || !pgss_enabled())
return;
/*
@@ -1147,8 +1150,18 @@ pgss_store(const char *query, uint64 queryId,
* For utility statements, we just hash the query string to get an ID.
*/
if (queryId == UINT64CONST(0))
+ {
queryId = pgss_hash_string(query, query_len);
+ /*
+ * If we are unlucky enough to get a hash of zero (which means invalid),
+ * remap it to queryId 2 instead; queryId 1 is already in use for
+ * normal statements.
+ */
+ if (queryId == UINT64CONST(0))
+ queryId = UINT64CONST(2);
+ }
+
/* Set up key for hashtable search */
key.userid = GetUserId();
key.dbid = MyDatabaseId;
@@ -1292,16 +1305,32 @@ pgss_store(const char *query, uint64 queryId,
}
/*
- * Reset all statement statistics.
+ * Reset statement statistics corresponding to userid, dbid, and queryid.
+ */
+Datum
+pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
+{
+ Oid userid;
+ Oid dbid;
+ uint64 queryid;
+
+ userid = PG_GETARG_OID(0);
+ dbid = PG_GETARG_OID(1);
+ queryid = (uint64) PG_GETARG_INT64(2);
+
+ entry_reset(userid, dbid, queryid);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Reset statement statistics.
*/
Datum
pg_stat_statements_reset(PG_FUNCTION_ARGS)
{
- if (!pgss || !pgss_hash)
- ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
- entry_reset();
+ entry_reset(0, 0, 0);
+
PG_RETURN_VOID();
}
@@ -1870,7 +1899,7 @@ qtext_store(const char *query, int query_len,
error:
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_TEXT_FILE)));
if (fd >= 0)
@@ -1912,7 +1941,7 @@ qtext_load_file(Size *buffer_size)
if (errno != ENOENT)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not read pg_stat_statement file \"%s\": %m",
+ errmsg("could not read file \"%s\": %m",
PGSS_TEXT_FILE)));
return NULL;
}
@@ -1922,7 +1951,7 @@ qtext_load_file(Size *buffer_size)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not stat pg_stat_statement file \"%s\": %m",
+ errmsg("could not stat file \"%s\": %m",
PGSS_TEXT_FILE)));
CloseTransientFile(fd);
return NULL;
@@ -1938,7 +1967,7 @@ qtext_load_file(Size *buffer_size)
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Could not allocate enough memory to read pg_stat_statement file \"%s\".",
+ errdetail("Could not allocate enough memory to read file \"%s\".",
PGSS_TEXT_FILE)));
CloseTransientFile(fd);
return NULL;
@@ -1957,14 +1986,17 @@ qtext_load_file(Size *buffer_size)
if (errno)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not read pg_stat_statement file \"%s\": %m",
+ errmsg("could not read file \"%s\": %m",
PGSS_TEXT_FILE)));
free(buf);
CloseTransientFile(fd);
return NULL;
}
- CloseTransientFile(fd);
+ if (CloseTransientFile(fd) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));
*buffer_size = stat.st_size;
return buf;
@@ -2087,7 +2119,7 @@ gc_qtexts(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_TEXT_FILE)));
goto gc_fail;
}
@@ -2117,7 +2149,7 @@ gc_qtexts(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_TEXT_FILE)));
hash_seq_term(&hash_seq);
goto gc_fail;
@@ -2135,14 +2167,14 @@ gc_qtexts(void)
if (ftruncate(fileno(qfile), extent) != 0)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not truncate pg_stat_statement file \"%s\": %m",
+ errmsg("could not truncate file \"%s\": %m",
PGSS_TEXT_FILE)));
if (FreeFile(qfile))
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write pg_stat_statement file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
PGSS_TEXT_FILE)));
qfile = NULL;
goto gc_fail;
@@ -2202,7 +2234,7 @@ gc_qtexts(void)
if (qfile == NULL)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write new pg_stat_statement file \"%s\": %m",
+ errmsg("could not recreate file \"%s\": %m",
PGSS_TEXT_FILE)));
else
FreeFile(qfile);
@@ -2228,22 +2260,67 @@ gc_qtexts(void)
}
/*
- * Release all entries.
+ * Release entries that match the given parameters.
*/
static void
-entry_reset(void)
+entry_reset(Oid userid, Oid dbid, uint64 queryid)
{
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
FILE *qfile;
+ long num_entries;
+ long num_remove = 0;
+ pgssHashKey key;
+
+ if (!pgss || !pgss_hash)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+ num_entries = hash_get_num_entries(pgss_hash);
- hash_seq_init(&hash_seq, pgss_hash);
- while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
+ {
+ /* If all the parameters are available, use the fast path. */
+ key.userid = userid;
+ key.dbid = dbid;
+ key.queryid = queryid;
+
+ /* Remove the entry if it exists */
+ entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_REMOVE, NULL);
+ if (entry) /* found */
+ num_remove++;
+ }
+ else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
{
- hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
+ /* Remove entries matching the non-zero parameters. */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ if ((!userid || entry->key.userid == userid) &&
+ (!dbid || entry->key.dbid == dbid) &&
+ (!queryid || entry->key.queryid == queryid))
+ {
+ hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
+ num_remove++;
+ }
+ }
}
+ else
+ {
+ /* Remove all entries. */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
+ num_remove++;
+ }
+ }
+
+ /* If we didn't remove all entries, the query-text file must be kept. */
+ if (num_entries != num_remove)
+ goto release_lock;
/*
* Write new empty query file, perhaps even creating a new one to recover
@@ -2254,7 +2331,7 @@ entry_reset(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not create pg_stat_statement file \"%s\": %m",
+ errmsg("could not create file \"%s\": %m",
PGSS_TEXT_FILE)));
goto done;
}
@@ -2263,7 +2340,7 @@ entry_reset(void)
if (ftruncate(fileno(qfile), 0) != 0)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not truncate pg_stat_statement file \"%s\": %m",
+ errmsg("could not truncate file \"%s\": %m",
PGSS_TEXT_FILE)));
FreeFile(qfile);
@@ -2273,6 +2350,7 @@ entry_reset(void)
/* This counts as a query text garbage collection for our purposes */
record_gc_qtexts();
+release_lock:
LWLockRelease(pgss->lock);
}
@@ -2353,7 +2431,7 @@ JumbleQuery(pgssJumbleState *jstate, Query *query)
JumbleExpr(jstate, (Node *) query->sortClause);
JumbleExpr(jstate, query->limitOffset);
JumbleExpr(jstate, query->limitCount);
- /* we ignore rowMarks */
+ JumbleRowMarks(jstate, query->rowMarks);
JumbleExpr(jstate, query->setOperations);
}
@@ -2403,6 +2481,8 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
case RTE_NAMEDTUPLESTORE:
APP_JUMB_STRING(rte->enrname);
break;
+ case RTE_RESULT:
+ break;
default:
elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind);
break;
@@ -2410,6 +2490,26 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
}
}
+/*
+ * Jumble a rowMarks list
+ */
+static void
+JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks)
+{
+ ListCell *lc;
+
+ foreach(lc, rowMarks)
+ {
+ RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
+
+ if (!rowmark->pushedDown)
+ {
+ APP_JUMB(rowmark->rti);
+ APP_JUMB(rowmark->strength);
+ APP_JUMB(rowmark->waitPolicy);
+ }
+ }
+}
+
/*
* Jumble an expression tree
*
@@ -2504,14 +2604,14 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
JumbleExpr(jstate, (Node *) expr->aggfilter);
}
break;
- case T_ArrayRef:
+ case T_SubscriptingRef:
{
- ArrayRef *aref = (ArrayRef *) node;
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
- JumbleExpr(jstate, (Node *) aref->refupperindexpr);
- JumbleExpr(jstate, (Node *) aref->reflowerindexpr);
- JumbleExpr(jstate, (Node *) aref->refexpr);
- JumbleExpr(jstate, (Node *) aref->refassgnexpr);
+ JumbleExpr(jstate, (Node *) sbsref->refupperindexpr);
+ JumbleExpr(jstate, (Node *) sbsref->reflowerindexpr);
+ JumbleExpr(jstate, (Node *) sbsref->refexpr);
+ JumbleExpr(jstate, (Node *) sbsref->refassgnexpr);
}
break;
case T_FuncExpr:
@@ -2852,6 +2952,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
/* we store the string name because RTE_CTE RTEs need it */
APP_JUMB_STRING(cte->ctename);
+ APP_JUMB(cte->ctematerialized);
JumbleQuery(jstate, castNode(Query, cte->ctequery));
}
break;
@@ -3074,8 +3175,8 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
/* initialize the flex scanner --- should match raw_parser() */
yyscanner = scanner_init(query,
&yyextra,
- ScanKeywords,
- NumScanKeywords);
+ &ScanKeywords,
+ ScanKeywordTokens);
/* we don't want to re-emit any escape string warnings */
yyextra.escape_string_warning = false;
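
The rewritten entry_reset() above takes one of three paths, which map onto SQL calls as sketched here (the oids and queryid are hypothetical); note that the query-text file is truncated only when every entry ends up removed:

    -- full key: direct HASH_REMOVE fast path
    SELECT pg_stat_statements_reset(16384, 16385, 123456789);
    -- partial key: sequential scan, removing only matching entries
    SELECT pg_stat_statements_reset(16384, 0, 0);
    -- no key: remove everything and truncate the query-text file
    SELECT pg_stat_statements_reset(0, 0, 0);
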
diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control
index 193fcdfafa0..14cb4223543 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.control
+++ b/contrib/pg_stat_statements/pg_stat_statements.control
@@ -1,5 +1,5 @@
# pg_stat_statements extension
comment = 'track execution statistics of all SQL statements executed'
-default_version = '1.5'
+default_version = '1.7'
module_pathname = '$libdir/pg_stat_statements'
relocatable = true
diff --git a/contrib/pg_stat_statements/sql/pg_stat_statements.sql b/contrib/pg_stat_statements/sql/pg_stat_statements.sql
index a8361fd1bff..8b527070d46 100644
--- a/contrib/pg_stat_statements/sql/pg_stat_statements.sql
+++ b/contrib/pg_stat_statements/sql/pg_stat_statements.sql
@@ -177,6 +177,37 @@ SELECT PLUS_ONE(1);
SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+--
+-- queries with locking clauses
+--
+CREATE TABLE pgss_a (id integer PRIMARY KEY);
+CREATE TABLE pgss_b (id integer PRIMARY KEY, a_id integer REFERENCES pgss_a);
+
+SELECT pg_stat_statements_reset();
+
+-- control query
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id;
+
+-- test range tables
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a, pgss_b; -- matches plain "FOR UPDATE"
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a;
+
+-- test strengths
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE;
+
+-- test wait policies
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED;
+
+SELECT calls, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+DROP TABLE pgss_a, pgss_b CASCADE;
+
--
-- utility commands
--
@@ -195,4 +226,68 @@ DROP FUNCTION PLUS_TWO(INTEGER);
SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+--
+-- Track user activity and reset the corresponding statistics
+--
+SELECT pg_stat_statements_reset();
+CREATE ROLE regress_stats_user1;
+CREATE ROLE regress_stats_user2;
+
+SET ROLE regress_stats_user1;
+
+SELECT 1 AS "ONE";
+SELECT 1+1 AS "TWO";
+
+RESET ROLE;
+SET ROLE regress_stats_user2;
+
+SELECT 1 AS "ONE";
+SELECT 1+1 AS "TWO";
+
+RESET ROLE;
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- Don't reset anything if any of the parameters is NULL
+--
+SELECT pg_stat_statements_reset(NULL);
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query ('SELECT $1+$2 AS "TWO"') executed by regress_stats_user2
+-- in the current_database
+--
+SELECT pg_stat_statements_reset(
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user2'),
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),
+ (SELECT s.queryid FROM pg_stat_statements AS s
+ WHERE s.query = 'SELECT $1+$2 AS "TWO"' LIMIT 1));
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query ('SELECT $1 AS "ONE"') executed by two users
+--
+SELECT pg_stat_statements_reset(0,0,s.queryid)
+ FROM pg_stat_statements AS s WHERE s.query = 'SELECT $1 AS "ONE"';
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query of a user (regress_stats_user1)
+--
+SELECT pg_stat_statements_reset(r.oid)
+ FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user1';
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- reset all
+--
+SELECT pg_stat_statements_reset(0,0,0);
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- cleanup
+--
+DROP ROLE regress_stats_user1;
+DROP ROLE regress_stats_user2;
+
DROP EXTENSION pg_stat_statements;
diff --git a/contrib/pg_trgm/expected/pg_strict_word_trgm.out b/contrib/pg_trgm/expected/pg_strict_word_trgm.out
index 43898a3b980..1e1ee16ce95 100644
--- a/contrib/pg_trgm/expected/pg_strict_word_trgm.out
+++ b/contrib/pg_trgm/expected/pg_strict_word_trgm.out
@@ -1,6 +1,8 @@
DROP INDEX trgm_idx2;
\copy test_trgm3 from 'data/trgm2.data'
ERROR: relation "test_trgm3" does not exist
+-- reduce noise
+set extra_float_digits = 0;
select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t;
t | sml
-------------------------------------+----------
diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out
index 6efc54356a1..b3e709f4962 100644
--- a/contrib/pg_trgm/expected/pg_trgm.out
+++ b/contrib/pg_trgm/expected/pg_trgm.out
@@ -10,6 +10,8 @@ WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid);
--backslash is used in tests below, installcheck will fail if
--standard_conforming_string is off
set standard_conforming_strings=on;
+-- reduce noise
+set extra_float_digits = 0;
select show_trgm('');
show_trgm
-----------
diff --git a/contrib/pg_trgm/expected/pg_word_trgm.out b/contrib/pg_trgm/expected/pg_word_trgm.out
index bed61c4922a..936d489390e 100644
--- a/contrib/pg_trgm/expected/pg_word_trgm.out
+++ b/contrib/pg_trgm/expected/pg_word_trgm.out
@@ -1,5 +1,7 @@
CREATE TABLE test_trgm2(t text COLLATE "C");
\copy test_trgm2 from 'data/trgm2.data'
+-- reduce noise
+set extra_float_digits = 0;
select t,word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <% t order by sml desc, t;
t | sml
-------------------------------------+----------
diff --git a/contrib/pg_trgm/sql/pg_strict_word_trgm.sql b/contrib/pg_trgm/sql/pg_strict_word_trgm.sql
index 98e0d379f85..ce0791f29b7 100644
--- a/contrib/pg_trgm/sql/pg_strict_word_trgm.sql
+++ b/contrib/pg_trgm/sql/pg_strict_word_trgm.sql
@@ -2,6 +2,9 @@ DROP INDEX trgm_idx2;
\copy test_trgm3 from 'data/trgm2.data'
+-- reduce noise
+set extra_float_digits = 0;
+
select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t;
select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t;
select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t;
diff --git a/contrib/pg_trgm/sql/pg_trgm.sql b/contrib/pg_trgm/sql/pg_trgm.sql
index 96ae542320d..08459e64c30 100644
--- a/contrib/pg_trgm/sql/pg_trgm.sql
+++ b/contrib/pg_trgm/sql/pg_trgm.sql
@@ -9,6 +9,9 @@ WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid);
--standard_conforming_string is off
set standard_conforming_strings=on;
+-- reduce noise
+set extra_float_digits = 0;
+
select show_trgm('');
select show_trgm('(*&^$@%@');
select show_trgm('a b c');
diff --git a/contrib/pg_trgm/sql/pg_word_trgm.sql b/contrib/pg_trgm/sql/pg_word_trgm.sql
index 4b1db9706a9..d9fa1c55e5e 100644
--- a/contrib/pg_trgm/sql/pg_word_trgm.sql
+++ b/contrib/pg_trgm/sql/pg_word_trgm.sql
@@ -2,6 +2,9 @@ CREATE TABLE test_trgm2(t text COLLATE "C");
\copy test_trgm2 from 'data/trgm2.data'
+-- reduce noise
+set extra_float_digits = 0;
+
select t,word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <% t order by sml desc, t;
select t,word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <% t order by sml desc, t;
select t,word_similarity('Baykal',t) as sml from test_trgm2 where t %> 'Baykal' order by sml desc, t;
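
The extra_float_digits pinning added to these tests suppresses platform-dependent trailing digits in the float4 similarity columns; the effect is easy to see in any session:

    SET extra_float_digits = 0;
    SELECT 0.9285714::float4;   -- at most 6 significant digits, stable across platforms
    SET extra_float_digits = 3;
    SELECT 0.9285714::float4;   -- extra digits, historically platform-dependent
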
diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h
index f0ab50dd05c..0fd600d5810 100644
--- a/contrib/pg_trgm/trgm.h
+++ b/contrib/pg_trgm/trgm.h
@@ -134,7 +134,7 @@ extern float4 cnt_sml(TRGM *trg1, TRGM *trg2, bool inexact);
extern bool trgm_contained_by(TRGM *trg1, TRGM *trg2);
extern bool *trgm_presence_map(TRGM *query, TRGM *key);
extern TRGM *createTrgmNFA(text *text_re, Oid collation,
- TrgmPackedGraph **graph, MemoryContext rcontext);
+ TrgmPackedGraph **graph, MemoryContext rcontext);
extern bool trigramsMatchGraph(TrgmPackedGraph *graph, bool *check);
#endif /* __TRGM_H__ */
diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c
index 53e6830ab1b..e79db8a4f03 100644
--- a/contrib/pg_trgm/trgm_gist.c
+++ b/contrib/pg_trgm/trgm_gist.c
@@ -7,6 +7,7 @@
#include "access/stratnum.h"
#include "fmgr.h"
+#include "port/pg_bitutils.h"
typedef struct
@@ -39,26 +40,6 @@ PG_FUNCTION_INFO_V1(gtrgm_same);
PG_FUNCTION_INFO_V1(gtrgm_penalty);
PG_FUNCTION_INFO_V1(gtrgm_picksplit);
-/* Number of one-bits in an unsigned byte */
-static const uint8 number_of_ones[256] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
-};
-
Datum
gtrgm_in(PG_FUNCTION_ARGS)
@@ -292,7 +273,11 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
case SimilarityStrategyNumber:
case WordSimilarityStrategyNumber:
case StrictWordSimilarityStrategyNumber:
- /* Similarity search is exact. (Strict) word similarity search is inexact */
+
+ /*
+ * Similarity search is exact. (Strict) word similarity search is
+ * inexact
+ */
*recheck = (strategy != SimilarityStrategyNumber);
nlimit = index_strategy_get_limit(strategy);
@@ -630,12 +615,7 @@ gtrgm_same(PG_FUNCTION_ARGS)
static int32
sizebitvec(BITVECP sign)
{
- int32 size = 0,
- i;
-
- LOOPBYTE
- size += number_of_ones[(unsigned char) sign[i]];
- return size;
+ return pg_popcount(sign, SIGLEN);
}
static int
@@ -648,7 +628,8 @@ hemdistsign(BITVECP a, BITVECP b)
LOOPBYTE
{
diff = (unsigned char) (a[i] ^ b[i]);
- dist += number_of_ones[diff];
+ /* Using the popcount functions here isn't likely to win */
+ dist += pg_number_of_ones[diff];
}
return dist;
}
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index b572d087d83..0d4614e9c8a 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -48,14 +48,14 @@ typedef struct
/* Trigram bound type */
typedef uint8 TrgmBound;
-#define TRGM_BOUND_LEFT (0x01) /* trigram is left bound of word */
-#define TRGM_BOUND_RIGHT (0x02) /* trigram is right bound of word */
+#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */
+#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */
/* Word similarity flags */
-#define WORD_SIMILARITY_CHECK_ONLY (0x01) /* if set then only check existence
- * of similar search pattern in text */
-#define WORD_SIMILARITY_STRICT (0x02) /* force bounds of extent to match
- * word bounds */
+#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar
+ * search pattern in text */
+#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match
+ * word bounds */
/*
* Module load callback
@@ -65,7 +65,7 @@ _PG_init(void)
{
/* Define custom GUC variables. */
DefineCustomRealVariable("pg_trgm.similarity_threshold",
- "Sets the threshold used by the %% operator.",
+ "Sets the threshold used by the % operator.",
"Valid range is 0.0 .. 1.0.",
&similarity_threshold,
0.3,
@@ -77,7 +77,7 @@ _PG_init(void)
NULL,
NULL);
DefineCustomRealVariable("pg_trgm.word_similarity_threshold",
- "Sets the threshold used by the <%% operator.",
+ "Sets the threshold used by the <% operator.",
"Valid range is 0.0 .. 1.0.",
&word_similarity_threshold,
0.6,
@@ -89,7 +89,7 @@ _PG_init(void)
NULL,
NULL);
DefineCustomRealVariable("pg_trgm.strict_word_similarity_threshold",
- "Sets the threshold used by the <<%% operator.",
+ "Sets the threshold used by the <<% operator.",
"Valid range is 0.0 .. 1.0.",
&strict_word_similarity_threshold,
0.5,
@@ -144,7 +144,7 @@ index_strategy_get_limit(StrategyNumber strategy)
break;
}
- return 0.0; /* keep compiler quiet */
+ return 0.0; /* keep compiler quiet */
}
/*
@@ -496,13 +496,13 @@ iterate_word_similarity(int *trg2indexes,
/* Select appropriate threshold */
threshold = (flags & WORD_SIMILARITY_STRICT) ?
- strict_word_similarity_threshold :
- word_similarity_threshold;
+ strict_word_similarity_threshold :
+ word_similarity_threshold;
/*
- * Consider first trigram as initial lower bount for strict word similarity,
- * or initialize it later with first trigram present for plain word
- * similarity.
+ * Consider first trigram as initial lower bound for strict word
+ * similarity, or initialize it later with first trigram present for plain
+ * word similarity.
*/
lower = (flags & WORD_SIMILARITY_STRICT) ? 0 : -1;
@@ -533,7 +533,7 @@ iterate_word_similarity(int *trg2indexes,
* plain word similarity
*/
if ((flags & WORD_SIMILARITY_STRICT) ? (bounds[i] & TRGM_BOUND_RIGHT)
- : found[trgindex])
+ : found[trgindex])
{
int prev_lower,
tmp_ulen2,
@@ -597,8 +597,8 @@ iterate_word_similarity(int *trg2indexes,
smlr_max = Max(smlr_max, smlr_cur);
/*
- * if we only check that word similarity is greater than
- * threshold we do not need to calculate a maximum similarity.
+ * if we only check that word similarity is greater than threshold
+ * we do not need to calculate a maximum similarity.
*/
if ((flags & WORD_SIMILARITY_CHECK_ONLY) && smlr_max >= threshold)
break;
@@ -653,7 +653,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
ulen1;
int *trg2indexes;
float4 result;
- TrgmBound *bounds;
+ TrgmBound *bounds;
protect_out_of_mem(slen1 + slen2);
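
The corrected GUC descriptions above drop the doubled %%, which was showing up literally in SHOW output since these strings are displayed verbatim rather than run through a printf-style formatter. The thresholds gate the similarity operators, roughly as sketched:

    CREATE EXTENSION IF NOT EXISTS pg_trgm;
    SET pg_trgm.similarity_threshold = 0.4;       -- gates the % operator
    SELECT 'word' % 'two words', similarity('word', 'two words');
    SET pg_trgm.word_similarity_threshold = 0.6;  -- gates the <% operator
    SELECT 'word' <% 'two words', word_similarity('word', 'two words');
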
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index 547e7c094f8..3ad5731ae80 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -181,7 +181,7 @@
* 7) Mark state 3 final because state 5 of source NFA is marked as final.
*
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
@@ -202,7 +202,7 @@
/*
* Uncomment (or use -DTRGM_REGEXP_DEBUG) to print debug info,
* for exploring and debugging the algorithm implementation.
- * This produces three graph files in /tmp, in Graphviz .dot format.
+ * This produces three graph files in /tmp, in Graphviz .gv format.
* Some progress information is also printed to postmaster stderr.
*/
/* #define TRGM_REGEXP_DEBUG */
@@ -441,9 +441,9 @@ typedef struct
struct TrgmPackedGraph
{
/*
- * colorTrigramsCount and colorTrigramsGroups contain information about
- * how trigrams are grouped into color trigrams. "colorTrigramsCount" is
- * the count of color trigrams and "colorTrigramGroups" contains number of
+ * colorTrigramsCount and colorTrigramGroups contain information about how
+ * trigrams are grouped into color trigrams. "colorTrigramsCount" is the
+ * count of color trigrams and "colorTrigramGroups" contains number of
* simple trigrams for each color trigram. The array of simple trigrams
* (stored separately from this struct) is ordered so that the simple
* trigrams for each color trigram are consecutive, and they're in order
@@ -478,9 +478,9 @@ typedef struct
/* prototypes for private functions */
static TRGM *createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph,
- MemoryContext rcontext);
+ MemoryContext rcontext);
static void RE_compile(regex_t *regex, text *text_re,
- int cflags, Oid collation);
+ int cflags, Oid collation);
static void getColorInfo(regex_t *regex, TrgmNFA *trgmNFA);
static bool convertPgWchar(pg_wchar c, trgm_mb_char *result);
static void transformGraph(TrgmNFA *trgmNFA);
@@ -489,7 +489,7 @@ static void addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key);
static void addKeyToQueue(TrgmNFA *trgmNFA, TrgmStateKey *key);
static void addArcs(TrgmNFA *trgmNFA, TrgmState *state);
static void addArc(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key,
- TrgmColor co, TrgmStateKey *destKey);
+ TrgmColor co, TrgmStateKey *destKey);
static bool validArcLabel(TrgmStateKey *key, TrgmColor co);
static TrgmState *getState(TrgmNFA *trgmNFA, TrgmStateKey *key);
static bool prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2);
@@ -1013,9 +1013,7 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
{
regex_arc_t *arcs;
TrgmStateKey destKey;
- ListCell *cell,
- *prev,
- *next;
+ ListCell *cell;
int i,
arcsCount;
@@ -1030,13 +1028,10 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
* redundancy. We can drop either old key(s) or the new key if we find
* redundancy.
*/
- prev = NULL;
- cell = list_head(state->enterKeys);
- while (cell)
+ foreach(cell, state->enterKeys)
{
TrgmStateKey *existingKey = (TrgmStateKey *) lfirst(cell);
- next = lnext(cell);
if (existingKey->nstate == key->nstate)
{
if (prefixContains(&existingKey->prefix, &key->prefix))
@@ -1050,15 +1045,10 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
* The new key covers this old key. Remove the old key, it's
* no longer needed once we add this key to the list.
*/
- state->enterKeys = list_delete_cell(state->enterKeys,
- cell, prev);
+ state->enterKeys = foreach_delete_current(state->enterKeys,
+ cell);
}
- else
- prev = cell;
}
- else
- prev = cell;
- cell = next;
}
/* No redundancy, so add this key to the state's list */
@@ -2187,8 +2177,8 @@ printSourceNFA(regex_t *regex, TrgmColorInfo *colors, int ncolors)
appendStringInfoString(&buf, "}\n");
{
- /* dot -Tpng -o /tmp/source.png < /tmp/source.dot */
- FILE *fp = fopen("/tmp/source.dot", "w");
+ /* dot -Tpng -o /tmp/source.png < /tmp/source.gv */
+ FILE *fp = fopen("/tmp/source.gv", "w");
fprintf(fp, "%s", buf.data);
fclose(fp);
@@ -2249,8 +2239,8 @@ printTrgmNFA(TrgmNFA *trgmNFA)
appendStringInfoString(&buf, "}\n");
{
- /* dot -Tpng -o /tmp/transformed.png < /tmp/transformed.dot */
- FILE *fp = fopen("/tmp/transformed.dot", "w");
+ /* dot -Tpng -o /tmp/transformed.png < /tmp/transformed.gv */
+ FILE *fp = fopen("/tmp/transformed.gv", "w");
fprintf(fp, "%s", buf.data);
fclose(fp);
@@ -2340,8 +2330,8 @@ printTrgmPackedGraph(TrgmPackedGraph *packedGraph, TRGM *trigrams)
appendStringInfoString(&buf, "}\n");
{
- /* dot -Tpng -o /tmp/packed.png < /tmp/packed.dot */
- FILE *fp = fopen("/tmp/packed.dot", "w");
+ /* dot -Tpng -o /tmp/packed.png < /tmp/packed.gv */
+ FILE *fp = fopen("/tmp/packed.gv", "w");
fprintf(fp, "%s", buf.data);
fclose(fp);
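
The Graphviz files renamed to .gv above are emitted (only with TRGM_REGEXP_DEBUG defined) while building the trigram NFA for a regular-expression search; the kind of query that reaches this machinery is sketched below (table, index, and pattern are hypothetical):

    CREATE EXTENSION IF NOT EXISTS pg_trgm;
    CREATE TABLE docs (body text);
    CREATE INDEX docs_trgm_idx ON docs USING gin (body gin_trgm_ops);
    -- a regex scan over the trigram index goes through createTrgmNFA()
    SELECT count(*) FROM docs WHERE body ~ 'bayk.l';
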
diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c
index 944dea66fc8..75b6d96440b 100644
--- a/contrib/pg_visibility/pg_visibility.c
+++ b/contrib/pg_visibility/pg_visibility.c
@@ -3,13 +3,14 @@
* pg_visibility.c
* display visibility map information and page-level visibility bits
*
- * Copyright (c) 2016-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2016-2019, PostgreSQL Global Development Group
*
* contrib/pg_visibility/pg_visibility.c
*-------------------------------------------------------------------------
*/
#include "postgres.h"
+#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/visibilitymap.h"
#include "catalog/pg_type.h"
@@ -20,6 +21,7 @@
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/rel.h"
+#include "utils/snapmgr.h"
PG_MODULE_MAGIC;
@@ -49,10 +51,10 @@ PG_FUNCTION_INFO_V1(pg_truncate_visibility_map);
static TupleDesc pg_visibility_tupdesc(bool include_blkno, bool include_pd);
static vbits *collect_visibility_data(Oid relid, bool include_pd);
static corrupt_items *collect_corrupt_items(Oid relid, bool all_visible,
- bool all_frozen);
+ bool all_frozen);
static void record_corrupt_item(corrupt_items *items, ItemPointer tid);
static bool tuple_all_visible(HeapTuple tup, TransactionId OldestXmin,
- Buffer buffer);
+ Buffer buffer);
static void check_relation_relkind(Relation rel);
/*
@@ -292,7 +294,7 @@ pg_visibility_map_summary(PG_FUNCTION_ARGS)
ReleaseBuffer(vmbuffer);
relation_close(rel, AccessShareLock);
- tupdesc = CreateTemplateTupleDesc(2, false);
+ tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "all_visible", INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "all_frozen", INT8OID, -1, 0);
tupdesc = BlessTupleDesc(tupdesc);
@@ -381,6 +383,8 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
+ ForkNumber fork;
+ BlockNumber block;
rel = relation_open(relid, AccessExclusiveLock);
@@ -390,7 +394,12 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
RelationOpenSmgr(rel);
rel->rd_smgr->smgr_vm_nblocks = InvalidBlockNumber;
- visibilitymap_truncate(rel, 0);
+ block = visibilitymap_prepare_truncate(rel, 0);
+ if (BlockNumberIsValid(block))
+ {
+ fork = VISIBILITYMAP_FORKNUM;
+ smgrtruncate(rel->rd_smgr, &fork, 1, &block);
+ }
if (RelationNeedsWAL(rel))
{
@@ -416,7 +425,7 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
* here and when we sent the messages at our eventual commit. However,
* we're currently only sending a non-transactional smgr invalidation,
* which will have been posted to shared memory immediately from within
- * visibilitymap_truncate. Therefore, there should be no race here.
+ * smgrtruncate. Therefore, there should be no race here.
*
* The reason why it's desirable to release the lock early here is because
* of the possibility that someone will need to use this to blow away many
@@ -447,7 +456,7 @@ pg_visibility_tupdesc(bool include_blkno, bool include_pd)
++maxattr;
if (include_pd)
++maxattr;
- tupdesc = CreateTemplateTupleDesc(maxattr, false);
+ tupdesc = CreateTemplateTupleDesc(maxattr);
if (include_blkno)
TupleDescInitEntry(tupdesc, ++a, "blkno", INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, ++a, "all_visible", BOOLOID, -1, 0);
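
The truncation rework above is reachable through the extension's SQL functions; a quick sketch ('my_table' is hypothetical):

    CREATE EXTENSION IF NOT EXISTS pg_visibility;
    SELECT * FROM pg_visibility_map_summary('my_table'::regclass);
    -- exercises visibilitymap_prepare_truncate() plus smgrtruncate()
    SELECT pg_truncate_visibility_map('my_table'::regclass);
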
diff --git a/contrib/pgcrypto/Makefile b/contrib/pgcrypto/Makefile
index 573bc6df79a..1313b664087 100644
--- a/contrib/pgcrypto/Makefile
+++ b/contrib/pgcrypto/Makefile
@@ -59,6 +59,9 @@ SHLIB_LINK += $(filter -leay32, $(LIBS))
SHLIB_LINK += -lws2_32
endif
+# Upstream uses a larger subset of C99.
+imath.o: CFLAGS+=$(PERMIT_DECLARATION_AFTER_STATEMENT)
+
rijndael.o: rijndael.tbl
rijndael.tbl:
diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c
index ed07fc46064..6efaa609c9d 100644
--- a/contrib/pgcrypto/crypt-des.c
+++ b/contrib/pgcrypto/crypt-des.c
@@ -11,7 +11,7 @@
* binaries of libcrypt exportable from the USA
*
* Adapted for FreeBSD-4.0 by Mark R V Murray
- * this file should now *only* export crypt_des(), in order to make
+ * this file should now *only* export px_crypt_des(), in order to make
* a module that can be optionally included in libcrypt.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/contrib/pgcrypto/expected/hmac-sha1.out b/contrib/pgcrypto/expected/hmac-sha1.out
index d5f1cf25972..de605b804eb 100644
--- a/contrib/pgcrypto/expected/hmac-sha1.out
+++ b/contrib/pgcrypto/expected/hmac-sha1.out
@@ -1,5 +1,5 @@
--
--- HMAC-MD5
+-- HMAC-SHA1
--
SELECT encode(hmac(
'Hi There',
diff --git a/contrib/pgcrypto/expected/pgp-compression_1.out b/contrib/pgcrypto/expected/pgp-compression_1.out
deleted file mode 100644
index 655830ae140..00000000000
--- a/contrib/pgcrypto/expected/pgp-compression_1.out
+++ /dev/null
@@ -1,42 +0,0 @@
---
--- PGP compression support
---
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-
-ww0ECQMCsci6AdHnELlh0kQB4jFcVwHMJg0Bulop7m3Mi36s15TAhBo0AnzIrRFrdLVCkKohsS6+
-DMcmR53SXfLoDJOv/M8uKj3QSq7oWNIp95pxfA==
-=tbSn
------END PGP MESSAGE-----
-'), 'key', 'expect-compress-algo=1');
- pgp_sym_decrypt
------------------
- Secret message
-(1 row)
-
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret message', 'key', 'compress-algo=0'),
- 'key', 'expect-compress-algo=0');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret message', 'key', 'compress-algo=1'),
- 'key', 'expect-compress-algo=1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret message', 'key', 'compress-algo=2'),
- 'key', 'expect-compress-algo=2');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- level=0 should turn compression off
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret message', 'key',
- 'compress-algo=2, compress-level=0'),
- 'key', 'expect-compress-algo=0');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
diff --git a/contrib/pgcrypto/expected/pgp-decrypt.out b/contrib/pgcrypto/expected/pgp-decrypt.out
index 2dabfaf7b0e..e8250b090ab 100644
--- a/contrib/pgcrypto/expected/pgp-decrypt.out
+++ b/contrib/pgcrypto/expected/pgp-decrypt.out
@@ -1,5 +1,5 @@
--
--- pgp_descrypt tests
+-- pgp decrypt tests
--
-- Checking ciphers
select pgp_sym_decrypt(dearmor('
diff --git a/contrib/pgcrypto/expected/pgp-decrypt_1.out b/contrib/pgcrypto/expected/pgp-decrypt_1.out
deleted file mode 100644
index f3df4e618ad..00000000000
--- a/contrib/pgcrypto/expected/pgp-decrypt_1.out
+++ /dev/null
@@ -1,424 +0,0 @@
---
--- pgp_descrypt tests
---
--- Checking ciphers
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.blowfish.sha1.mdc.s2k3.z0
-
-jA0EBAMCfFNwxnvodX9g0jwB4n4s26/g5VmKzVab1bX1SmwY7gvgvlWdF3jKisvS
-yA6Ce1QTMK3KdL2MPfamsTUSAML8huCJMwYQFfE=
-=JcP+
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCci97v0Q6Z0Zg0kQBsVf5Oe3iC+FBzUmuMV9KxmAyOMyjCc/5i8f1Eest
-UTAsG35A1vYs02VARKzGz6xI2UHwFUirP+brPBg3Ee7muOx8pA==
-=XtrP
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes192.sha1.mdc.s2k3.z0
-
-jA0ECAMCI7YQpWqp3D1g0kQBCjB7GlX7+SQeXNleXeXQ78ZAPNliquGDq9u378zI
-5FPTqAhIB2/2fjY8QEIs1ai00qphjX2NitxV/3Wn+6dufB4Q4g==
-=rCZt
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes256.sha1.mdc.s2k3.z0
-
-jA0ECQMC4f/5djqCC1Rg0kQBTHEPsD+Sw7biBsM2er3vKyGPAQkuTBGKC5ie7hT/
-lceMfQdbAg6oTFyJpk/wH18GzRDphCofg0X8uLgkAKMrpcmgog==
-=fB6S
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
--- Checking MDC modes
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.nomdc.s2k3.z0
-
-jA0EBwMCnv07rlXqWctgyS2Dm2JfOKCRL4sLSLJUC8RS2cH7cIhKSuLitOtyquB+
-u9YkgfJfsuRJmgQ9tmo=
-=60ui
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCEeP3idNjQ1Bg0kQBf4G0wX+2QNzLh2YNwYkQgQkfYhn/hLXjV4nK9nsE
-8Ex1Dsdt5UPvOz8W8VKQRS6loOfOe+yyXil8W3IYFwUpdDUi+Q==
-=moGf
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
--- Checking hashes
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.md5.mdc.s2k3.z0
-
-jA0EBwMClrXXtOXetohg0kQBn0Kl1ymevQZRHkdoYRHgzCwSQEiss7zYff2UNzgO
-KyRrHf7zEBuZiZ2AG34jNVMOLToj1jJUg5zTSdecUzQVCykWTA==
-=NyLk
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCApbdlrURoWJg0kQBzHM/E0o7djY82bNuspjxjAcPFrrtp0uvDdMQ4z2m
-/PM8jhgI5vxFYfNQjLl8y3fHYIomk9YflN9K/Q13iq8A8sjeTw==
-=FxbQ
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
--- Checking S2K modes
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k0.z0
-
-jAQEBwAC0kQBKTaLAKE3xzps+QIZowqRNb2eAdzBw2LxEW2YD5PgNlbhJdGg+dvw
-Ah9GXjGS1TVALzTImJbz1uHUZRfhJlFbc5yGQw==
-=YvkV
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k1.z0
-
-jAwEBwEC/QTByBLI3b/SRAHPxKzI6SZBo5lAEOD+EsvKQWO4adL9tDY+++Iqy1xK
-4IaWXVKEj9R2Lr2xntWWMGZtcKtjD2lFFRXXd9dZp1ZThNDz
-=dbXm
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCEq4Su3ZqNEJg0kQB4QG5jBTKF0i04xtH+avzmLhstBNRxvV3nsmB3cwl
-z+9ZaA/XdSx5ZiFnMym8P6r8uY9rLjjNptvvRHlxIReF+p9MNg==
-=VJKg
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes192.sha1.mdc.s2k0.z0
-
-jAQECAAC0kQBBDnQWkgsx9YFaqDfWmpsiyAJ6y2xG/sBvap1dySYEMuZ+wJTXQ9E
-Cr3i2M7TgVZ0M4jp4QL0adG1lpN5iK7aQeOwMw==
-=cg+i
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes192.sha1.mdc.s2k1.z0
-
-jAwECAECruOfyNDFiTnSRAEVoGXm4A9UZKkWljdzjEO/iaE7mIraltIpQMkiqCh9
-7h8uZ2u9uRBOv222fZodGvc6bvq/4R4hAa/6qSHtm8mdmvGt
-=aHmC
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes192.sha1.mdc.s2k3.z0
-
-jA0ECAMCjFn6SRi3SONg0kQBqtSHPaD0m7rXfDAhCWU/ypAsI93GuHGRyM99cvMv
-q6eF6859ZVnli3BFSDSk3a4e/pXhglxmDYCfjAXkozKNYLo6yw==
-=K0LS
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes256.sha1.mdc.s2k0.z0
-
-jAQECQAC0kQB4L1eMbani07XF2ZYiXNK9LW3v8w41oUPl7dStmrJPQFwsdxmrDHu
-rQr3WbdKdY9ufjOE5+mXI+EFkSPrF9rL9NCq6w==
-=RGts
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes256.sha1.mdc.s2k1.z0
-
-jAwECQECKHhrou7ZOIXSRAHWIVP+xjVQcjAVBTt+qh9SNzYe248xFTwozkwev3mO
-+KVJW0qhk0An+Y2KF99/bYFl9cL5D3Tl43fC8fXGl3x3m7pR
-=SUrU
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes256.sha1.mdc.s2k3.z0
-
-jA0ECQMCjc8lwZu8Fz1g0kQBkEzjImi21liep5jj+3dAJ2aZFfUkohi8b3n9z+7+
-4+NRzL7cMW2RLAFnJbiqXDlRHMwleeuLN1up2WIxsxtYYuaBjA==
-=XZrG
------END PGP MESSAGE-----
-'), 'foobar');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
--- Checking longer passwords
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCx6dBiuqrYNRg0kQBEo63AvA1SCslxP7ayanLf1H0/hlk2nONVhTwVEWi
-tTGup1mMz6Cfh1uDRErUuXpx9A0gdMu7zX0o5XjrL7WGDAZdSw==
-=XKKG
------END PGP MESSAGE-----
-'), '0123456789abcdefghij');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCBDvYuS990iFg0kQBW31UK5OiCjWf5x6KJ8qNNT2HZWQCjCBZMU0XsOC6
-CMxFKadf144H/vpoV9GA0f22keQgCl0EsTE4V4lweVOPTKCMJg==
-=gWDh
------END PGP MESSAGE-----
-'), '0123456789abcdefghij2jk4h5g2j54khg23h54g2kh54g2khj54g23hj54');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCqXbFafC+ofVg0kQBejyiPqH0QMERVGfmPOjtAxvyG5KDIJPYojTgVSDt
-FwsDabdQUz5O7bgNSnxfmyw1OifGF+W2bIn/8W+0rDf8u3+O+Q==
-=OxOF
------END PGP MESSAGE-----
-'), 'x');
- pgp_sym_decrypt
------------------
- Secret message.
-(1 row)
-
--- Checking various data
-select encode(digest(pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat1.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCGJ+SpuOysINg0kQBJfSjzsW0x4OVcAyr17O7FBvMTwIGeGcJd99oTQU8
-Xtx3kDqnhUq9Z1fS3qPbi5iNP2A9NxOBxPWz2JzxhydANlgbxg==
-=W/ik
------END PGP MESSAGE-----
-'), '0123456789abcdefghij'), 'sha1'), 'hex');
- encode
-------------------------------------------
- 0225e3ede6f2587b076d021a189ff60aad67e066
-(1 row)
-
--- expected: 0225e3ede6f2587b076d021a189ff60aad67e066
-select encode(digest(pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat2.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCvdpDvidNzMxg0jUBvj8eS2+1t/9/zgemxvhtc0fvdKGGbjH7dleaTJRB
-SaV9L04ky1qECNDx3XjnoKLC+H7IOQ==
-=Fxen
------END PGP MESSAGE-----
-'), '0123456789abcdefghij'), 'sha1'), 'hex');
- encode
-------------------------------------------
- da39a3ee5e6b4b0d3255bfef95601890afd80709
-(1 row)
-
--- expected: da39a3ee5e6b4b0d3255bfef95601890afd80709
-select encode(digest(pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: dat3.aes.sha1.mdc.s2k3.z0
-
-jA0EBwMCxQvxJZ3G/HRg0lgBeYmTa7/uDAjPyFwSX4CYBgpZWVn/JS8JzILrcWF8
-gFnkUKIE0PSaYFp+Yi1VlRfUtRQ/X/LYNGa7tWZS+4VQajz2Xtz4vUeAEiYFYPXk
-73Hb8m1yRhQK
-=ivrD
------END PGP MESSAGE-----
-'), '0123456789abcdefghij'), 'sha1'), 'hex');
- encode
-------------------------------------------
- 5e5c135efc0dd00633efc6dfd6e731ea408a5b4c
-(1 row)
-
--- expected: 5e5c135efc0dd00633efc6dfd6e731ea408a5b4c
--- Checking CRLF
-select encode(digest(pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: crlf mess
-
-ww0ECQMCt7VAtby6l4Bi0lgB5KMIZiiF/b3CfMfUyY0eDncsGXtkbu1X+l9brjpMP8eJnY79Amms
-a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs
-=mBP9
------END PGP MESSAGE-----
-'), 'key', 'convert-crlf=0'), 'sha1'), 'hex');
- encode
-------------------------------------------
- 9353062be7720f1446d30b9e75573a4833886784
-(1 row)
-
--- expected: 9353062be7720f1446d30b9e75573a4833886784
-select encode(digest(pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-Comment: crlf mess
-
-ww0ECQMCt7VAtby6l4Bi0lgB5KMIZiiF/b3CfMfUyY0eDncsGXtkbu1X+l9brjpMP8eJnY79Amms
-a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs
-=mBP9
------END PGP MESSAGE-----
-'), 'key', 'convert-crlf=1'), 'sha1'), 'hex');
- encode
-------------------------------------------
- 7efefcab38467f7484d6fa43dc86cf5281bd78e2
-(1 row)
-
--- expected: 7efefcab38467f7484d6fa43dc86cf5281bd78e2
--- check BUG #11905, problem with messages 6 less than a power of 2.
-select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530);
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- expected: true
--- Negative tests
--- Decryption with a certain incorrect key yields an apparent Literal Data
--- packet reporting its content to be binary data. Ciphertext source:
--- iterative pgp_sym_encrypt('secret', 'key') until the random prefix gave
--- rise to that property.
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-
-ww0EBwMCxf8PTrQBmJdl0jcB6y2joE7GSLKRv7trbNsF5Z8ou5NISLUg31llVH/S0B2wl4bvzZjV
-VsxxqLSPzNLAeIspJk5G
-=mSd/
------END PGP MESSAGE-----
-'), 'wrong-key', 'debug=1');
-NOTICE: dbg: prefix_init: corrupt prefix
-NOTICE: dbg: parse_literal_data: data type=b
-NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr
-ERROR: Wrong key or corrupt data
--- Routine text/binary mismatch.
-select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- Decryption with a certain incorrect key yields an apparent BZip2-compressed
--- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key')
--- until the random prefix gave rise to that property.
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-
-ww0EBwMC9rK/dMkF5Zlt0jcBlzAQ1mQY2qYbKYbw8h3EZ5Jk0K2IiY92R82TRhWzBIF/8cmXDPtP
-GXsd65oYJZp3Khz0qfyn
-=Nmpq
------END PGP MESSAGE-----
-'), 'wrong-key', 'debug=1');
-NOTICE: dbg: prefix_init: corrupt prefix
-NOTICE: dbg: parse_compressed_data: bzip2 unsupported
-NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr
-ERROR: Wrong key or corrupt data
--- Routine use of BZip2 compression. Ciphertext source:
--- echo x | gpg --homedir /nonexistent --personal-compress-preferences bzip2 \
--- --personal-cipher-preferences aes --no-emit-version --batch \
--- --symmetric --passphrase key --armor
-select pgp_sym_decrypt(dearmor('
------BEGIN PGP MESSAGE-----
-
-jA0EBwMCRhFrAKNcLVJg0mMBLJG1cCASNk/x/3dt1zJ+2eo7jHfjgg3N6wpB3XIe
-QCwkWJwlBG5pzbO5gu7xuPQN+TbPJ7aQ2sLx3bAHhtYb0i3vV9RO10Gw++yUyd4R
-UCAAw2JRIISttRHMfDpDuZJpvYo=
-=AZ9M
------END PGP MESSAGE-----
-'), 'key', 'debug=1');
-NOTICE: dbg: parse_compressed_data: bzip2 unsupported
-ERROR: Unsupported compression algorithm
diff --git a/contrib/pgcrypto/expected/pgp-encrypt_1.out b/contrib/pgcrypto/expected/pgp-encrypt_1.out
deleted file mode 100644
index 72f346414ab..00000000000
--- a/contrib/pgcrypto/expected/pgp-encrypt_1.out
+++ /dev/null
@@ -1,161 +0,0 @@
---
--- PGP encrypt
---
--- ensure consistent test output regardless of the default bytea format
-SET bytea_output TO escape;
-select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- check whether the defaults are ok
-select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=aes128,
- expect-disable-mdc=0,
- expect-sess-key=0,
- expect-s2k-mode=3,
- expect-s2k-digest-algo=sha1,
- expect-compress-algo=0
- ');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- maybe the expect- stuff simply does not work
-select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=bf,
- expect-disable-mdc=1,
- expect-sess-key=1,
- expect-s2k-mode=0,
- expect-s2k-digest-algo=md5,
- expect-compress-algo=1
- ');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- bytea as text
-select pgp_sym_decrypt(pgp_sym_encrypt_bytea('Binary', 'baz'), 'baz');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- text as bytea
-select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- algorithm change
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'),
- 'key', 'expect-cipher-algo=bf');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'),
- 'key', 'expect-cipher-algo=aes128');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'),
- 'key', 'expect-cipher-algo=aes192');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- s2k change
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'),
- 'key', 'expect-s2k-mode=0');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'),
- 'key', 'expect-s2k-mode=1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'),
- 'key', 'expect-s2k-mode=3');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- s2k count change
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-count=1024'),
- 'key', 'expect-s2k-count=1024');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- s2k_count rounds up
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-count=65000000'),
- 'key', 'expect-s2k-count=65000000');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- s2k digest change
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'),
- 'key', 'expect-s2k-digest-algo=md5');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'),
- 'key', 'expect-s2k-digest-algo=sha1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- sess key
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'),
- 'key', 'expect-sess-key=0');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'),
- 'key', 'expect-sess-key=1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=bf');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- no mdc
-select pgp_sym_decrypt(
- pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'),
- 'key', 'expect-disable-mdc=1');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- crlf
-select encode(pgp_sym_decrypt_bytea(
- pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'),
- 'key'), 'hex');
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- conversion should be lossless
-select encode(digest(pgp_sym_decrypt(
- pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'),
- 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
- encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
diff --git a/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out b/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out
deleted file mode 100644
index 6da4c6da413..00000000000
--- a/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out
+++ /dev/null
@@ -1,62 +0,0 @@
---
--- PGP Public Key Encryption
---
--- ensure consistent test output regardless of the default bytea format
-SET bytea_output TO escape;
--- successful encrypt/decrypt
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=1;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=2;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=3;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=6;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- try with rsa-sign only
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=4;
-ERROR: No encryption key found
--- try with secret key
-select pgp_pub_decrypt(
- pgp_pub_encrypt('Secret msg', dearmor(seckey)),
- dearmor(seckey))
-from keytbl where keytbl.id=1;
-ERROR: Refusing to encrypt with secret key
--- does text-to-bytea works
-select pgp_pub_decrypt_bytea(
- pgp_pub_encrypt('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=1;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
--- and bytea-to-text?
-select pgp_pub_decrypt(
- pgp_pub_encrypt_bytea('Secret msg', dearmor(pubkey)),
- dearmor(seckey))
-from keytbl where keytbl.id=1;
-ERROR: generating random data is not supported by this build
-DETAIL: This functionality requires a source of strong random numbers.
-HINT: You need to rebuild PostgreSQL using --enable-strong-random.
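The *_1.out files deleted above were the alternate expected outputs for builds configured with --disable-strong-random; PostgreSQL 12 made a source of strong random numbers a hard requirement, so only the primary expected files remain.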
diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c
index cd528bfd836..6936d2cdcaf 100644
--- a/contrib/pgcrypto/imath.c
+++ b/contrib/pgcrypto/imath.c
@@ -1,90 +1,110 @@
-/* imath version 1.3 */
+/*-------------------------------------------------------------------------
+ *
+ * imath.c
+ *
+ * Last synchronized from https://github.com/creachadair/imath/tree/v1.29,
+ * using the following procedure:
+ *
+ * 1. Download imath.c and imath.h of the last synchronized version. Remove
+ * "#ifdef __cplusplus" blocks, which upset pgindent. Run pgindent on the
+ * two files. Filter the two files through "unexpand -t4 --first-only".
+ * Diff the result against the PostgreSQL versions. As of the last
+ * synchronization, changes were as follows:
+ *
+ * - replace malloc(), realloc() and free() with px_ versions
+ * - redirect assert() to Assert()
+ * - #undef MIN, #undef MAX before defining them
+ * - remove includes covered by c.h
+ * - rename DEBUG to IMATH_DEBUG
+ * - replace stdint.h usage with c.h equivalents
+ * - suppress MSVC warning 4146
+ * - add required PG_USED_FOR_ASSERTS_ONLY
+ *
+ * 2. Download a newer imath.c and imath.h. Transform them like in step 1.
+ * Apply to these files the diff you saved in step 1. Look for new lines
+ * requiring the same kind of change, such as new malloc() calls.
+ *
+ * 3. Configure PostgreSQL using --without-openssl. Run "make -C
+ * contrib/pgcrypto check".
+ *
+ * 4. Update this header comment.
+ *
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pgcrypto/imath.c
+ *
+ * Upstream copyright terms follow.
+ *-------------------------------------------------------------------------
+ */
+
/*
Name: imath.c
Purpose: Arbitrary precision integer arithmetic routines.
- Author: M. J. Fromberger
- Info: Id: imath.c 21 2006-04-02 18:58:36Z sting
-
- Copyright (C) 2002 Michael J. Fromberger, All Rights Reserved.
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation files
- (the "Software"), to deal in the Software without restriction,
- including without limitation the rights to use, copy, modify, merge,
- publish, distribute, sublicense, and/or sell copies of the Software,
- and to permit persons to whom the Software is furnished to do so,
- subject to the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ Author: M. J. Fromberger
+
+ Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
-/* contrib/pgcrypto/imath.c */
#include "postgres.h"
-#include "px.h"
+
#include "imath.h"
+#include "px.h"
#undef assert
#define assert(TEST) Assert(TEST)
-#define TRACEABLE_CLAMP 0
-#define TRACEABLE_FREE 0
-
-/* {{{ Constants */
const mp_result MP_OK = 0; /* no error, all is well */
-const mp_result MP_FALSE = 0; /* boolean false */
-const mp_result MP_TRUE = -1; /* boolean true */
-const mp_result MP_MEMORY = -2; /* out of memory */
+const mp_result MP_FALSE = 0; /* boolean false */
+const mp_result MP_TRUE = -1; /* boolean true */
+const mp_result MP_MEMORY = -2; /* out of memory */
const mp_result MP_RANGE = -3; /* argument out of range */
-const mp_result MP_UNDEF = -4; /* result undefined */
-const mp_result MP_TRUNC = -5; /* output truncated */
+const mp_result MP_UNDEF = -4; /* result undefined */
+const mp_result MP_TRUNC = -5; /* output truncated */
const mp_result MP_BADARG = -6; /* invalid null argument */
+const mp_result MP_MINERR = -6;
const mp_sign MP_NEG = 1; /* value is strictly negative */
-const mp_sign MP_ZPOS = 0; /* value is non-negative */
+const mp_sign MP_ZPOS = 0; /* value is non-negative */
static const char *s_unknown_err = "unknown result code";
-static const char *s_error_msg[] = {
- "error code 0",
- "boolean true",
- "out of memory",
- "argument out of range",
- "result undefined",
- "output truncated",
- "invalid null argument",
- NULL
-};
-
-/* }}} */
-
-/* Optional library flags */
-#define MP_CAP_DIGITS 1 /* flag bit to capitalize letter digits */
-
-/* Argument checking macros
- Use CHECK() where a return value is required; NRCHECK() elsewhere */
-#define CHECK(TEST) assert(TEST)
-#define NRCHECK(TEST) assert(TEST)
-
-/* {{{ Logarithm table for computing output sizes */
+static const char *s_error_msg[] = {"error code 0", "boolean true",
+ "out of memory", "argument out of range",
+ "result undefined", "output truncated",
+"invalid argument", NULL};
/* The ith entry of this table gives the value of log_i(2).
An integer value n requires ceil(log_i(n)) digits to be represented
in base i. Since it is easy to compute lg(n), by counting bits, we
can compute log_i(n) = lg(n) * log_i(2).
+
+ The use of this table eliminates a dependency upon linkage against
+ the standard math libraries.
+
+ If MP_MAX_RADIX is increased, this table should be expanded too.
*/
static const double s_log2[] = {
- 0.000000000, 0.000000000, 1.000000000, 0.630929754, /* 0 1 2 3 */
- 0.500000000, 0.430676558, 0.386852807, 0.356207187, /* 4 5 6 7 */
+ 0.000000000, 0.000000000, 1.000000000, 0.630929754, /* (D)(D) 2 3 */
+ 0.500000000, 0.430676558, 0.386852807, 0.356207187, /* 4 5 6 7 */
0.333333333, 0.315464877, 0.301029996, 0.289064826, /* 8 9 10 11 */
0.278942946, 0.270238154, 0.262649535, 0.255958025, /* 12 13 14 15 */
0.250000000, 0.244650542, 0.239812467, 0.235408913, /* 16 17 18 19 */
@@ -92,136 +112,242 @@ static const double s_log2[] = {
0.218104292, 0.215338279, 0.212746054, 0.210309918, /* 24 25 26 27 */
0.208014598, 0.205846832, 0.203795047, 0.201849087, /* 28 29 30 31 */
0.200000000, 0.198239863, 0.196561632, 0.194959022, /* 32 33 34 35 */
- 0.193426404, 0.191958720, 0.190551412, 0.189200360, /* 36 37 38 39 */
- 0.187901825, 0.186652411, 0.185449023, 0.184288833, /* 40 41 42 43 */
- 0.183169251, 0.182087900, 0.181042597, 0.180031327, /* 44 45 46 47 */
- 0.179052232, 0.178103594, 0.177183820, 0.176291434, /* 48 49 50 51 */
- 0.175425064, 0.174583430, 0.173765343, 0.172969690, /* 52 53 54 55 */
- 0.172195434, 0.171441601, 0.170707280, 0.169991616, /* 56 57 58 59 */
- 0.169293808, 0.168613099, 0.167948779, 0.167300179, /* 60 61 62 63 */
- 0.166666667
+ 0.193426404, /* 36 */
};
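The table's purpose is easiest to see with a worked case: the number of digits needed in base r is ceil(lg(n) * log_r(2)), so a full 64-bit value in base 10 needs ceil(64 * 0.301029996) = 20 digits, matching 2^64 - 1 = 18446744073709551615. An illustrative libm-free rendering of that calculation, not code from the patch:

    /* ceil(bits * log10(2)) via integer arithmetic, avoiding libm just as
     * the s_log2[] table does; 301029996 / 10^9 approximates log10(2). */
    static int
    decimal_digits_for_bits(int bits)
    {
        return (int) ((bits * 301029996LL + 999999999) / 1000000000);
    }

For bits = 64 this yields 20; for bits = 3 (values up to 7) it yields 1.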
-/* }}} */
-/* {{{ Various macros */
-
/* Return the number of digits needed to represent a static value */
#define MP_VALUE_DIGITS(V) \
-((sizeof(V)+(sizeof(mp_digit)-1))/sizeof(mp_digit))
+ ((sizeof(V) + (sizeof(mp_digit) - 1)) / sizeof(mp_digit))
/* Round precision P to nearest word boundary */
-#define ROUND_PREC(P) ((mp_size)(2*(((P)+1)/2)))
+static inline mp_size
+s_round_prec(mp_size P)
+{
+ return 2 * ((P + 1) / 2);
+}
/* Set array P of S digits to zero */
-#define ZERO(P, S) \
-do{mp_size i__=(S)*sizeof(mp_digit);mp_digit *p__=(P);memset(p__,0,i__);}while(0)
+static inline void
+ZERO(mp_digit *P, mp_size S)
+{
+ mp_size i__ = S * sizeof(mp_digit);
+ mp_digit *p__ = P;
+
+ memset(p__, 0, i__);
+}
/* Copy S digits from array P to array Q */
-#define COPY(P, Q, S) \
-do{mp_size i__=(S)*sizeof(mp_digit);mp_digit *p__=(P),*q__=(Q);\
-memcpy(q__,p__,i__);}while(0)
+static inline void
+COPY(mp_digit *P, mp_digit *Q, mp_size S)
+{
+ mp_size i__ = S * sizeof(mp_digit);
+ mp_digit *p__ = P;
+ mp_digit *q__ = Q;
-/* Reverse N elements of type T in array A */
-#define REV(T, A, N) \
-do{T *u_=(A),*v_=u_+(N)-1;while(u_<v_){T xch=*u_;*u_++=*v_;*v_--=xch;}}while(0)
-
-#if TRACEABLE_CLAMP
-#define CLAMP(Z) s_clamp(Z)
-#else
-#define CLAMP(Z) \
-do{mp_int z_=(Z);mp_size uz_=MP_USED(z_);mp_digit *dz_=MP_DIGITS(z_)+uz_-1;\
-while(uz_ > 1 && (*dz_-- == 0)) --uz_;MP_USED(z_)=uz_;}while(0)
-#endif
+/* Reverse N elements of unsigned char in A. */
+static inline void
+REV(unsigned char *A, int N)
+{
+ unsigned char *u_ = A;
+ unsigned char *v_ = u_ + N - 1;
+
+ while (u_ < v_)
+ {
+ unsigned char xch = *u_;
+
+ *u_++ = *v_;
+ *v_-- = xch;
+ }
+}
+
+/* Strip leading zeroes from z_ in-place. */
+static inline void
+CLAMP(mp_int z_)
+{
+ mp_size uz_ = MP_USED(z_);
+ mp_digit *dz_ = MP_DIGITS(z_) + uz_ - 1;
+
+ while (uz_ > 1 && (*dz_-- == 0))
+ --uz_;
+ z_->used = uz_;
+}
+/* Select min/max. */
#undef MIN
#undef MAX
-#define MIN(A, B) ((B)<(A)?(B):(A))
-#define MAX(A, B) ((B)>(A)?(B):(A))
-#define SWAP(T, A, B) do{T t_=(A);A=(B);B=t_;}while(0)
-
-#define TEMP(K) (temp + (K))
-#define SETUP(E, C) \
-do{if((res = (E)) != MP_OK) goto CLEANUP; ++(C);}while(0)
+static inline int
+MIN(int A, int B)
+{
+ return (B < A ? B : A);
+}
+static inline mp_size
+MAX(mp_size A, mp_size B)
+{
+ return (B > A ? B : A);
+}
-#define CMPZ(Z) \
-(((Z)->used==1&&(Z)->digits[0]==0)?0:((Z)->sign==MP_NEG)?-1:1)
+/* Exchange lvalues A and B of type T, e.g.
+ SWAP(int, x, y) where x and y are variables of type int. */
+#define SWAP(T, A, B) \
+ do { \
+ T t_ = (A); \
+ A = (B); \
+ B = t_; \
+ } while (0)
-#define UMUL(X, Y, Z) \
-do{mp_size ua_=MP_USED(X),ub_=MP_USED(Y);mp_size o_=ua_+ub_;\
-ZERO(MP_DIGITS(Z),o_);\
-(void) s_kmul(MP_DIGITS(X),MP_DIGITS(Y),MP_DIGITS(Z),ua_,ub_);\
-MP_USED(Z)=o_;CLAMP(Z);}while(0)
+/* Declare a block of N temporary mpz_t values.
+ These values are initialized to zero.
+ You must add CLEANUP_TEMP() at the end of the function.
+ Use TEMP(i) to access a pointer to the ith value.
+ */
+#define DECLARE_TEMP(N) \
+ struct { \
+ mpz_t value[(N)]; \
+ int len; \
+ mp_result err; \
+ } temp_ = { \
+ .len = (N), \
+ .err = MP_OK, \
+ }; \
+ do { \
+ for (int i = 0; i < temp_.len; i++) { \
+ mp_int_init(TEMP(i)); \
+ } \
+ } while (0)
+
+/* Clear all allocated temp values. */
+#define CLEANUP_TEMP() \
+ CLEANUP: \
+ do { \
+ for (int i = 0; i < temp_.len; i++) { \
+ mp_int_clear(TEMP(i)); \
+ } \
+ if (temp_.err != MP_OK) { \
+ return temp_.err; \
+ } \
+ } while (0)
+
+/* A pointer to the kth temp value. */
+#define TEMP(K) (temp_.value + (K))
+
+/* Evaluate E, an expression of type mp_result expected to return MP_OK. If
+ the value is not MP_OK, the error is cached and control resumes at the
+ cleanup handler, which returns it.
+*/
+#define REQUIRE(E) \
+ do { \
+ temp_.err = (E); \
+ if (temp_.err != MP_OK) goto CLEANUP; \
+ } while (0)
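Together these macros replace the old SETUP()/last-counter error handling with a declarative pattern: declare the temporaries once, funnel every fallible call through REQUIRE(), and let CLEANUP_TEMP() both clear the temporaries and propagate a cached error. A hypothetical consumer, to show the intended shape (not a function from the patch):

    /* Sketch: c = (a + b)^2 using the temp-value discipline above. */
    static mp_result
    add_then_square(mp_int a, mp_int b, mp_int c)
    {
        DECLARE_TEMP(1);

        REQUIRE(mp_int_add(a, b, TEMP(0)));     /* failure jumps to CLEANUP */
        REQUIRE(mp_int_sqr(TEMP(0), c));

        CLEANUP_TEMP();                         /* clears TEMP(0); returns the
                                                 * cached error, if any */
        return MP_OK;
    }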
+
+/* Compare value to zero. */
+static inline int
+CMPZ(mp_int Z)
+{
+ if (Z->used == 1 && Z->digits[0] == 0)
+ return 0;
+ return (Z->sign == MP_NEG) ? -1 : 1;
+}
-#define USQR(X, Z) \
-do{mp_size ua_=MP_USED(X),o_=ua_+ua_;ZERO(MP_DIGITS(Z),o_);\
-(void) s_ksqr(MP_DIGITS(X),MP_DIGITS(Z),ua_);MP_USED(Z)=o_;CLAMP(Z);}while(0)
+static inline mp_word
+UPPER_HALF(mp_word W)
+{
+ return (W >> MP_DIGIT_BIT);
+}
+static inline mp_digit
+LOWER_HALF(mp_word W)
+{
+ return (mp_digit) (W);
+}
-#define UPPER_HALF(W) ((mp_word)((W) >> MP_DIGIT_BIT))
-#define LOWER_HALF(W) ((mp_digit)(W))
-#define HIGH_BIT_SET(W) ((W) >> (MP_WORD_BIT - 1))
-#define ADD_WILL_OVERFLOW(W, V) ((MP_WORD_MAX - (V)) < (W))
+/* Report whether the highest-order bit of W is 1. */
+static inline bool
+HIGH_BIT_SET(mp_word W)
+{
+ return (W >> (MP_WORD_BIT - 1)) != 0;
+}
-/* }}} */
+/* Report whether adding W + V will carry out. */
+static inline bool
+ADD_WILL_OVERFLOW(mp_word W, mp_word V)
+{
+ return ((MP_WORD_MAX - V) < W);
+}
/* Default number of digits allocated to a new mp_int */
-static mp_size default_precision = 64;
+static mp_size default_precision = 8;
+
+void
+mp_int_default_precision(mp_size size)
+{
+ assert(size > 0);
+ default_precision = size;
+}
/* Minimum number of digits to invoke recursive multiply */
static mp_size multiply_threshold = 32;
-/* Default library configuration flags */
-static mp_word mp_flags = MP_CAP_DIGITS;
+void
+mp_int_multiply_threshold(mp_size thresh)
+{
+ assert(thresh >= sizeof(mp_word));
+ multiply_threshold = thresh;
+}
/* Allocate a buffer of (at least) num digits, or return
NULL if that couldn't be done. */
static mp_digit *s_alloc(mp_size num);
-#if TRACEABLE_FREE
+/* Release a buffer of digits allocated by s_alloc(). */
static void s_free(void *ptr);
-#else
-#define s_free(P) px_free(P)
-#endif
/* Insure that z has at least min digits allocated, resizing if
necessary. Returns true if successful, false if out of memory. */
-static int s_pad(mp_int z, mp_size min);
+static bool s_pad(mp_int z, mp_size min);
-/* Normalize by removing leading zeroes (except when z = 0) */
-#if TRACEABLE_CLAMP
-static void s_clamp(mp_int z);
-#endif
+/* Ensure Z has at least N digits allocated. */
+static inline mp_result
+GROW(mp_int Z, mp_size N)
+{
+ return s_pad(Z, N) ? MP_OK : MP_MEMORY;
+}
/* Fill in a "fake" mp_int on the stack with a given value */
-static void s_fake(mp_int z, int value, mp_digit vbuf[]);
+static void s_fake(mp_int z, mp_small value, mp_digit vbuf[]);
+static void s_ufake(mp_int z, mp_usmall value, mp_digit vbuf[]);
/* Compare two runs of digits of given length, returns <0, 0, >0 */
static int s_cdig(mp_digit *da, mp_digit *db, mp_size len);
/* Pack the unsigned digits of v into array t */
-static int s_vpack(int v, mp_digit t[]);
+static int s_uvpack(mp_usmall v, mp_digit t[]);
/* Compare magnitudes of a and b, returns <0, 0, >0 */
static int s_ucmp(mp_int a, mp_int b);
/* Compare magnitudes of a and v, returns <0, 0, >0 */
-static int s_vcmp(mp_int a, int v);
+static int s_vcmp(mp_int a, mp_small v);
+static int s_uvcmp(mp_int a, mp_usmall uv);
/* Unsigned magnitude addition; assumes dc is big enough.
Carry out is returned (no memory allocated). */
-static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b);
+static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b);
/* Unsigned magnitude subtraction. Assumes dc is big enough. */
-static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b);
+static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b);
/* Unsigned recursive multiplication. Assumes dc is big enough. */
-static int s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b);
+static int s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b);
/* Unsigned magnitude multiplication. Assumes dc is big enough. */
-static void s_umul(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b);
+static void s_umul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b);
/* Unsigned recursive squaring. Assumes dc is big enough. */
static int s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a);
@@ -236,8 +362,7 @@ static void s_dadd(mp_int a, mp_digit b);
static void s_dmul(mp_int a, mp_digit b);
/* Single digit multiplication on buffers; assumes dc is big enough. */
-static void s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc,
- mp_size size_a);
+static void s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc, mp_size size_a);
/* Single digit division. Replaces a with the quotient,
returns the remainder. */
@@ -264,7 +389,7 @@ static int s_dp2k(mp_int z);
static int s_isp2(mp_int z);
/* Set z to 2^k. May allocate; returns false in case this fails. */
-static int s_2expt(mp_int z, int k);
+static int s_2expt(mp_int z, mp_small k);
/* Normalize a and b for division, returns normalization constant */
static int s_norm(mp_int a, mp_int b);
@@ -279,17 +404,17 @@ static int s_reduce(mp_int x, mp_int m, mp_int mu, mp_int q1, mp_int q2);
/* Modular exponentiation, using Barrett reduction */
static mp_result s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c);
-/* Unsigned magnitude division. Assumes |a| > |b|. Allocates
- temporaries; overwrites a with quotient, b with remainder. */
-static mp_result s_udiv(mp_int a, mp_int b);
+/* Unsigned magnitude division. Assumes |a| > |b|. Allocates temporaries;
+ overwrites a with quotient, b with remainder. */
+static mp_result s_udiv_knuth(mp_int a, mp_int b);
-/* Compute the number of digits in radix r required to represent the
- given value. Does not account for sign flags, terminators, etc. */
+/* Compute the number of digits in radix r required to represent the given
+ value. Does not account for sign flags, terminators, etc. */
static int s_outlen(mp_int z, mp_size r);
-/* Guess how many digits of precision will be needed to represent a
- radix r value of the specified number of digits. Returns a value
- guaranteed to be no smaller than the actual number required. */
+/* Guess how many digits of precision will be needed to represent a radix r
+ value of the specified number of digits. Returns a value guaranteed to be
+ no smaller than the actual number required. */
static mp_size s_inlen(int len, mp_size r);
/* Convert a character to a digit value in radix r, or
@@ -302,177 +427,161 @@ static char s_val2ch(int v, int caps);
/* Take 2's complement of a buffer in place */
static void s_2comp(unsigned char *buf, int len);
-/* Convert a value to binary, ignoring sign. On input, *limpos is the
- bound on how many bytes should be written to buf; on output, *limpos
- is set to the number of bytes actually written. */
+/* Convert a value to binary, ignoring sign. On input, *limpos is the bound on
+ how many bytes should be written to buf; on output, *limpos is set to the
+ number of bytes actually written. */
static mp_result s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad);
-#if 0
-/* Dump a representation of the mp_int to standard output */
-void s_print(char *tag, mp_int z);
-void s_print_buf(char *tag, mp_digit *buf, mp_size num);
-#endif
-
-/* {{{ get_default_precision() */
-
-mp_size
-mp_get_default_precision(void)
+/* Multiply X by Y into Z, ignoring signs. Requires that Z have enough storage
+ preallocated to hold the result. */
+static inline void
+UMUL(mp_int X, mp_int Y, mp_int Z)
{
- return default_precision;
-}
-
-/* }}} */
-
-/* {{{ mp_set_default_precision(s) */
-
-void
-mp_set_default_precision(mp_size s)
-{
- NRCHECK(s > 0);
+ mp_size ua_ = MP_USED(X);
+ mp_size ub_ = MP_USED(Y);
+ mp_size o_ = ua_ + ub_;
- default_precision = (mp_size) ROUND_PREC(s);
+ ZERO(MP_DIGITS(Z), o_);
+ (void) s_kmul(MP_DIGITS(X), MP_DIGITS(Y), MP_DIGITS(Z), ua_, ub_);
+ Z->used = o_;
+ CLAMP(Z);
}
-/* }}} */
-
-/* {{{ mp_get_multiply_threshold() */
-
-mp_size
-mp_get_multiply_threshold(void)
+/* Square X into Z. Requires that Z have enough storage to hold the result. */
+static inline void
+USQR(mp_int X, mp_int Z)
{
- return multiply_threshold;
-}
-
-/* }}} */
+ mp_size ua_ = MP_USED(X);
+ mp_size o_ = ua_ + ua_;
-/* {{{ mp_set_multiply_threshold(s) */
-
-void
-mp_set_multiply_threshold(mp_size s)
-{
- multiply_threshold = s;
+ ZERO(MP_DIGITS(Z), o_);
+ (void) s_ksqr(MP_DIGITS(X), MP_DIGITS(Z), ua_);
+ Z->used = o_;
+ CLAMP(Z);
}
-/* }}} */
-
-/* {{{ mp_int_init(z) */
-
mp_result
mp_int_init(mp_int z)
{
- return mp_int_init_size(z, default_precision);
-}
+ if (z == NULL)
+ return MP_BADARG;
-/* }}} */
+ z->single = 0;
+ z->digits = &(z->single);
+ z->alloc = 1;
+ z->used = 1;
+ z->sign = MP_ZPOS;
-/* {{{ mp_int_alloc() */
+ return MP_OK;
+}
mp_int
mp_int_alloc(void)
{
mp_int out = px_alloc(sizeof(mpz_t));
- assert(out != NULL);
- out->digits = NULL;
- out->used = 0;
- out->alloc = 0;
- out->sign = 0;
+ if (out != NULL)
+ mp_int_init(out);
return out;
}
-/* }}} */
-
-/* {{{ mp_int_init_size(z, prec) */
-
mp_result
mp_int_init_size(mp_int z, mp_size prec)
{
- CHECK(z != NULL);
+ assert(z != NULL);
- prec = (mp_size) ROUND_PREC(prec);
- prec = MAX(prec, default_precision);
+ if (prec == 0)
+ {
+ prec = default_precision;
+ }
+ else if (prec == 1)
+ {
+ return mp_int_init(z);
+ }
+ else
+ {
+ prec = s_round_prec(prec);
+ }
- if ((MP_DIGITS(z) = s_alloc(prec)) == NULL)
+ z->digits = s_alloc(prec);
+ if (MP_DIGITS(z) == NULL)
return MP_MEMORY;
z->digits[0] = 0;
- MP_USED(z) = 1;
- MP_ALLOC(z) = prec;
- MP_SIGN(z) = MP_ZPOS;
+ z->used = 1;
+ z->alloc = prec;
+ z->sign = MP_ZPOS;
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_init_copy(z, old) */
-
mp_result
mp_int_init_copy(mp_int z, mp_int old)
{
- mp_result res;
- mp_size uold,
- target;
+ assert(z != NULL && old != NULL);
- CHECK(z != NULL && old != NULL);
+ mp_size uold = MP_USED(old);
- uold = MP_USED(old);
- target = MAX(uold, default_precision);
+ if (uold == 1)
+ {
+ mp_int_init(z);
+ }
+ else
+ {
+ mp_size target = MAX(uold, default_precision);
+ mp_result res = mp_int_init_size(z, target);
- if ((res = mp_int_init_size(z, target)) != MP_OK)
- return res;
+ if (res != MP_OK)
+ return res;
+ }
- MP_USED(z) = uold;
- MP_SIGN(z) = MP_SIGN(old);
+ z->used = uold;
+ z->sign = old->sign;
COPY(MP_DIGITS(old), MP_DIGITS(z), uold);
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_init_value(z, value) */
-
mp_result
-mp_int_init_value(mp_int z, int value)
+mp_int_init_value(mp_int z, mp_small value)
{
- mp_result res;
-
- CHECK(z != NULL);
-
- if ((res = mp_int_init(z)) != MP_OK)
- return res;
+ mpz_t vtmp;
+ mp_digit vbuf[MP_VALUE_DIGITS(value)];
- return mp_int_set_value(z, value);
+ s_fake(&vtmp, value, vbuf);
+ return mp_int_init_copy(z, &vtmp);
}
-/* }}} */
-
-/* {{{ mp_int_set_value(z, value) */
-
mp_result
-mp_int_set_value(mp_int z, int value)
+mp_int_init_uvalue(mp_int z, mp_usmall uvalue)
{
- mp_size ndig;
-
- CHECK(z != NULL);
-
- /* How many digits to copy */
- ndig = (mp_size) MP_VALUE_DIGITS(value);
+ mpz_t vtmp;
+ mp_digit vbuf[MP_VALUE_DIGITS(uvalue)];
- if (!s_pad(z, ndig))
- return MP_MEMORY;
+ s_ufake(&vtmp, uvalue, vbuf);
+ return mp_int_init_copy(z, &vtmp);
+}
- MP_USED(z) = (mp_size) s_vpack(value, MP_DIGITS(z));
- MP_SIGN(z) = (value < 0) ? MP_NEG : MP_ZPOS;
+mp_result
+mp_int_set_value(mp_int z, mp_small value)
+{
+ mpz_t vtmp;
+ mp_digit vbuf[MP_VALUE_DIGITS(value)];
- return MP_OK;
+ s_fake(&vtmp, value, vbuf);
+ return mp_int_copy(&vtmp, z);
}
-/* }}} */
+mp_result
+mp_int_set_uvalue(mp_int z, mp_usmall uvalue)
+{
+ mpz_t vtmp;
+ mp_digit vbuf[MP_VALUE_DIGITS(uvalue)];
-/* {{{ mp_int_clear(z) */
+ s_ufake(&vtmp, uvalue, vbuf);
+ return mp_int_copy(&vtmp, z);
+}
void
mp_int_clear(mp_int z)
@@ -482,34 +591,26 @@ mp_int_clear(mp_int z)
if (MP_DIGITS(z) != NULL)
{
- s_free(MP_DIGITS(z));
- MP_DIGITS(z) = NULL;
+ if (MP_DIGITS(z) != &(z->single))
+ s_free(MP_DIGITS(z));
+
+ z->digits = NULL;
}
}
-/* }}} */
-
-/* {{{ mp_int_free(z) */
-
void
mp_int_free(mp_int z)
{
- NRCHECK(z != NULL);
-
- if (z->digits != NULL)
- mp_int_clear(z);
+ assert(z != NULL);
- px_free(z);
+ mp_int_clear(z);
+ px_free(z); /* note: NOT s_free() */
}
-/* }}} */
-
-/* {{{ mp_int_copy(a, c) */
-
mp_result
mp_int_copy(mp_int a, mp_int c)
{
- CHECK(a != NULL && c != NULL);
+ assert(a != NULL && c != NULL);
if (a != c)
{
@@ -524,17 +625,13 @@ mp_int_copy(mp_int a, mp_int c)
dc = MP_DIGITS(c);
COPY(da, dc, ua);
- MP_USED(c) = ua;
- MP_SIGN(c) = MP_SIGN(a);
+ c->used = ua;
+ c->sign = a->sign;
}
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_swap(a, c) */
-
void
mp_int_swap(mp_int a, mp_int c)
{
@@ -544,90 +641,71 @@ mp_int_swap(mp_int a, mp_int c)
*a = *c;
*c = tmp;
+
+ if (MP_DIGITS(a) == &(c->single))
+ a->digits = &(a->single);
+ if (MP_DIGITS(c) == &(a->single))
+ c->digits = &(c->single);
}
}
-/* }}} */
-
-/* {{{ mp_int_zero(z) */
-
void
mp_int_zero(mp_int z)
{
- NRCHECK(z != NULL);
+ assert(z != NULL);
z->digits[0] = 0;
- MP_USED(z) = 1;
- MP_SIGN(z) = MP_ZPOS;
+ z->used = 1;
+ z->sign = MP_ZPOS;
}
-/* }}} */
-
-/* {{{ mp_int_abs(a, c) */
-
mp_result
mp_int_abs(mp_int a, mp_int c)
{
- mp_result res;
+ assert(a != NULL && c != NULL);
- CHECK(a != NULL && c != NULL);
+ mp_result res;
if ((res = mp_int_copy(a, c)) != MP_OK)
return res;
- MP_SIGN(c) = MP_ZPOS;
+ c->sign = MP_ZPOS;
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_neg(a, c) */
-
mp_result
mp_int_neg(mp_int a, mp_int c)
{
- mp_result res;
+ assert(a != NULL && c != NULL);
- CHECK(a != NULL && c != NULL);
+ mp_result res;
if ((res = mp_int_copy(a, c)) != MP_OK)
return res;
if (CMPZ(c) != 0)
- MP_SIGN(c) = 1 - MP_SIGN(a);
+ c->sign = 1 - MP_SIGN(a);
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_add(a, b, c) */
-
mp_result
mp_int_add(mp_int a, mp_int b, mp_int c)
{
- mp_size ua,
- ub,
- uc,
- max;
-
- CHECK(a != NULL && b != NULL && c != NULL);
+ assert(a != NULL && b != NULL && c != NULL);
- ua = MP_USED(a);
- ub = MP_USED(b);
- uc = MP_USED(c);
- max = MAX(ua, ub);
+ mp_size ua = MP_USED(a);
+ mp_size ub = MP_USED(b);
+ mp_size max = MAX(ua, ub);
if (MP_SIGN(a) == MP_SIGN(b))
{
/* Same sign -- add magnitudes, preserve sign of addends */
- mp_digit carry;
-
if (!s_pad(c, max))
return MP_MEMORY;
- carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
- uc = max;
+ mp_digit carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
+ mp_size uc = max;
if (carry)
{
@@ -638,50 +716,55 @@ mp_int_add(mp_int a, mp_int b, mp_int c)
++uc;
}
- MP_USED(c) = uc;
- MP_SIGN(c) = MP_SIGN(a);
+ c->used = uc;
+ c->sign = a->sign;
}
else
{
/* Different signs -- subtract magnitudes, preserve sign of greater */
+ int cmp = s_ucmp(a, b); /* magnitude comparison, sign ignored */
+
+ /*
+ * Set x to max(a, b), y to min(a, b) to simplify later code. A
+ * special case yields zero for equal magnitudes.
+ */
mp_int x,
y;
- int cmp = s_ucmp(a, b); /* magnitude comparison, sign ignored */
- /* Set x to max(a, b), y to min(a, b) to simplify later code */
- if (cmp >= 0)
+ if (cmp == 0)
{
- x = a;
- y = b;
+ mp_int_zero(c);
+ return MP_OK;
}
- else
+ else if (cmp < 0)
{
x = b;
y = a;
}
+ else
+ {
+ x = a;
+ y = b;
+ }
if (!s_pad(c, MP_USED(x)))
return MP_MEMORY;
/* Subtract smaller from larger */
s_usub(MP_DIGITS(x), MP_DIGITS(y), MP_DIGITS(c), MP_USED(x), MP_USED(y));
- MP_USED(c) = MP_USED(x);
+ c->used = x->used;
CLAMP(c);
/* Give result the sign of the larger */
- MP_SIGN(c) = MP_SIGN(x);
+ c->sign = x->sign;
}
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_add_value(a, value, c) */
-
mp_result
-mp_int_add_value(mp_int a, int value, mp_int c)
+mp_int_add_value(mp_int a, mp_small value, mp_int c)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -691,35 +774,23 @@ mp_int_add_value(mp_int a, int value, mp_int c)
return mp_int_add(a, &vtmp, c);
}
-/* }}} */
-
-/* {{{ mp_int_sub(a, b, c) */
-
mp_result
mp_int_sub(mp_int a, mp_int b, mp_int c)
{
- mp_size ua,
- ub,
- uc,
- max;
+ assert(a != NULL && b != NULL && c != NULL);
- CHECK(a != NULL && b != NULL && c != NULL);
-
- ua = MP_USED(a);
- ub = MP_USED(b);
- uc = MP_USED(c);
- max = MAX(ua, ub);
+ mp_size ua = MP_USED(a);
+ mp_size ub = MP_USED(b);
+ mp_size max = MAX(ua, ub);
if (MP_SIGN(a) != MP_SIGN(b))
{
/* Different signs -- add magnitudes and keep sign of a */
- mp_digit carry;
-
if (!s_pad(c, max))
return MP_MEMORY;
- carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
- uc = max;
+ mp_digit carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
+ mp_size uc = max;
if (carry)
{
@@ -730,20 +801,20 @@ mp_int_sub(mp_int a, mp_int b, mp_int c)
++uc;
}
- MP_USED(c) = uc;
- MP_SIGN(c) = MP_SIGN(a);
+ c->used = uc;
+ c->sign = a->sign;
}
else
{
/* Same signs -- subtract magnitudes */
+ if (!s_pad(c, max))
+ return MP_MEMORY;
mp_int x,
y;
mp_sign osign;
- int cmp = s_ucmp(a, b);
- if (!s_pad(c, max))
- return MP_MEMORY;
+ int cmp = s_ucmp(a, b);
if (cmp >= 0)
{
@@ -762,21 +833,17 @@ mp_int_sub(mp_int a, mp_int b, mp_int c)
osign = 1 - osign;
s_usub(MP_DIGITS(x), MP_DIGITS(y), MP_DIGITS(c), MP_USED(x), MP_USED(y));
- MP_USED(c) = MP_USED(x);
+ c->used = x->used;
CLAMP(c);
- MP_SIGN(c) = osign;
+ c->sign = osign;
}
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_sub_value(a, value, c) */
-
mp_result
-mp_int_sub_value(mp_int a, int value, mp_int c)
+mp_int_sub_value(mp_int a, mp_small value, mp_int c)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -786,21 +853,10 @@ mp_int_sub_value(mp_int a, int value, mp_int c)
return mp_int_sub(a, &vtmp, c);
}
-/* }}} */
-
-/* {{{ mp_int_mul(a, b, c) */
-
mp_result
mp_int_mul(mp_int a, mp_int b, mp_int c)
{
- mp_digit *out;
- mp_size osize,
- ua,
- ub,
- p = 0;
- mp_sign osign;
-
- CHECK(a != NULL && b != NULL && c != NULL);
+ assert(a != NULL && b != NULL && c != NULL);
/* If either input is zero, we can shortcut multiplication */
if (mp_int_compare_zero(a) == 0 || mp_int_compare_zero(b) == 0)
@@ -810,21 +866,24 @@ mp_int_mul(mp_int a, mp_int b, mp_int c)
}
/* Output is positive if inputs have same sign, otherwise negative */
- osign = (MP_SIGN(a) == MP_SIGN(b)) ? MP_ZPOS : MP_NEG;
+ mp_sign osign = (MP_SIGN(a) == MP_SIGN(b)) ? MP_ZPOS : MP_NEG;
/*
- * If the output is not equal to any of the inputs, we'll write the
- * results there directly; otherwise, allocate a temporary space.
+ * If the output is not identical to any of the inputs, we'll write the
+ * results directly; otherwise, allocate a temporary space.
*/
- ua = MP_USED(a);
- ub = MP_USED(b);
- osize = MAX(ua, ub);
+ mp_size ua = MP_USED(a);
+ mp_size ub = MP_USED(b);
+ mp_size osize = MAX(ua, ub);
+
osize = 4 * ((osize + 1) / 2);
+ mp_digit *out;
+ mp_size p = 0;
+
if (c == a || c == b)
{
- p = ROUND_PREC(osize);
- p = MAX(p, default_precision);
+ p = MAX(s_round_prec(osize), default_precision);
if ((out = s_alloc(p)) == NULL)
return MP_MEMORY;
@@ -847,24 +906,21 @@ mp_int_mul(mp_int a, mp_int b, mp_int c)
*/
if (out != MP_DIGITS(c))
{
- s_free(MP_DIGITS(c));
- MP_DIGITS(c) = out;
- MP_ALLOC(c) = p;
+ if ((void *) MP_DIGITS(c) != (void *) c)
+ s_free(MP_DIGITS(c));
+ c->digits = out;
+ c->alloc = p;
}
- MP_USED(c) = osize; /* might not be true, but we'll fix it ... */
+ c->used = osize; /* might not be true, but we'll fix it ... */
CLAMP(c); /* ... right here */
- MP_SIGN(c) = osign;
+ c->sign = osign;
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_mul_value(a, value, c) */
-
mp_result
-mp_int_mul_value(mp_int a, int value, mp_int c)
+mp_int_mul_value(mp_int a, mp_small value, mp_int c)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -874,45 +930,39 @@ mp_int_mul_value(mp_int a, int value, mp_int c)
return mp_int_mul(a, &vtmp, c);
}
-/* }}} */
-
-/* {{{ mp_int_mul_pow2(a, p2, c) */
-
mp_result
-mp_int_mul_pow2(mp_int a, int p2, mp_int c)
+mp_int_mul_pow2(mp_int a, mp_small p2, mp_int c)
{
- mp_result res;
+ assert(a != NULL && c != NULL && p2 >= 0);
- CHECK(a != NULL && c != NULL && p2 >= 0);
+ mp_result res = mp_int_copy(a, c);
- if ((res = mp_int_copy(a, c)) != MP_OK)
+ if (res != MP_OK)
return res;
if (s_qmul(c, (mp_size) p2))
+ {
return MP_OK;
+ }
else
+ {
return MP_MEMORY;
+ }
}
-/* }}} */
-
-/* {{{ mp_int_sqr(a, c) */
-
mp_result
mp_int_sqr(mp_int a, mp_int c)
{
- mp_digit *out;
- mp_size osize,
- p = 0;
-
- CHECK(a != NULL && c != NULL);
+ assert(a != NULL && c != NULL);
/* Get a temporary buffer big enough to hold the result */
- osize = (mp_size) 4 * ((MP_USED(a) + 1) / 2);
+ mp_size osize = (mp_size) 4 * ((MP_USED(a) + 1) / 2);
+ mp_size p = 0;
+ mp_digit *out;
if (a == c)
{
- p = ROUND_PREC(osize);
+ p = s_round_prec(osize);
p = MAX(p, default_precision);
if ((out = s_alloc(p)) == NULL)
@@ -935,39 +985,35 @@ mp_int_sqr(mp_int a, mp_int c)
*/
if (out != MP_DIGITS(c))
{
- s_free(MP_DIGITS(c));
- MP_DIGITS(c) = out;
- MP_ALLOC(c) = p;
+ if ((void *) MP_DIGITS(c) != (void *) c)
+ s_free(MP_DIGITS(c));
+ c->digits = out;
+ c->alloc = p;
}
- MP_USED(c) = osize; /* might not be true, but we'll fix it ... */
+ c->used = osize; /* might not be true, but we'll fix it ... */
CLAMP(c); /* ... right here */
- MP_SIGN(c) = MP_ZPOS;
+ c->sign = MP_ZPOS;
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_div(a, b, q, r) */
-
mp_result
mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r)
{
- int cmp,
- last = 0,
- lg;
+ assert(a != NULL && b != NULL && q != r);
+
+ int cmp;
mp_result res = MP_OK;
- mpz_t temp[2];
mp_int qout,
rout;
- mp_sign sa = MP_SIGN(a),
- sb = MP_SIGN(b);
-
- CHECK(a != NULL && b != NULL && q != r);
+ mp_sign sa = MP_SIGN(a);
+ mp_sign sb = MP_SIGN(b);
if (CMPZ(b) == 0)
+ {
return MP_UNDEF;
+ }
else if ((cmp = s_ucmp(a, b)) < 0)
{
/*
@@ -995,7 +1041,7 @@ mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r)
q->digits[0] = 1;
if (sa != sb)
- MP_SIGN(q) = MP_NEG;
+ q->sign = MP_NEG;
}
return MP_OK;
@@ -1006,37 +1052,41 @@ mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r)
* quotient and remainder, but q and r are allowed to be NULL or to
* overlap with the inputs.
*/
+ DECLARE_TEMP(2);
+ int lg;
+
if ((lg = s_isp2(b)) < 0)
{
- if (q && b != q && (res = mp_int_copy(a, q)) == MP_OK)
+ if (q && b != q)
{
+ REQUIRE(mp_int_copy(a, q));
qout = q;
}
else
{
- qout = TEMP(last);
- SETUP(mp_int_init_copy(TEMP(last), a), last);
+ REQUIRE(mp_int_copy(a, TEMP(0)));
+ qout = TEMP(0);
}
- if (r && a != r && (res = mp_int_copy(b, r)) == MP_OK)
+ if (r && a != r)
{
+ REQUIRE(mp_int_copy(b, r));
rout = r;
}
else
{
- rout = TEMP(last);
- SETUP(mp_int_init_copy(TEMP(last), b), last);
+ REQUIRE(mp_int_copy(b, TEMP(1)));
+ rout = TEMP(1);
}
- if ((res = s_udiv(qout, rout)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(s_udiv_knuth(qout, rout));
}
else
{
- if (q && (res = mp_int_copy(a, q)) != MP_OK)
- goto CLEANUP;
- if (r && (res = mp_int_copy(a, r)) != MP_OK)
- goto CLEANUP;
+ if (q)
+ REQUIRE(mp_int_copy(a, q));
+ if (r)
+ REQUIRE(mp_int_copy(a, r));
if (q)
s_qdiv(q, (mp_size) lg);
@@ -1049,203 +1099,184 @@ mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r)
/* Recompute signs for output */
if (rout)
{
- MP_SIGN(rout) = sa;
+ rout->sign = sa;
if (CMPZ(rout) == 0)
- MP_SIGN(rout) = MP_ZPOS;
+ rout->sign = MP_ZPOS;
}
if (qout)
{
- MP_SIGN(qout) = (sa == sb) ? MP_ZPOS : MP_NEG;
+ qout->sign = (sa == sb) ? MP_ZPOS : MP_NEG;
if (CMPZ(qout) == 0)
- MP_SIGN(qout) = MP_ZPOS;
+ qout->sign = MP_ZPOS;
}
- if (q && (res = mp_int_copy(qout, q)) != MP_OK)
- goto CLEANUP;
- if (r && (res = mp_int_copy(rout, r)) != MP_OK)
- goto CLEANUP;
-
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
-
+ if (q)
+ REQUIRE(mp_int_copy(qout, q));
+ if (r)
+ REQUIRE(mp_int_copy(rout, r));
+ CLEANUP_TEMP();
return res;
}
-/* }}} */
-
-/* {{{ mp_int_mod(a, m, c) */
-
mp_result
mp_int_mod(mp_int a, mp_int m, mp_int c)
{
- mp_result res;
- mpz_t tmp;
- mp_int out;
+ DECLARE_TEMP(1);
+ mp_int out = (m == c) ? TEMP(0) : c;
- if (m == c)
+ REQUIRE(mp_int_div(a, m, NULL, out));
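+
+	/*
+	 * mp_int_div leaves the remainder with the sign of a; e.g. for a = -7,
+	 * m = 3 it yields -1, which the branch below maps to the canonical
+	 * nonnegative residue -1 + 3 = 2.
+	 */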
+ if (CMPZ(out) < 0)
{
- if ((res = mp_int_init(&tmp)) != MP_OK)
- return res;
-
- out = &tmp;
+ REQUIRE(mp_int_add(out, m, c));
}
else
{
- out = c;
+ REQUIRE(mp_int_copy(out, c));
}
-
- if ((res = mp_int_div(a, m, NULL, out)) != MP_OK)
- goto CLEANUP;
-
- if (CMPZ(out) < 0)
- res = mp_int_add(out, m, c);
- else
- res = mp_int_copy(out, c);
-
-CLEANUP:
- if (out != c)
- mp_int_clear(&tmp);
-
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-
-/* {{{ mp_int_div_value(a, value, q, r) */
-
mp_result
-mp_int_div_value(mp_int a, int value, mp_int q, int *r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
{
- mpz_t vtmp,
- rtmp;
+ mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
- mp_result res;
- if ((res = mp_int_init(&rtmp)) != MP_OK)
- return res;
s_fake(&vtmp, value, vbuf);
- if ((res = mp_int_div(a, &vtmp, q, &rtmp)) != MP_OK)
- goto CLEANUP;
+ DECLARE_TEMP(1);
+ REQUIRE(mp_int_div(a, &vtmp, q, TEMP(0)));
if (r)
- (void) mp_int_to_int(&rtmp, r); /* can't fail */
+ (void) mp_int_to_int(TEMP(0), r); /* can't fail */
-CLEANUP:
- mp_int_clear(&rtmp);
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_div_pow2(a, p2, q, r) */
-
mp_result
-mp_int_div_pow2(mp_int a, int p2, mp_int q, mp_int r)
+mp_int_div_pow2(mp_int a, mp_small p2, mp_int q, mp_int r)
{
- mp_result res = MP_OK;
+ assert(a != NULL && p2 >= 0 && q != r);
- CHECK(a != NULL && p2 >= 0 && q != r);
+ mp_result res = MP_OK;
if (q != NULL && (res = mp_int_copy(a, q)) == MP_OK)
+ {
s_qdiv(q, (mp_size) p2);
+ }
if (res == MP_OK && r != NULL && (res = mp_int_copy(a, r)) == MP_OK)
+ {
s_qmod(r, (mp_size) p2);
+ }
return res;
}
-/* }}} */
-
-/* {{{ mp_int_expt(a, b, c) */
-
mp_result
-mp_int_expt(mp_int a, int b, mp_int c)
+mp_int_expt(mp_int a, mp_small b, mp_int c)
{
- mpz_t t;
- mp_result res;
- unsigned int v = abs(b);
-
- CHECK(b >= 0 && c != NULL);
+ assert(c != NULL);
+ if (b < 0)
+ return MP_RANGE;
- if ((res = mp_int_init_copy(&t, a)) != MP_OK)
- return res;
+ DECLARE_TEMP(1);
+ REQUIRE(mp_int_copy(a, TEMP(0)));
(void) mp_int_set_value(c, 1);
+ unsigned int v = labs(b);
+
while (v != 0)
{
if (v & 1)
{
- if ((res = mp_int_mul(c, &t, c)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_mul(c, TEMP(0), c));
}
v >>= 1;
if (v == 0)
break;
- if ((res = mp_int_sqr(&t, &t)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
}
-CLEANUP:
- mp_int_clear(&t);
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_expt_value(a, b, c) */
-
mp_result
-mp_int_expt_value(int a, int b, mp_int c)
+mp_int_expt_value(mp_small a, mp_small b, mp_int c)
{
- mpz_t t;
- mp_result res;
- unsigned int v = abs(b);
-
- CHECK(b >= 0 && c != NULL);
+ assert(c != NULL);
+ if (b < 0)
+ return MP_RANGE;
- if ((res = mp_int_init_value(&t, a)) != MP_OK)
- return res;
+ DECLARE_TEMP(1);
+ REQUIRE(mp_int_set_value(TEMP(0), a));
(void) mp_int_set_value(c, 1);
+ unsigned int v = labs(b);
+
while (v != 0)
{
if (v & 1)
{
- if ((res = mp_int_mul(c, &t, c)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_mul(c, TEMP(0), c));
}
v >>= 1;
if (v == 0)
break;
- if ((res = mp_int_sqr(&t, &t)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
}
-CLEANUP:
- mp_int_clear(&t);
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
+mp_result
+mp_int_expt_full(mp_int a, mp_int b, mp_int c)
+{
+ assert(a != NULL && b != NULL && c != NULL);
+ if (MP_SIGN(b) == MP_NEG)
+ return MP_RANGE;
+
+ DECLARE_TEMP(1);
+ REQUIRE(mp_int_copy(a, TEMP(0)));
-/* {{{ mp_int_compare(a, b) */
+ (void) mp_int_set_value(c, 1);
+ for (unsigned ix = 0; ix < MP_USED(b); ++ix)
+ {
+ mp_digit d = b->digits[ix];
+
+ for (unsigned jx = 0; jx < MP_DIGIT_BIT; ++jx)
+ {
+ if (d & 1)
+ {
+ REQUIRE(mp_int_mul(c, TEMP(0), c));
+ }
+
+ d >>= 1;
+ if (d == 0 && ix + 1 == MP_USED(b))
+ break;
+ REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
+ }
+ }
+
+ CLEANUP_TEMP();
+ return MP_OK;
+}
int
mp_int_compare(mp_int a, mp_int b)
{
- mp_sign sa;
+ assert(a != NULL && b != NULL);
- CHECK(a != NULL && b != NULL);
+ mp_sign sa = MP_SIGN(a);
- sa = MP_SIGN(a);
if (sa == MP_SIGN(b))
{
int cmp = s_ucmp(a, b);
@@ -1255,93 +1286,89 @@ mp_int_compare(mp_int a, mp_int b)
* both negative, the sense is reversed.
*/
if (sa == MP_ZPOS)
+ {
return cmp;
+ }
else
+ {
return -cmp;
-
+ }
+ }
+ else if (sa == MP_ZPOS)
+ {
+ return 1;
}
else
{
- if (sa == MP_ZPOS)
- return 1;
- else
- return -1;
+ return -1;
}
}
-/* }}} */
-
-/* {{{ mp_int_compare_unsigned(a, b) */
-
int
mp_int_compare_unsigned(mp_int a, mp_int b)
{
- NRCHECK(a != NULL && b != NULL);
+ assert(a != NULL && b != NULL);
return s_ucmp(a, b);
}
-/* }}} */
-
-/* {{{ mp_int_compare_zero(z) */
-
int
mp_int_compare_zero(mp_int z)
{
- NRCHECK(z != NULL);
+ assert(z != NULL);
if (MP_USED(z) == 1 && z->digits[0] == 0)
+ {
return 0;
+ }
else if (MP_SIGN(z) == MP_ZPOS)
+ {
return 1;
+ }
else
+ {
return -1;
+ }
}
-/* }}} */
-
-/* {{{ mp_int_compare_value(z, value) */
-
int
-mp_int_compare_value(mp_int z, int value)
+mp_int_compare_value(mp_int z, mp_small value)
{
- mp_sign vsign = (value < 0) ? MP_NEG : MP_ZPOS;
- int cmp;
+ assert(z != NULL);
- CHECK(z != NULL);
+ mp_sign vsign = (value < 0) ? MP_NEG : MP_ZPOS;
if (vsign == MP_SIGN(z))
{
- cmp = s_vcmp(z, value);
+ int cmp = s_vcmp(z, value);
- if (vsign == MP_ZPOS)
- return cmp;
- else
- return -cmp;
+ return (vsign == MP_ZPOS) ? cmp : -cmp;
}
else
{
- if (value < 0)
- return 1;
- else
- return -1;
+ return (value < 0) ? 1 : -1;
}
}
-/* }}} */
+int
+mp_int_compare_uvalue(mp_int z, mp_usmall uv)
+{
+ assert(z != NULL);
-/* {{{ mp_int_exptmod(a, b, m, c) */
+ if (MP_SIGN(z) == MP_NEG)
+ {
+ return -1;
+ }
+ else
+ {
+ return s_uvcmp(z, uv);
+ }
+}
mp_result
mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c)
{
- mp_result res;
- mp_size um;
- mpz_t temp[3];
- mp_int s;
- int last = 0;
-
- CHECK(a != NULL && b != NULL && c != NULL && m != NULL);
+ assert(a != NULL && b != NULL && c != NULL && m != NULL);
/* Zero moduli and negative exponents are not considered. */
if (CMPZ(m) == 0)
@@ -1349,13 +1376,17 @@ mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c)
if (CMPZ(b) < 0)
return MP_RANGE;
- um = MP_USED(m);
- SETUP(mp_int_init_size(TEMP(0), 2 * um), last);
- SETUP(mp_int_init_size(TEMP(1), 2 * um), last);
+ mp_size um = MP_USED(m);
+
+ DECLARE_TEMP(3);
+ REQUIRE(GROW(TEMP(0), 2 * um));
+ REQUIRE(GROW(TEMP(1), 2 * um));
+
+ mp_int s;
if (c == b || c == m)
{
- SETUP(mp_int_init_size(TEMP(2), 2 * um), last);
+ REQUIRE(GROW(TEMP(2), 2 * um));
s = TEMP(2);
}
else
@@ -1363,30 +1394,17 @@ mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c)
s = c;
}
- if ((res = mp_int_mod(a, m, TEMP(0))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_mod(a, m, TEMP(0)));
+ REQUIRE(s_brmu(TEMP(1), m));
+ REQUIRE(s_embar(TEMP(0), b, m, TEMP(1), s));
+ REQUIRE(mp_int_copy(s, c));
- if ((res = s_brmu(TEMP(1), m)) != MP_OK)
- goto CLEANUP;
-
- if ((res = s_embar(TEMP(0), b, m, TEMP(1), s)) != MP_OK)
- goto CLEANUP;
-
- res = mp_int_copy(s, c);
-
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
-
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_exptmod_evalue(a, value, m, c) */
-
mp_result
-mp_int_exptmod_evalue(mp_int a, int value, mp_int m, mp_int c)
+mp_int_exptmod_evalue(mp_int a, mp_small value, mp_int m, mp_int c)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -1396,13 +1414,8 @@ mp_int_exptmod_evalue(mp_int a, int value, mp_int m, mp_int c)
return mp_int_exptmod(a, &vtmp, m, c);
}
-/* }}} */
-
-/* {{{ mp_int_exptmod_bvalue(v, b, m, c) */
-
mp_result
-mp_int_exptmod_bvalue(int value, mp_int b,
- mp_int m, mp_int c)
+mp_int_exptmod_bvalue(mp_small value, mp_int b, mp_int m, mp_int c)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -1412,20 +1425,11 @@ mp_int_exptmod_bvalue(int value, mp_int b,
return mp_int_exptmod(&vtmp, b, m, c);
}
-/* }}} */
-
-/* {{{ mp_int_exptmod_known(a, b, m, mu, c) */
-
mp_result
-mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
+mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu,
+ mp_int c)
{
- mp_result res;
- mp_size um;
- mpz_t temp[2];
- mp_int s;
- int last = 0;
-
- CHECK(a && b && m && c);
+ assert(a && b && m && c);
/* Zero moduli and negative exponents are not considered. */
if (CMPZ(m) == 0)
@@ -1433,12 +1437,16 @@ mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
if (CMPZ(b) < 0)
return MP_RANGE;
- um = MP_USED(m);
- SETUP(mp_int_init_size(TEMP(0), 2 * um), last);
+ DECLARE_TEMP(2);
+ mp_size um = MP_USED(m);
+
+ REQUIRE(GROW(TEMP(0), 2 * um));
+
+ mp_int s;
if (c == b || c == m)
{
- SETUP(mp_int_init_size(TEMP(1), 2 * um), last);
+ REQUIRE(GROW(TEMP(1), 2 * um));
s = TEMP(1);
}
else
@@ -1446,68 +1454,41 @@ mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
s = c;
}
- if ((res = mp_int_mod(a, m, TEMP(0))) != MP_OK)
- goto CLEANUP;
-
- if ((res = s_embar(TEMP(0), b, m, mu, s)) != MP_OK)
- goto CLEANUP;
-
- res = mp_int_copy(s, c);
+ REQUIRE(mp_int_mod(a, m, TEMP(0)));
+ REQUIRE(s_embar(TEMP(0), b, m, mu, s));
+ REQUIRE(mp_int_copy(s, c));
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
-
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_redux_const(m, c) */
-
mp_result
mp_int_redux_const(mp_int m, mp_int c)
{
- CHECK(m != NULL && c != NULL && m != c);
+ assert(m != NULL && c != NULL && m != c);
return s_brmu(c, m);
}
-/* }}} */
-
-/* {{{ mp_int_invmod(a, m, c) */
-
mp_result
mp_int_invmod(mp_int a, mp_int m, mp_int c)
{
- mp_result res;
- mp_sign sa;
- int last = 0;
- mpz_t temp[2];
-
- CHECK(a != NULL && m != NULL && c != NULL);
+ assert(a != NULL && m != NULL && c != NULL);
if (CMPZ(a) == 0 || CMPZ(m) <= 0)
return MP_RANGE;
- sa = MP_SIGN(a); /* need this for the result later */
-
- for (last = 0; last < 2; ++last)
- if ((res = mp_int_init(TEMP(last))) != MP_OK)
- goto CLEANUP;
+ DECLARE_TEMP(2);
- if ((res = mp_int_egcd(a, m, TEMP(0), TEMP(1), NULL)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_egcd(a, m, TEMP(0), TEMP(1), NULL));
if (mp_int_compare_value(TEMP(0), 1) != 0)
{
- res = MP_UNDEF;
- goto CLEANUP;
+ REQUIRE(MP_UNDEF);
}
/* It is first necessary to constrain the value to the proper range */
- if ((res = mp_int_mod(TEMP(1), m, TEMP(1))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_mod(TEMP(1), m, TEMP(1)));
/*
* Now, if 'a' was originally negative, the value we have is actually the
@@ -1515,136 +1496,112 @@ mp_int_invmod(mp_int a, mp_int m, mp_int c)
* have to subtract from the modulus. Otherwise, the value is okay as it
* stands.
*/
- if (sa == MP_NEG)
- res = mp_int_sub(m, TEMP(1), c);
+ if (MP_SIGN(a) == MP_NEG)
+ {
+ REQUIRE(mp_int_sub(m, TEMP(1), c));
+ }
else
- res = mp_int_copy(TEMP(1), c);
-
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
+ {
+ REQUIRE(mp_int_copy(TEMP(1), c));
+ }
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_gcd(a, b, c) */
-
/* Binary GCD algorithm due to Josef Stein, 1961 */
mp_result
mp_int_gcd(mp_int a, mp_int b, mp_int c)
{
- int ca,
- cb,
- k = 0;
- mpz_t u,
- v,
- t;
- mp_result res;
+ assert(a != NULL && b != NULL && c != NULL);
- CHECK(a != NULL && b != NULL && c != NULL);
+ int ca = CMPZ(a);
+ int cb = CMPZ(b);
- ca = CMPZ(a);
- cb = CMPZ(b);
if (ca == 0 && cb == 0)
+ {
return MP_UNDEF;
+ }
else if (ca == 0)
+ {
return mp_int_abs(b, c);
+ }
else if (cb == 0)
+ {
return mp_int_abs(a, c);
+ }
- if ((res = mp_int_init(&t)) != MP_OK)
- return res;
- if ((res = mp_int_init_copy(&u, a)) != MP_OK)
- goto U;
- if ((res = mp_int_init_copy(&v, b)) != MP_OK)
- goto V;
+ DECLARE_TEMP(3);
+ REQUIRE(mp_int_copy(a, TEMP(0)));
+ REQUIRE(mp_int_copy(b, TEMP(1)));
+
+ TEMP(0)->sign = MP_ZPOS;
+ TEMP(1)->sign = MP_ZPOS;
- MP_SIGN(&u) = MP_ZPOS;
- MP_SIGN(&v) = MP_ZPOS;
+ int k = 0;
{ /* Divide out common factors of 2 from u and v */
- int div2_u = s_dp2k(&u),
- div2_v = s_dp2k(&v);
+ int div2_u = s_dp2k(TEMP(0));
+ int div2_v = s_dp2k(TEMP(1));
k = MIN(div2_u, div2_v);
- s_qdiv(&u, (mp_size) k);
- s_qdiv(&v, (mp_size) k);
+ s_qdiv(TEMP(0), (mp_size) k);
+ s_qdiv(TEMP(1), (mp_size) k);
}
- if (mp_int_is_odd(&u))
+ if (mp_int_is_odd(TEMP(0)))
{
- if ((res = mp_int_neg(&v, &t)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_neg(TEMP(1), TEMP(2)));
}
else
{
- if ((res = mp_int_copy(&u, &t)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_copy(TEMP(0), TEMP(2)));
}
for (;;)
{
- s_qdiv(&t, s_dp2k(&t));
+ s_qdiv(TEMP(2), s_dp2k(TEMP(2)));
- if (CMPZ(&t) > 0)
+ if (CMPZ(TEMP(2)) > 0)
{
- if ((res = mp_int_copy(&t, &u)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_copy(TEMP(2), TEMP(0)));
}
else
{
- if ((res = mp_int_neg(&t, &v)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_neg(TEMP(2), TEMP(1)));
}
- if ((res = mp_int_sub(&u, &v, &t)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_sub(TEMP(0), TEMP(1), TEMP(2)));
- if (CMPZ(&t) == 0)
+ if (CMPZ(TEMP(2)) == 0)
break;
}
- if ((res = mp_int_abs(&u, c)) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_abs(TEMP(0), c));
if (!s_qmul(c, (mp_size) k))
- res = MP_MEMORY;
-
-CLEANUP:
- mp_int_clear(&v);
-V: mp_int_clear(&u);
-U: mp_int_clear(&t);
+ REQUIRE(MP_MEMORY);
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_egcd(a, b, c, x, y) */
-
-/* This is the binary GCD algorithm again, but this time we keep track
- of the elementary matrix operations as we go, so we can get values
- x and y satisfying c = ax + by.
+/* This is the binary GCD algorithm again, but this time we keep track of the
+ elementary matrix operations as we go, so we can get values x and y
+ satisfying c = ax + by.
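+
+   For instance, gcd(6, 15) = 3, and one pair satisfying the identity is
+   x = 3 and y = -1, since 6 * 3 + 15 * (-1) = 3 (the pair this routine
+   returns may differ, since Bezout coefficients are not unique).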
*/
mp_result
-mp_int_egcd(mp_int a, mp_int b, mp_int c,
- mp_int x, mp_int y)
-{
- int k,
- last = 0,
- ca,
- cb;
- mpz_t temp[8];
- mp_result res;
+mp_int_egcd(mp_int a, mp_int b, mp_int c, mp_int x, mp_int y)
+{
+ assert(a != NULL && b != NULL && c != NULL && (x != NULL || y != NULL));
- CHECK(a != NULL && b != NULL && c != NULL &&
- (x != NULL || y != NULL));
+ mp_result res = MP_OK;
+ int ca = CMPZ(a);
+ int cb = CMPZ(b);
- ca = CMPZ(a);
- cb = CMPZ(b);
if (ca == 0 && cb == 0)
+ {
return MP_UNDEF;
+ }
else if (ca == 0)
{
if ((res = mp_int_abs(b, c)) != MP_OK)
@@ -1665,20 +1622,17 @@ mp_int_egcd(mp_int a, mp_int b, mp_int c,
/*
* Initialize temporaries: A:0, B:1, C:2, D:3, u:4, v:5, ou:6, ov:7
*/
- for (last = 0; last < 4; ++last)
- {
- if ((res = mp_int_init(TEMP(last))) != MP_OK)
- goto CLEANUP;
- }
- TEMP(0)->digits[0] = 1;
- TEMP(3)->digits[0] = 1;
-
- SETUP(mp_int_init_copy(TEMP(4), a), last);
- SETUP(mp_int_init_copy(TEMP(5), b), last);
+ DECLARE_TEMP(8);
+ REQUIRE(mp_int_set_value(TEMP(0), 1));
+ REQUIRE(mp_int_set_value(TEMP(3), 1));
+ REQUIRE(mp_int_copy(a, TEMP(4)));
+ REQUIRE(mp_int_copy(b, TEMP(5)));
/* We will work with absolute values here */
- MP_SIGN(TEMP(4)) = MP_ZPOS;
- MP_SIGN(TEMP(5)) = MP_ZPOS;
+ TEMP(4)->sign = MP_ZPOS;
+ TEMP(5)->sign = MP_ZPOS;
+
+ int k = 0;
{ /* Divide out common factors of 2 from u and v */
int div2_u = s_dp2k(TEMP(4)),
@@ -1689,8 +1643,8 @@ mp_int_egcd(mp_int a, mp_int b, mp_int c,
s_qdiv(TEMP(5), k);
}
- SETUP(mp_int_init_copy(TEMP(6), TEMP(4)), last);
- SETUP(mp_int_init_copy(TEMP(7), TEMP(5)), last);
+ REQUIRE(mp_int_copy(TEMP(4), TEMP(6)));
+ REQUIRE(mp_int_copy(TEMP(5), TEMP(7)));
for (;;)
{
@@ -1700,10 +1654,8 @@ mp_int_egcd(mp_int a, mp_int b, mp_int c,
if (mp_int_is_odd(TEMP(0)) || mp_int_is_odd(TEMP(1)))
{
- if ((res = mp_int_add(TEMP(0), TEMP(7), TEMP(0))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(1), TEMP(6), TEMP(1))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_add(TEMP(0), TEMP(7), TEMP(0)));
+ REQUIRE(mp_int_sub(TEMP(1), TEMP(6), TEMP(1)));
}
s_qdiv(TEMP(0), 1);
@@ -1716,10 +1668,8 @@ mp_int_egcd(mp_int a, mp_int b, mp_int c,
if (mp_int_is_odd(TEMP(2)) || mp_int_is_odd(TEMP(3)))
{
- if ((res = mp_int_add(TEMP(2), TEMP(7), TEMP(2))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(3), TEMP(6), TEMP(3))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_add(TEMP(2), TEMP(7), TEMP(2)));
+ REQUIRE(mp_int_sub(TEMP(3), TEMP(6), TEMP(3)));
}
s_qdiv(TEMP(2), 1);
@@ -1728,157 +1678,163 @@ mp_int_egcd(mp_int a, mp_int b, mp_int c,
if (mp_int_compare(TEMP(4), TEMP(5)) >= 0)
{
- if ((res = mp_int_sub(TEMP(4), TEMP(5), TEMP(4))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(0), TEMP(2), TEMP(0))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(1), TEMP(3), TEMP(1))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_sub(TEMP(4), TEMP(5), TEMP(4)));
+ REQUIRE(mp_int_sub(TEMP(0), TEMP(2), TEMP(0)));
+ REQUIRE(mp_int_sub(TEMP(1), TEMP(3), TEMP(1)));
}
else
{
- if ((res = mp_int_sub(TEMP(5), TEMP(4), TEMP(5))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(2), TEMP(0), TEMP(2))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_sub(TEMP(3), TEMP(1), TEMP(3))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_sub(TEMP(5), TEMP(4), TEMP(5)));
+ REQUIRE(mp_int_sub(TEMP(2), TEMP(0), TEMP(2)));
+ REQUIRE(mp_int_sub(TEMP(3), TEMP(1), TEMP(3)));
}
if (CMPZ(TEMP(4)) == 0)
{
- if (x && (res = mp_int_copy(TEMP(2), x)) != MP_OK)
- goto CLEANUP;
- if (y && (res = mp_int_copy(TEMP(3), y)) != MP_OK)
- goto CLEANUP;
+ if (x)
+ REQUIRE(mp_int_copy(TEMP(2), x));
+ if (y)
+ REQUIRE(mp_int_copy(TEMP(3), y));
if (c)
{
if (!s_qmul(TEMP(5), k))
{
- res = MP_MEMORY;
- goto CLEANUP;
+ REQUIRE(MP_MEMORY);
}
-
- res = mp_int_copy(TEMP(5), c);
+ REQUIRE(mp_int_copy(TEMP(5), c));
}
break;
}
}
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
-
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
+mp_result
+mp_int_lcm(mp_int a, mp_int b, mp_int c)
+{
+ assert(a != NULL && b != NULL && c != NULL);
+
+ /*
+ * Since a * b = gcd(a, b) * lcm(a, b), we can compute lcm(a, b) = (a /
+ * gcd(a, b)) * b.
+ *
+	 * This formulation ensures everything works even if the input variables
+ * share space.
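+	 *
+	 * As a quick arithmetic check of the identity: gcd(4, 6) = 2, so
+	 * lcm(4, 6) = (4 / 2) * 6 = 12.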
+ */
+ DECLARE_TEMP(1);
+ REQUIRE(mp_int_gcd(a, b, TEMP(0)));
+ REQUIRE(mp_int_div(a, TEMP(0), TEMP(0), NULL));
+ REQUIRE(mp_int_mul(TEMP(0), b, TEMP(0)));
+ REQUIRE(mp_int_copy(TEMP(0), c));
-/* {{{ mp_int_divisible_value(a, v) */
+ CLEANUP_TEMP();
+ return MP_OK;
+}
-int
-mp_int_divisible_value(mp_int a, int v)
+bool
+mp_int_divisible_value(mp_int a, mp_small v)
{
- int rem = 0;
+ mp_small rem = 0;
if (mp_int_div_value(a, v, NULL, &rem) != MP_OK)
- return 0;
-
+ {
+ return false;
+ }
return rem == 0;
}
-/* }}} */
-
-/* {{{ mp_int_is_pow2(z) */
-
int
mp_int_is_pow2(mp_int z)
{
- CHECK(z != NULL);
+ assert(z != NULL);
return s_isp2(z);
}
-/* }}} */
-
-/* {{{ mp_int_sqrt(a, c) */
-
+/* Implementation of Newton's root finding method, based loosely on a patch
+   contributed by Hal Finkel,
+ modified by M. J. Fromberger.
+ */
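+/* (Sketch of the update rule: for f(x) = x^b - a, Newton's step is
+   x' = x - (x^b - a) / (b * x^(b-1)).  The loop below evaluates this with
+   truncating integer division, stepping the iterate down by one when it
+   stalls, so it converges to floor(|a|^(1/b)).) */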
mp_result
-mp_int_sqrt(mp_int a, mp_int c)
+mp_int_root(mp_int a, mp_small b, mp_int c)
{
- mp_result res = MP_OK;
- mpz_t temp[2];
- int last = 0;
+ assert(a != NULL && c != NULL && b > 0);
- CHECK(a != NULL && c != NULL);
+ if (b == 1)
+ {
+ return mp_int_copy(a, c);
+ }
+ bool flips = false;
- /* The square root of a negative value does not exist in the integers. */
if (MP_SIGN(a) == MP_NEG)
- return MP_UNDEF;
+ {
+ if (b % 2 == 0)
+ {
+ return MP_UNDEF; /* root does not exist for negative a with
+ * even b */
+ }
+ else
+ {
+ flips = true;
+ }
+ }
- SETUP(mp_int_init_copy(TEMP(last), a), last);
- SETUP(mp_int_init(TEMP(last)), last);
+ DECLARE_TEMP(5);
+ REQUIRE(mp_int_copy(a, TEMP(0)));
+ REQUIRE(mp_int_copy(a, TEMP(1)));
+ TEMP(0)->sign = MP_ZPOS;
+ TEMP(1)->sign = MP_ZPOS;
for (;;)
{
- if ((res = mp_int_sqr(TEMP(0), TEMP(1))) != MP_OK)
- goto CLEANUP;
+ REQUIRE(mp_int_expt(TEMP(1), b, TEMP(2)));
- if (mp_int_compare_unsigned(a, TEMP(1)) == 0)
+ if (mp_int_compare_unsigned(TEMP(2), TEMP(0)) <= 0)
break;
- if ((res = mp_int_copy(a, TEMP(1))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_div(TEMP(1), TEMP(0), TEMP(1), NULL)) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_add(TEMP(0), TEMP(1), TEMP(1))) != MP_OK)
- goto CLEANUP;
- if ((res = mp_int_div_pow2(TEMP(1), 1, TEMP(1), NULL)) != MP_OK)
- goto CLEANUP;
-
- if (mp_int_compare_unsigned(TEMP(0), TEMP(1)) == 0)
- break;
- if ((res = mp_int_sub_value(TEMP(0), 1, TEMP(0))) != MP_OK)
- goto CLEANUP;
- if (mp_int_compare_unsigned(TEMP(0), TEMP(1)) == 0)
- break;
+ REQUIRE(mp_int_sub(TEMP(2), TEMP(0), TEMP(2)));
+ REQUIRE(mp_int_expt(TEMP(1), b - 1, TEMP(3)));
+ REQUIRE(mp_int_mul_value(TEMP(3), b, TEMP(3)));
+ REQUIRE(mp_int_div(TEMP(2), TEMP(3), TEMP(4), NULL));
+ REQUIRE(mp_int_sub(TEMP(1), TEMP(4), TEMP(4)));
- if ((res = mp_int_copy(TEMP(1), TEMP(0))) != MP_OK)
- goto CLEANUP;
+ if (mp_int_compare_unsigned(TEMP(1), TEMP(4)) == 0)
+ {
+ REQUIRE(mp_int_sub_value(TEMP(4), 1, TEMP(4)));
+ }
+ REQUIRE(mp_int_copy(TEMP(4), TEMP(1)));
}
- res = mp_int_copy(TEMP(0), c);
+ REQUIRE(mp_int_copy(TEMP(1), c));
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
+ /* If the original value of a was negative, flip the output sign. */
+ if (flips)
+ (void) mp_int_neg(c, c); /* cannot fail */
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_to_int(z, out) */
-
mp_result
-mp_int_to_int(mp_int z, int *out)
+mp_int_to_int(mp_int z, mp_small *out)
{
- unsigned int uv = 0;
- mp_size uz;
- mp_digit *dz;
- mp_sign sz;
+ assert(z != NULL);
- CHECK(z != NULL);
+ /* Make sure the value is representable as a small integer */
+ mp_sign sz = MP_SIGN(z);
- /* Make sure the value is representable as an int */
- sz = MP_SIGN(z);
- if ((sz == MP_ZPOS && mp_int_compare_value(z, INT_MAX) > 0) ||
- mp_int_compare_value(z, INT_MIN) < 0)
+ if ((sz == MP_ZPOS && mp_int_compare_value(z, MP_SMALL_MAX) > 0) ||
+ mp_int_compare_value(z, MP_SMALL_MIN) < 0)
+ {
return MP_RANGE;
+ }
- uz = MP_USED(z);
- dz = MP_DIGITS(z) + uz - 1;
+ mp_usmall uz = MP_USED(z);
+ mp_digit *dz = MP_DIGITS(z) + uz - 1;
+ mp_small uv = 0;
while (uz > 0)
{
@@ -1888,33 +1844,56 @@ mp_int_to_int(mp_int z, int *out)
}
if (out)
- *out = (sz == MP_NEG) ? -(int) uv : (int) uv;
+ *out = (mp_small) ((sz == MP_NEG) ? -uv : uv);
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_to_string(z, radix, str, limit) */
-
mp_result
-mp_int_to_string(mp_int z, mp_size radix,
- char *str, int limit)
+mp_int_to_uint(mp_int z, mp_usmall *out)
{
- mp_result res;
- int cmp = 0;
+ assert(z != NULL);
- CHECK(z != NULL && str != NULL && limit >= 2);
+ /* Make sure the value is representable as an unsigned small integer */
+	mp_sign sz = MP_SIGN(z);
- if (radix < MP_MIN_RADIX || radix > MP_MAX_RADIX)
+ if (sz == MP_NEG || mp_int_compare_uvalue(z, MP_USMALL_MAX) > 0)
+ {
return MP_RANGE;
+ }
+
+ mp_size uz = MP_USED(z);
+ mp_digit *dz = MP_DIGITS(z) + uz - 1;
+ mp_usmall uv = 0;
+
+ while (uz > 0)
+ {
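+		/*
+		 * Shift in two half-steps: when mp_digit is as wide as mp_usmall, a
+		 * single shift by MP_DIGIT_BIT would be a full-width shift, which is
+		 * undefined behavior.
+		 */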
+ uv <<= MP_DIGIT_BIT / 2;
+ uv = (uv << (MP_DIGIT_BIT / 2)) | *dz--;
+ --uz;
+ }
+
+ if (out)
+ *out = uv;
+
+ return MP_OK;
+}
+
+mp_result
+mp_int_to_string(mp_int z, mp_size radix, char *str, int limit)
+{
+ assert(z != NULL && str != NULL && limit >= 2);
+ assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
+
+ int cmp = 0;
if (CMPZ(z) == 0)
{
- *str++ = s_val2ch(0, mp_flags & MP_CAP_DIGITS);
+ *str++ = s_val2ch(0, 1);
}
else
{
+ mp_result res;
mpz_t tmp;
char *h,
*t;
@@ -1938,7 +1917,7 @@ mp_int_to_string(mp_int z, mp_size radix,
break;
d = s_ddiv(&tmp, (mp_digit) radix);
- *str++ = s_val2ch(d, mp_flags & MP_CAP_DIGITS);
+ *str++ = s_val2ch(d, 1);
}
t = str - 1;
@@ -1956,26 +1935,22 @@ mp_int_to_string(mp_int z, mp_size radix,
*str = '\0';
if (cmp == 0)
+ {
return MP_OK;
+ }
else
+ {
return MP_TRUNC;
+ }
}
-/* }}} */
-
-/* {{{ mp_int_string_len(z, radix) */
-
mp_result
mp_int_string_len(mp_int z, mp_size radix)
{
- int len;
-
- CHECK(z != NULL);
-
- if (radix < MP_MIN_RADIX || radix > MP_MAX_RADIX)
- return MP_RANGE;
+ assert(z != NULL);
+ assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
- len = s_outlen(z, radix) + 1; /* for terminator */
+ int len = s_outlen(z, radix) + 1; /* for terminator */
/* Allow for sign marker on negatives */
if (MP_SIGN(z) == MP_NEG)
@@ -1984,31 +1959,19 @@ mp_int_string_len(mp_int z, mp_size radix)
return len;
}
-/* }}} */
-
-/* {{{ mp_int_read_string(z, radix, *str) */
-
/* Read zero-terminated string into z */
mp_result
mp_int_read_string(mp_int z, mp_size radix, const char *str)
{
return mp_int_read_cstring(z, radix, str, NULL);
-
}
-/* }}} */
-
-/* {{{ mp_int_read_cstring(z, radix, *str, **end) */
-
mp_result
-mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end)
+mp_int_read_cstring(mp_int z, mp_size radix, const char *str,
+ char **end)
{
- int ch;
-
- CHECK(z != NULL && str != NULL);
-
- if (radix < MP_MIN_RADIX || radix > MP_MAX_RADIX)
- return MP_RANGE;
+ assert(z != NULL && str != NULL);
+ assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
/* Skip leading whitespace */
while (isspace((unsigned char) *str))
@@ -2018,17 +1981,19 @@ mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end)
switch (*str)
{
case '-':
- MP_SIGN(z) = MP_NEG;
+ z->sign = MP_NEG;
++str;
break;
case '+':
++str; /* fallthrough */
default:
- MP_SIGN(z) = MP_ZPOS;
+ z->sign = MP_ZPOS;
break;
}
/* Skip leading zeroes */
+ int ch;
+
while ((ch = s_ch2val(*str, radix)) == 0)
++str;
@@ -2036,7 +2001,7 @@ mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end)
if (!s_pad(z, s_inlen(strlen(str), radix)))
return MP_MEMORY;
- MP_USED(z) = 1;
+ z->used = 1;
z->digits[0] = 0;
while (*str != '\0' && ((ch = s_ch2val(*str, radix)) >= 0))
@@ -2050,41 +2015,38 @@ mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end)
/* Override sign for zero, even if negative specified. */
if (CMPZ(z) == 0)
- MP_SIGN(z) = MP_ZPOS;
+ z->sign = MP_ZPOS;
if (end != NULL)
- *end = (char *) str;
+ *end = unconstify(char *, str);
/*
* Return a truncation error if the string has unprocessed characters
* remaining, so the caller can tell if the whole string was done
*/
if (*str != '\0')
+ {
return MP_TRUNC;
+ }
else
+ {
return MP_OK;
+ }
}
-/* }}} */
-
-/* {{{ mp_int_count_bits(z) */
-
mp_result
mp_int_count_bits(mp_int z)
{
- mp_size nbits = 0,
- uz;
- mp_digit d;
+ assert(z != NULL);
- CHECK(z != NULL);
+ mp_size uz = MP_USED(z);
- uz = MP_USED(z);
if (uz == 1 && z->digits[0] == 0)
return 1;
--uz;
- nbits = uz * MP_DIGIT_BIT;
- d = z->digits[uz];
+ mp_size nbits = uz * MP_DIGIT_BIT;
+ mp_digit d = z->digits[uz];
while (d != 0)
{
@@ -2095,21 +2057,15 @@ mp_int_count_bits(mp_int z)
return nbits;
}
-/* }}} */
-
-/* {{{ mp_int_to_binary(z, buf, limit) */
-
mp_result
mp_int_to_binary(mp_int z, unsigned char *buf, int limit)
{
static const int PAD_FOR_2C = 1;
- mp_result res;
- int limpos = limit;
-
- CHECK(z != NULL && buf != NULL);
+ assert(z != NULL && buf != NULL);
- res = s_tobin(z, buf, &limpos, PAD_FOR_2C);
+ int limpos = limit;
+ mp_result res = s_tobin(z, buf, &limpos, PAD_FOR_2C);
if (MP_SIGN(z) == MP_NEG)
s_2comp(buf, limpos);
@@ -2117,22 +2073,14 @@ mp_int_to_binary(mp_int z, unsigned char *buf, int limit)
return res;
}
-/* }}} */
-
-/* {{{ mp_int_read_binary(z, buf, len) */
-
mp_result
mp_int_read_binary(mp_int z, unsigned char *buf, int len)
{
- mp_size need,
- i;
- unsigned char *tmp;
- mp_digit *dz;
-
- CHECK(z != NULL && buf != NULL && len > 0);
+ assert(z != NULL && buf != NULL && len > 0);
/* Figure out how many digits are needed to represent this value */
- need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
+ mp_size need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
+
if (!s_pad(z, need))
return MP_MEMORY;
@@ -2144,12 +2092,14 @@ mp_int_read_binary(mp_int z, unsigned char *buf, int len)
*/
if (buf[0] >> (CHAR_BIT - 1))
{
- MP_SIGN(z) = MP_NEG;
+ z->sign = MP_NEG;
s_2comp(buf, len);
}
- dz = MP_DIGITS(z);
- for (tmp = buf, i = len; i > 0; --i, ++tmp)
+ mp_digit *dz = MP_DIGITS(z);
+ unsigned char *tmp = buf;
+
+ for (int i = len; i > 0; --i, ++tmp)
{
s_qmul(z, (mp_size) CHAR_BIT);
*dz |= *tmp;
@@ -2162,20 +2112,15 @@ mp_int_read_binary(mp_int z, unsigned char *buf, int len)
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_binary_len(z) */
-
mp_result
mp_int_binary_len(mp_int z)
{
mp_result res = mp_int_count_bits(z);
- int bytes = mp_int_unsigned_len(z);
if (res <= 0)
return res;
- bytes = (res + (CHAR_BIT - 1)) / CHAR_BIT;
+ int bytes = mp_int_unsigned_len(z);
/*
* If the highest-order bit falls exactly on a byte boundary, we need to
@@ -2188,193 +2133,170 @@ mp_int_binary_len(mp_int z)
return bytes;
}
-/* }}} */
-
-/* {{{ mp_int_to_unsigned(z, buf, limit) */
-
mp_result
mp_int_to_unsigned(mp_int z, unsigned char *buf, int limit)
{
static const int NO_PADDING = 0;
- CHECK(z != NULL && buf != NULL);
+ assert(z != NULL && buf != NULL);
return s_tobin(z, buf, &limit, NO_PADDING);
}
-/* }}} */
-
-/* {{{ mp_int_read_unsigned(z, buf, len) */
-
mp_result
mp_int_read_unsigned(mp_int z, unsigned char *buf, int len)
{
- mp_size need,
- i;
- unsigned char *tmp;
- mp_digit *dz;
-
- CHECK(z != NULL && buf != NULL && len > 0);
+ assert(z != NULL && buf != NULL && len > 0);
/* Figure out how many digits are needed to represent this value */
- need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
+ mp_size need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
+
if (!s_pad(z, need))
return MP_MEMORY;
mp_int_zero(z);
- dz = MP_DIGITS(z);
- for (tmp = buf, i = len; i > 0; --i, ++tmp)
+ unsigned char *tmp = buf;
+
+ for (int i = len; i > 0; --i, ++tmp)
{
(void) s_qmul(z, CHAR_BIT);
- *dz |= *tmp;
+ *MP_DIGITS(z) |= *tmp;
}
return MP_OK;
}
-/* }}} */
-
-/* {{{ mp_int_unsigned_len(z) */
-
mp_result
mp_int_unsigned_len(mp_int z)
{
mp_result res = mp_int_count_bits(z);
- int bytes;
if (res <= 0)
return res;
- bytes = (res + (CHAR_BIT - 1)) / CHAR_BIT;
+ int bytes = (res + (CHAR_BIT - 1)) / CHAR_BIT;
return bytes;
}
-/* }}} */
-
-/* {{{ mp_error_string(res) */
-
const char *
mp_error_string(mp_result res)
{
- int ix;
-
if (res > 0)
return s_unknown_err;
res = -res;
+ int ix;
+
for (ix = 0; ix < res && s_error_msg[ix] != NULL; ++ix)
;
if (s_error_msg[ix] != NULL)
+ {
return s_error_msg[ix];
+ }
else
+ {
return s_unknown_err;
+ }
}
-/* }}} */
-
/*------------------------------------------------------------------------*/
/* Private functions for internal use. These make assumptions. */
-/* {{{ s_alloc(num) */
+#if IMATH_DEBUG
+static const mp_digit fill = (mp_digit) 0xdeadbeefabad1dea;
+#endif
static mp_digit *
s_alloc(mp_size num)
{
mp_digit *out = px_alloc(num * sizeof(mp_digit));
- assert(out != NULL); /* for debugging */
+ assert(out != NULL);
+#if IMATH_DEBUG
+ for (mp_size ix = 0; ix < num; ++ix)
+ out[ix] = fill;
+#endif
return out;
}
-/* }}} */
-
-/* {{{ s_realloc(old, num) */
-
static mp_digit *
-s_realloc(mp_digit *old, mp_size num)
+s_realloc(mp_digit *old, mp_size osize, mp_size nsize)
{
- mp_digit *new = px_realloc(old, num * sizeof(mp_digit));
+#if IMATH_DEBUG
+ mp_digit *new = s_alloc(nsize);
- assert(new != NULL); /* for debugging */
+ assert(new != NULL);
- return new;
-}
+ for (mp_size ix = 0; ix < nsize; ++ix)
+ new[ix] = fill;
+ memcpy(new, old, osize * sizeof(mp_digit));
+#else
+ mp_digit *new = px_realloc(old, nsize * sizeof(mp_digit));
-/* }}} */
+ assert(new != NULL);
+#endif
-/* {{{ s_free(ptr) */
+ return new;
+}
-#if TRACEABLE_FREE
static void
s_free(void *ptr)
{
px_free(ptr);
}
-#endif
-
-/* }}} */
-
-/* {{{ s_pad(z, min) */
-static int
+static bool
s_pad(mp_int z, mp_size min)
{
if (MP_ALLOC(z) < min)
{
- mp_size nsize = ROUND_PREC(min);
- mp_digit *tmp = s_realloc(MP_DIGITS(z), nsize);
+ mp_size nsize = s_round_prec(min);
+ mp_digit *tmp;
- if (tmp == NULL)
- return 0;
+ if (z->digits == &(z->single))
+ {
+ if ((tmp = s_alloc(nsize)) == NULL)
+ return false;
+ tmp[0] = z->single;
+ }
+ else if ((tmp = s_realloc(MP_DIGITS(z), MP_ALLOC(z), nsize)) == NULL)
+ {
+ return false;
+ }
- MP_DIGITS(z) = tmp;
- MP_ALLOC(z) = nsize;
+ z->digits = tmp;
+ z->alloc = nsize;
}
- return 1;
+ return true;
}
-/* }}} */
-
-/* {{{ s_clamp(z) */
-
-#if TRACEABLE_CLAMP
+/* Note: This will not work correctly when value == MP_SMALL_MIN */
static void
-s_clamp(mp_int z)
+s_fake(mp_int z, mp_small value, mp_digit vbuf[])
{
- mp_size uz = MP_USED(z);
- mp_digit *zd = MP_DIGITS(z) + uz - 1;
-
- while (uz > 1 && (*zd-- == 0))
- --uz;
+	mp_usmall uv = (mp_usmall) ((value < 0) ? -value : value);
- MP_USED(z) = uz;
+ s_ufake(z, uv, vbuf);
+ if (value < 0)
+ z->sign = MP_NEG;
}
-#endif
-
-/* }}} */
-
-/* {{{ s_fake(z, value, vbuf) */
static void
-s_fake(mp_int z, int value, mp_digit vbuf[])
+s_ufake(mp_int z, mp_usmall value, mp_digit vbuf[])
{
- mp_size uv = (mp_size) s_vpack(value, vbuf);
+ mp_size ndig = (mp_size) s_uvpack(value, vbuf);
- z->used = uv;
+ z->used = ndig;
z->alloc = MP_VALUE_DIGITS(value);
- z->sign = (value < 0) ? MP_NEG : MP_ZPOS;
+ z->sign = MP_ZPOS;
z->digits = vbuf;
}
-/* }}} */
-
-/* {{{ s_cdig(da, db, len) */
-
static int
s_cdig(mp_digit *da, mp_digit *db, mp_size len)
{
@@ -2384,22 +2306,21 @@ s_cdig(mp_digit *da, mp_digit *db, mp_size len)
for ( /* */ ; len != 0; --len, --dat, --dbt)
{
if (*dat > *dbt)
+ {
return 1;
+ }
else if (*dat < *dbt)
+ {
return -1;
+ }
}
return 0;
}
-/* }}} */
-
-/* {{{ s_vpack(v, t[]) */
-
static int
-s_vpack(int v, mp_digit t[])
+s_uvpack(mp_usmall uv, mp_digit t[])
{
- unsigned int uv = (unsigned int) ((v < 0) ? -v : v);
int ndig = 0;
if (uv == 0)
@@ -2417,10 +2338,6 @@ s_vpack(int v, mp_digit t[])
return ndig;
}
-/* }}} */
-
-/* {{{ s_ucmp(a, b) */
-
static int
s_ucmp(mp_int a, mp_int b)
{
@@ -2428,41 +2345,47 @@ s_ucmp(mp_int a, mp_int b)
ub = MP_USED(b);
if (ua > ub)
+ {
return 1;
+ }
else if (ub > ua)
+ {
return -1;
+ }
else
+ {
return s_cdig(MP_DIGITS(a), MP_DIGITS(b), ua);
+ }
}
-/* }}} */
-
-/* {{{ s_vcmp(a, v) */
-
static int
-s_vcmp(mp_int a, int v)
+s_vcmp(mp_int a, mp_small v)
{
- mp_digit vdig[MP_VALUE_DIGITS(v)];
- int ndig = 0;
- mp_size ua = MP_USED(a);
-
- ndig = s_vpack(v, vdig);
+#if _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4146)
+#endif
+ mp_usmall uv = (v < 0) ? -(mp_usmall) v : (mp_usmall) v;
+#if _MSC_VER
+#pragma warning(pop)
+#endif
- if (ua > ndig)
- return 1;
- else if (ua < ndig)
- return -1;
- else
- return s_cdig(MP_DIGITS(a), vdig, ndig);
+ return s_uvcmp(a, uv);
}
-/* }}} */
+static int
+s_uvcmp(mp_int a, mp_usmall uv)
+{
+ mpz_t vtmp;
+ mp_digit vdig[MP_VALUE_DIGITS(uv)];
-/* {{{ s_uadd(da, db, dc, size_a, size_b) */
+ s_ufake(&vtmp, uv, vdig);
+ return s_ucmp(a, &vtmp);
+}
static mp_digit
-s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b)
+s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b)
{
mp_size pos;
mp_word w = 0;
@@ -2495,13 +2418,9 @@ s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
return (mp_digit) w;
}
-/* }}} */
-
-/* {{{ s_usub(da, db, dc, size_a, size_b) */
-
static void
-s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b)
+s_usub(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b)
{
mp_size pos;
mp_word w = 0;
@@ -2513,7 +2432,8 @@ s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
for (pos = 0; pos < size_b; ++pos, ++da, ++db, ++dc)
{
w = ((mp_word) MP_DIGIT_MAX + 1 + /* MP_RADIX */
- (mp_word) *da) - w - (mp_word) *db;
+ (mp_word) *da) -
+ w - (mp_word) *db;
*dc = LOWER_HALF(w);
w = (UPPER_HALF(w) == 0);
@@ -2523,7 +2443,8 @@ s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
for ( /* */ ; pos < size_a; ++pos, ++da, ++dc)
{
w = ((mp_word) MP_DIGIT_MAX + 1 + /* MP_RADIX */
- (mp_word) *da) - w;
+ (mp_word) *da) -
+ w;
*dc = LOWER_HALF(w);
w = (UPPER_HALF(w) == 0);
@@ -2533,13 +2454,9 @@ s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
assert(w == 0);
}
-/* }}} */
-
-/* {{{ s_kmul(da, db, dc, size_a, size_b) */
-
static int
-s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b)
+s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b)
{
mp_size bot_size;
@@ -2561,11 +2478,8 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc,
* Karatsuba algorithm to compute the product; otherwise use the normal
* multiplication algorithm
*/
- if (multiply_threshold &&
- size_a >= multiply_threshold &&
- size_b > bot_size)
+ if (multiply_threshold && size_a >= multiply_threshold && size_b > bot_size)
{
-
mp_digit *t1,
*t2,
*t3,
@@ -2617,12 +2531,11 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc,
/* Assemble the output value */
COPY(t1, dc, buf_size);
- carry = s_uadd(t3, dc + bot_size, dc + bot_size,
- buf_size + 1, buf_size);
+ carry = s_uadd(t3, dc + bot_size, dc + bot_size, buf_size + 1, buf_size);
assert(carry == 0);
- carry = s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size,
- buf_size, buf_size);
+ carry =
+ s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, buf_size, buf_size);
assert(carry == 0);
s_free(t1); /* note t2 and t3 are just internal pointers
@@ -2636,13 +2549,9 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc,
return 1;
}
-/* }}} */
-
-/* {{{ s_umul(da, db, dc, size_a, size_b) */
-
static void
-s_umul(mp_digit *da, mp_digit *db, mp_digit *dc,
- mp_size size_a, mp_size size_b)
+s_umul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
+ mp_size size_b)
{
mp_size a,
b;
@@ -2669,10 +2578,6 @@ s_umul(mp_digit *da, mp_digit *db, mp_digit *dc,
}
}
-/* }}} */
-
-/* {{{ s_ksqr(da, dc, size_a) */
-
static int
s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a)
{
@@ -2682,7 +2587,8 @@ s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a)
mp_digit *a_top = da + bot_size;
mp_digit *t1,
*t2,
- *t3;
+ *t3,
+ carry PG_USED_FOR_ASSERTS_ONLY;
mp_size at_size = size_a - bot_size;
mp_size buf_size = 2 * bot_size;
@@ -2716,13 +2622,14 @@ s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a)
/* Assemble the output value */
COPY(t1, dc, 2 * bot_size);
- (void) s_uadd(t3, dc + bot_size, dc + bot_size,
- buf_size + 1, buf_size + 1);
+ carry = s_uadd(t3, dc + bot_size, dc + bot_size, buf_size + 1, buf_size);
+ assert(carry == 0);
- (void) s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size,
- buf_size, buf_size);
+ carry =
+ s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, buf_size, buf_size);
+ assert(carry == 0);
- px_free(t1); /* note that t2 and t2 are internal pointers
+	s_free(t1);					/* note that t2 and t3 are internal pointers
* only */
}
@@ -2734,10 +2641,6 @@ s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a)
return 1;
}
-/* }}} */
-
-/* {{{ s_usqr(da, dc, size_a) */
-
static void
s_usqr(mp_digit *da, mp_digit *dc, mp_size size_a)
{
@@ -2800,10 +2703,6 @@ s_usqr(mp_digit *da, mp_digit *dc, mp_size size_a)
}
}
-/* }}} */
-
-/* {{{ s_dadd(a, b) */
-
static void
s_dadd(mp_int a, mp_digit b)
{
@@ -2826,14 +2725,10 @@ s_dadd(mp_int a, mp_digit b)
if (w)
{
*da = (mp_digit) w;
- MP_USED(a) += 1;
+ a->used += 1;
}
}
-/* }}} */
-
-/* {{{ s_dmul(a, b) */
-
static void
s_dmul(mp_int a, mp_digit b)
{
@@ -2852,14 +2747,10 @@ s_dmul(mp_int a, mp_digit b)
if (w)
{
*da = (mp_digit) w;
- MP_USED(a) += 1;
+ a->used += 1;
}
}
-/* }}} */
-
-/* {{{ s_dbmul(da, b, dc, size_a) */
-
static void
s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc, mp_size size_a)
{
@@ -2878,10 +2769,6 @@ s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc, mp_size size_a)
*dc = LOWER_HALF(w);
}
-/* }}} */
-
-/* {{{ s_ddiv(da, d, dc, size_a) */
-
static mp_digit
s_ddiv(mp_int a, mp_digit b)
{
@@ -2911,10 +2798,6 @@ s_ddiv(mp_int a, mp_digit b)
return (mp_digit) w;
}
-/* }}} */
-
-/* {{{ s_qdiv(z, p2) */
-
static void
s_qdiv(mp_int z, mp_size p2)
{
@@ -2938,9 +2821,11 @@ s_qdiv(mp_int z, mp_size p2)
from = to + ndig;
for (mark = ndig; mark < uz; ++mark)
+ {
*to++ = *from++;
+ }
- MP_USED(z) = uz - ndig;
+ z->used = uz - ndig;
}
if (nbits)
@@ -2965,33 +2850,25 @@ s_qdiv(mp_int z, mp_size p2)
}
if (MP_USED(z) == 1 && z->digits[0] == 0)
- MP_SIGN(z) = MP_ZPOS;
+ z->sign = MP_ZPOS;
}
-/* }}} */
-
-/* {{{ s_qmod(z, p2) */
-
static void
s_qmod(mp_int z, mp_size p2)
{
mp_size start = p2 / MP_DIGIT_BIT + 1,
rest = p2 % MP_DIGIT_BIT;
mp_size uz = MP_USED(z);
- mp_digit mask = (1 << rest) - 1;
+ mp_digit mask = (1u << rest) - 1;
if (start <= uz)
{
- MP_USED(z) = start;
+ z->used = start;
z->digits[start - 1] &= mask;
CLAMP(z);
}
}
-/* }}} */
-
-/* {{{ s_qmul(z, p2) */
-
static int
s_qmul(mp_int z, mp_size p2)
{
@@ -3063,21 +2940,19 @@ s_qmul(mp_int z, mp_size p2)
}
}
- MP_USED(z) = uz;
+ z->used = uz;
CLAMP(z);
return 1;
}
-/* }}} */
-
-/* {{{ s_qsub(z, p2) */
-
-/* Subtract |z| from 2^p2, assuming 2^p2 > |z|, and set z to be positive */
+/* Compute z = 2^p2 - |z|; requires that 2^p2 >= |z|
+ The sign of the result is always zero/positive.
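+   For instance, with p2 = 4 and |z| = 3 the result is 2^4 - 3 = 13.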
+ */
static int
s_qsub(mp_int z, mp_size p2)
{
- mp_digit hi = (1 << (p2 % MP_DIGIT_BIT)),
+ mp_digit hi = (1u << (p2 % MP_DIGIT_BIT)),
*zp;
mp_size tdig = (p2 / MP_DIGIT_BIT),
pos;
@@ -3099,16 +2974,12 @@ s_qsub(mp_int z, mp_size p2)
assert(UPPER_HALF(w) != 0); /* no borrow out should be possible */
- MP_SIGN(z) = MP_ZPOS;
+ z->sign = MP_ZPOS;
CLAMP(z);
return 1;
}
-/* }}} */
-
-/* {{{ s_dp2k(z) */
-
static int
s_dp2k(mp_int z)
{
@@ -3135,10 +3006,6 @@ s_dp2k(mp_int z)
return k;
}
-/* }}} */
-
-/* {{{ s_isp2(z) */
-
static int
s_isp2(mp_int z)
{
@@ -3167,12 +3034,8 @@ s_isp2(mp_int z)
return (int) k;
}
-/* }}} */
-
-/* {{{ s_2expt(z, k) */
-
static int
-s_2expt(mp_int z, int k)
+s_2expt(mp_int z, mp_small k)
{
mp_size ndig,
rest;
@@ -3186,23 +3049,19 @@ s_2expt(mp_int z, int k)
dz = MP_DIGITS(z);
ZERO(dz, ndig);
- *(dz + ndig - 1) = (1 << rest);
- MP_USED(z) = ndig;
+ *(dz + ndig - 1) = (1u << rest);
+ z->used = ndig;
return 1;
}
-/* }}} */
-
-/* {{{ s_norm(a, b) */
-
static int
s_norm(mp_int a, mp_int b)
{
mp_digit d = b->digits[MP_USED(b) - 1];
int k = 0;
- while (d < (mp_digit) ((mp_digit) 1 << (MP_DIGIT_BIT - 1)))
+ while (d < (1u << (mp_digit) (MP_DIGIT_BIT - 1)))
{ /* d < (MP_RADIX / 2) */
d <<= 1;
++k;
@@ -3218,10 +3077,6 @@ s_norm(mp_int a, mp_int b)
return k;
}
-/* }}} */
-
-/* {{{ s_brmu(z, m) */
-
static mp_result
s_brmu(mp_int z, mp_int m)
{
@@ -3234,10 +3089,6 @@ s_brmu(mp_int z, mp_int m)
return mp_int_div(z, m, z, NULL);
}
-/* }}} */
-
-/* {{{ s_reduce(x, m, mu, q1, q2) */
-
static int
s_reduce(mp_int x, mp_int m, mp_int mu, mp_int q1, mp_int q2)
{
@@ -3276,53 +3127,47 @@ s_reduce(mp_int x, mp_int m, mp_int mu, mp_int q1, mp_int q2)
return 0;
/*
- * If x > m, we need to back it off until it is in range. This will be
+ * If x > m, we need to back it off until it is in range. This will be
* required at most twice.
*/
if (mp_int_compare(x, m) >= 0)
+ {
(void) mp_int_sub(x, m, x);
- if (mp_int_compare(x, m) >= 0)
- (void) mp_int_sub(x, m, x);
+ if (mp_int_compare(x, m) >= 0)
+ {
+ (void) mp_int_sub(x, m, x);
+ }
+ }
/* At this point, x has been properly reduced. */
return 1;
}
-/* }}} */
-
-/* {{{ s_embar(a, b, m, mu, c) */
-
-/* Perform modular exponentiation using Barrett's method, where mu is
- the reduction constant for m. Assumes a < m, b > 0. */
+/* Perform modular exponentiation using Barrett's method, where mu is the
+ reduction constant for m. Assumes a < m, b > 0. */
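+/* (Here mu = floor(beta^(2k) / m), where beta = 2^MP_DIGIT_BIT and
+   k = MP_USED(m), as computed by s_brmu; s_reduce then replaces each
+   division by m with multiplications and at most two corrective
+   subtractions.) */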
static mp_result
s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
{
- mp_digit *db,
- *dbt,
- umu,
- d;
- mpz_t temp[3];
- mp_result res;
- int last = 0;
+ mp_digit umu = MP_USED(mu);
+ mp_digit *db = MP_DIGITS(b);
+ mp_digit *dbt = db + MP_USED(b) - 1;
- umu = MP_USED(mu);
- db = MP_DIGITS(b);
- dbt = db + MP_USED(b) - 1;
-
- while (last < 3)
- {
- SETUP(mp_int_init_size(TEMP(last), 4 * umu), last);
- ZERO(MP_DIGITS(TEMP(last - 1)), MP_ALLOC(TEMP(last - 1)));
- }
+ DECLARE_TEMP(3);
+ REQUIRE(GROW(TEMP(0), 4 * umu));
+ REQUIRE(GROW(TEMP(1), 4 * umu));
+ REQUIRE(GROW(TEMP(2), 4 * umu));
+ ZERO(TEMP(0)->digits, TEMP(0)->alloc);
+ ZERO(TEMP(1)->digits, TEMP(1)->alloc);
+ ZERO(TEMP(2)->digits, TEMP(2)->alloc);
(void) mp_int_set_value(c, 1);
/* Take care of low-order digits */
while (db < dbt)
{
- int i;
+ mp_digit d = *db;
- for (d = *db, i = MP_DIGIT_BIT; i > 0; --i, d >>= 1)
+ for (int i = MP_DIGIT_BIT; i > 0; --i, d >>= 1)
{
if (d & 1)
{
@@ -3330,31 +3175,27 @@ s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
UMUL(c, a, TEMP(0));
if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2)))
{
- res = MP_MEMORY;
- goto CLEANUP;
+ REQUIRE(MP_MEMORY);
}
mp_int_copy(TEMP(0), c);
}
-
USQR(a, TEMP(0));
assert(MP_SIGN(TEMP(0)) == MP_ZPOS);
if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2)))
{
- res = MP_MEMORY;
- goto CLEANUP;
+ REQUIRE(MP_MEMORY);
}
assert(MP_SIGN(TEMP(0)) == MP_ZPOS);
mp_int_copy(TEMP(0), a);
-
-
}
++db;
}
/* Take care of highest-order digit */
- d = *dbt;
+ mp_digit d = *dbt;
+
for (;;)
{
if (d & 1)
@@ -3362,8 +3203,7 @@ s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
UMUL(c, a, TEMP(0));
if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2)))
{
- res = MP_MEMORY;
- goto CLEANUP;
+ REQUIRE(MP_MEMORY);
}
mp_int_copy(TEMP(0), c);
}
@@ -3375,170 +3215,272 @@ s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c)
USQR(a, TEMP(0));
if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2)))
{
- res = MP_MEMORY;
- goto CLEANUP;
+ REQUIRE(MP_MEMORY);
}
(void) mp_int_copy(TEMP(0), a);
}
-CLEANUP:
- while (--last >= 0)
- mp_int_clear(TEMP(last));
-
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
+/* Division of nonnegative integers
+
+   This function implements a division algorithm for unsigned multi-precision
+   integers. The algorithm is based on Algorithm D from Knuth's "The Art of
+   Computer Programming", 3rd ed. 1998, pp. 272-273.
-/* {{{ s_udiv(a, b) */
+ We diverge from Knuth's algorithm in that we do not perform the subtraction
+ from the remainder until we have determined that we have the correct
+   quotient digit. This makes our algorithm less efficient than Knuth's because
+ we might have to perform multiple multiplication and comparison steps before
+ the subtraction. The advantage is that it is easy to implement and ensure
+ correctness without worrying about underflow from the subtraction.
-/* Precondition: a >= b and b > 0
- Postcondition: a' = a / b, b' = a % b
+   inputs: u an n+m digit integer in base b (b is 2^MP_DIGIT_BIT)
+           v an n digit integer in base b (b is 2^MP_DIGIT_BIT)
+ n >= 1
+ m >= 0
+ outputs: u / v stored in u
+ u % v stored in v
*/
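+/* (A small base-10 illustration of the qhat estimate: dividing u = 705 by
+   v = 89 (already normalized, since 8 >= 10/2), the low-order step has
+   remainder window 705 and estimates qhat = floor(70 / 8) = 8, while the
+   true quotient digit is 7 because 89 * 8 = 712 > 705.  One "try a smaller
+   q" correction, as in the loop below, recovers q = 7, remainder 82.) */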
static mp_result
-s_udiv(mp_int a, mp_int b)
-{
- mpz_t q,
- r,
- t;
- mp_size ua,
- ub,
- qpos = 0;
- mp_digit *da,
- btop;
- mp_result res = MP_OK;
- int k,
- skip = 0;
-
+s_udiv_knuth(mp_int u, mp_int v)
+{
/* Force signs to positive */
- MP_SIGN(a) = MP_ZPOS;
- MP_SIGN(b) = MP_ZPOS;
+ u->sign = MP_ZPOS;
+ v->sign = MP_ZPOS;
+
+ /* Use simple division algorithm when v is only one digit long */
+ if (MP_USED(v) == 1)
+ {
+ mp_digit d,
+ rem;
- /* Normalize, per Knuth */
- k = s_norm(a, b);
+ d = v->digits[0];
+ rem = s_ddiv(u, d);
+ mp_int_set_value(v, rem);
+ return MP_OK;
+ }
- ua = MP_USED(a);
- ub = MP_USED(b);
- btop = b->digits[ub - 1];
- if ((res = mp_int_init_size(&q, ua)) != MP_OK)
- return res;
- if ((res = mp_int_init_size(&t, ua + 1)) != MP_OK)
- goto CLEANUP;
+ /*
+ * Algorithm D
+ *
+	 * The n and m variables are defined as used by Knuth. u is an m+n digit
+	 * number with digits u_{m+n-1}..u_0. v is an n digit number with digits
+	 * v_{n-1}..v_0. We require that n > 1 and m >= 0.
+ */
+ mp_size n = MP_USED(v);
+ mp_size m = MP_USED(u) - n;
+
+ assert(n > 1);
+ /* assert(m >= 0) follows because m is unsigned. */
+
+ /*
+ * D1: Normalize. The normalization step provides the necessary condition
+ * for Theorem B, which states that the quotient estimate for q_j, call it
+ * qhat
+ *
+ * qhat = u_{j+n}u_{j+n-1} / v_{n-1}
+ *
+ * is bounded by
+ *
+ * qhat - 2 <= q_j <= qhat.
+ *
+ * That is, qhat is always greater than the actual quotient digit q, and
+ * it is never more than two larger than the actual quotient digit.
+ */
+ int k = s_norm(u, v);
+
+ /*
+ * Extend size of u by one if needed.
+ *
+ * The algorithm begins with a value of u that has one more digit of
+ * input. The normalization step sets u_{m+n}..u_0 = 2^k * u_{m+n-1}..u_0.
+ * If the multiplication did not increase the number of digits of u, we
+ * need to add a leading zero here.
+ */
+ if (k == 0 || MP_USED(u) != m + n + 1)
+ {
+ if (!s_pad(u, m + n + 1))
+ return MP_MEMORY;
+ u->digits[m + n] = 0;
+ u->used = m + n + 1;
+ }
- da = MP_DIGITS(a);
- r.digits = da + ua - 1; /* The contents of r are shared with a */
- r.used = 1;
+ /*
+ * Add a leading 0 to v.
+ *
+ * The multiplication in step D4 multiplies qhat * 0v_{n-1}..v_0. We need
+ * to add the leading zero to v here to ensure that the multiplication
+ * will produce the full n+1 digit result.
+ */
+ if (!s_pad(v, n + 1))
+ return MP_MEMORY;
+ v->digits[n] = 0;
+
+ /*
+ * Initialize temporary variables q and t. q allocates space for m+1
+ * digits to store the quotient digits t allocates space for n+1 digits to
+ * hold the result of q_j*v
+ */
+ DECLARE_TEMP(2);
+ REQUIRE(GROW(TEMP(0), m + 1));
+ REQUIRE(GROW(TEMP(1), n + 1));
+
+ /* D2: Initialize j */
+ int j = m;
+ mpz_t r;
+
+ r.digits = MP_DIGITS(u) + j; /* The contents of r are shared with u */
+ r.used = n + 1;
r.sign = MP_ZPOS;
- r.alloc = MP_ALLOC(a);
- ZERO(t.digits, t.alloc);
+ r.alloc = MP_ALLOC(u);
+ ZERO(TEMP(1)->digits, TEMP(1)->alloc);
- /* Solve for quotient digits, store in q.digits in reverse order */
- while (r.digits >= da)
+ /* Calculate the m+1 digits of the quotient result */
+ for (; j >= 0; j--)
{
- assert(qpos <= q.alloc);
+ /* D3: Calculate q' */
+ /* r->digits is aligned to position j of the number u */
+ mp_word pfx,
+ qhat;
- if (s_ucmp(b, &r) > 0)
- {
- r.digits -= 1;
- r.used += 1;
+ pfx = r.digits[n];
+ pfx <<= MP_DIGIT_BIT / 2;
+ pfx <<= MP_DIGIT_BIT / 2;
+		pfx |= r.digits[n - 1];	/* pfx = u_{j+n}u_{j+n-1} */
- if (++skip > 1)
- q.digits[qpos++] = 0;
+ qhat = pfx / v->digits[n - 1];
- CLAMP(&r);
- }
- else
- {
- mp_word pfx = r.digits[r.used - 1];
- mp_word qdigit;
+ /*
+		 * Check to see if qhat > b, and decrease qhat if so. Theorem B
+		 * guarantees that qhat is at most 2 larger than the actual value, so
+		 * it is possible that qhat is greater than the maximum value that
+		 * will fit in a digit.
+ */
+ if (qhat > MP_DIGIT_MAX)
+ qhat = MP_DIGIT_MAX;
- if (r.used > 1 && (pfx < btop || r.digits[r.used - 2] == 0))
- {
- pfx <<= MP_DIGIT_BIT / 2;
- pfx <<= MP_DIGIT_BIT / 2;
- pfx |= r.digits[r.used - 2];
+ /*
+ * D4,D5,D6: Multiply qhat * v and test for a correct value of q
+ *
+		 * We proceed a bit differently from the way described by Knuth. This
+		 * way is simpler but less efficient. Instead of doing the multiply
+		 * and subtract then checking for underflow, we first do the multiply
+		 * of qhat * v and see if it is larger than the current remainder r.
+		 * If it is larger, we decrease qhat by one and try again. We may
+		 * need to decrease qhat one more time before we get a value that is
+		 * smaller than r.
+		 *
+		 * This way is less efficient than Knuth's because we do more
+		 * multiplies,
+ * but we do not need to worry about underflow this way.
+ */
+ /* t = qhat * v */
+ s_dbmul(MP_DIGITS(v), (mp_digit) qhat, TEMP(1)->digits, n + 1);
+ TEMP(1)->used = n + 1;
+ CLAMP(TEMP(1));
+
+ /* Clamp r for the comparison. Comparisons do not like leading zeros. */
+ CLAMP(&r);
+ if (s_ucmp(TEMP(1), &r) > 0)
+ { /* would the remainder be negative? */
+ qhat -= 1; /* try a smaller q */
+ s_dbmul(MP_DIGITS(v), (mp_digit) qhat, TEMP(1)->digits, n + 1);
+ TEMP(1)->used = n + 1;
+ CLAMP(TEMP(1));
+ if (s_ucmp(TEMP(1), &r) > 0)
+ { /* would the remainder be negative? */
+ assert(qhat > 0);
+ qhat -= 1; /* try a smaller q */
+ s_dbmul(MP_DIGITS(v), (mp_digit) qhat, TEMP(1)->digits, n + 1);
+ TEMP(1)->used = n + 1;
+ CLAMP(TEMP(1));
}
+ assert(s_ucmp(TEMP(1), &r) <= 0 && "The mathematics failed us.");
+ }
- qdigit = pfx / btop;
- if (qdigit > MP_DIGIT_MAX)
- qdigit = 1;
+ /*
+ * Unclamp r. The D algorithm expects r = u_{j+n}..u_j to always be
+ * n+1 digits long.
+ */
+ r.used = n + 1;
- s_dbmul(MP_DIGITS(b), (mp_digit) qdigit, t.digits, ub);
- t.used = ub + 1;
- CLAMP(&t);
- while (s_ucmp(&t, &r) > 0)
- {
- --qdigit;
- (void) mp_int_sub(&t, b, &t); /* cannot fail */
- }
+ /*
+ * D4: Multiply and subtract
+ *
+ * Note: The multiply was completed above so we only need to subtract
+ * here.
+ */
+ s_usub(r.digits, TEMP(1)->digits, r.digits, r.used, TEMP(1)->used);
- s_usub(r.digits, t.digits, r.digits, r.used, t.used);
- CLAMP(&r);
+ /*
+ * D5: Test remainder
+ *
+ * Note: Not needed because we always check that qhat is the correct
+		 * value before performing the subtract. The value is cast to
+		 * mp_digit to prevent a warning; qhat has been clamped to
+		 * MP_DIGIT_MAX.
+ */
+ TEMP(0)->digits[j] = (mp_digit) qhat;
- q.digits[qpos++] = (mp_digit) qdigit;
- ZERO(t.digits, t.used);
- skip = 0;
- }
+ /*
+		 * D6: Add back
+		 *
+		 * Note: Not needed because we always check that qhat is the correct
+		 * value before performing the subtract.
+ */
+
+ /* D7: Loop on j */
+ r.digits--;
+ ZERO(TEMP(1)->digits, TEMP(1)->alloc);
}
- /* Put quotient digits in the correct order, and discard extra zeroes */
- q.used = qpos;
- REV(mp_digit, q.digits, qpos);
- CLAMP(&q);
+ /* Get rid of leading zeros in q */
+ TEMP(0)->used = m + 1;
+ CLAMP(TEMP(0));
/* Denormalize the remainder */
- CLAMP(a);
+ CLAMP(u); /* use u here because the r.digits pointer is
+ * off-by-one */
if (k != 0)
- s_qdiv(a, k);
+ s_qdiv(u, k);
- mp_int_copy(a, b); /* ok: 0 <= r < b */
- mp_int_copy(&q, a); /* ok: q <= a */
+ mp_int_copy(u, v); /* ok: 0 <= r < v */
+ mp_int_copy(TEMP(0), u); /* ok: q <= u */
- mp_int_clear(&t);
-CLEANUP:
- mp_int_clear(&q);
- return res;
+ CLEANUP_TEMP();
+ return MP_OK;
}
-/* }}} */
-
-/* {{{ s_outlen(z, r) */
-
-/* Precondition: 2 <= r < 64 */
static int
s_outlen(mp_int z, mp_size r)
{
- mp_result bits;
- double raw;
+ assert(r >= MP_MIN_RADIX && r <= MP_MAX_RADIX);
- bits = mp_int_count_bits(z);
- raw = (double) bits * s_log2[r];
+ mp_result bits = mp_int_count_bits(z);
+ double raw = (double) bits * s_log2[r];
return (int) (raw + 0.999999);
}
-/* }}} */
-
-/* {{{ s_inlen(len, r) */
-
static mp_size
s_inlen(int len, mp_size r)
{
double raw = (double) len / s_log2[r];
mp_size bits = (mp_size) (raw + 0.5);
- return (mp_size) ((bits + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT);
+ return (mp_size) ((bits + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT) + 1;
}
-/* }}} */
-
-/* {{{ s_ch2val(c, r) */
-
static int
s_ch2val(char c, int r)
{
int out;
+ /*
+ * In some locales, isalpha() accepts characters outside the range A-Z,
+ * producing out<0 or out>=36. The "out >= r" check will always catch
+ * out>=36. Though nothing explicitly catches out<0, our caller reacts
+ * the same way to every negative return value.
+ */
if (isdigit((unsigned char) c))
out = c - '0';
else if (r > 10 && isalpha((unsigned char) c))
@@ -3549,39 +3491,36 @@ s_ch2val(char c, int r)
return (out >= r) ? -1 : out;
}
-/* }}} */
-
-/* {{{ s_val2ch(v, caps) */
-
static char
s_val2ch(int v, int caps)
{
assert(v >= 0);
if (v < 10)
+ {
return v + '0';
+ }
else
{
char out = (v - 10) + 'a';
if (caps)
+ {
return toupper((unsigned char) out);
+ }
else
+ {
return out;
+ }
}
}
-/* }}} */
-
-/* {{{ s_2comp(buf, len) */
-
static void
s_2comp(unsigned char *buf, int len)
{
- int i;
unsigned short s = 1;
- for (i = len - 1; i >= 0; --i)
+ for (int i = len - 1; i >= 0; --i)
{
unsigned char c = ~buf[i];
@@ -3595,20 +3534,14 @@ s_2comp(unsigned char *buf, int len)
/* last carry out is ignored */
}
-/* }}} */
-
-/* {{{ s_tobin(z, buf, *limpos) */
-
static mp_result
s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad)
{
- mp_size uz;
- mp_digit *dz;
int pos = 0,
limit = *limpos;
+ mp_size uz = MP_USED(z);
+ mp_digit *dz = MP_DIGITS(z);
- uz = MP_USED(z);
- dz = MP_DIGITS(z);
while (uz > 0 && pos < limit)
{
mp_digit d = *dz++;
@@ -3634,13 +3567,17 @@ s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad)
if (pad != 0 && (buf[pos - 1] >> (CHAR_BIT - 1)))
{
if (pos < limit)
+ {
buf[pos++] = 0;
+ }
else
+ {
uz = 1;
+ }
}
/* Digits are in reverse order, fix that */
- REV(unsigned char, buf, pos);
+ REV(buf, pos);
/* Return the number of bytes actually written */
*limpos = pos;
@@ -3648,40 +3585,4 @@ s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad)
return (uz == 0) ? MP_OK : MP_TRUNC;
}
-/* }}} */
-
-/* {{{ s_print(tag, z) */
-
-#if 0
-void
-s_print(char *tag, mp_int z)
-{
- int i;
-
- fprintf(stderr, "%s: %c ", tag,
- (MP_SIGN(z) == MP_NEG) ? '-' : '+');
-
- for (i = MP_USED(z) - 1; i >= 0; --i)
- fprintf(stderr, "%0*X", (int) (MP_DIGIT_BIT / 4), z->digits[i]);
-
- fputc('\n', stderr);
-
-}
-
-void
-s_print_buf(char *tag, mp_digit *buf, mp_size num)
-{
- int i;
-
- fprintf(stderr, "%s: ", tag);
-
- for (i = num - 1; i >= 0; --i)
- fprintf(stderr, "%0*X", (int) (MP_DIGIT_BIT / 4), buf[i]);
-
- fputc('\n', stderr);
-}
-#endif
-
-/* }}} */
-
-/* HERE THERE BE DRAGONS */
+/* Here there be dragons */
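The rewritten s_outlen() above derives the character count of a radix-r rendering directly from the bit length: chars = ceil(bits * log(2)/log(r)), which is the value the precomputed s_log2[] table must hold given how both s_outlen() and s_inlen() use it. A standalone sketch of the same estimate, assuming only <math.h> (an illustration, not the library code):

    #include <math.h>

    /* Upper bound on characters needed to print a `bits`-bit magnitude in
     * base r; mirrors s_outlen(): raw = bits * (log 2 / log r), rounded up. */
    static int
    radix_outlen(int bits, int r)
    {
        double raw = (double) bits * (log(2.0) / log((double) r));

        return (int) (raw + 0.999999);
    }

For example, radix_outlen(10, 10) yields 4, enough for 1023, the largest 10-bit value.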
diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h
index 2d7a5268e5c..0e1676d04e9 100644
--- a/contrib/pgcrypto/imath.h
+++ b/contrib/pgcrypto/imath.h
@@ -1,61 +1,57 @@
/*
- Name: imath.h
- Purpose: Arbitrary precision integer arithmetic routines.
- Author: M. J. Fromberger
- Info: Id: imath.h 21 2006-04-02 18:58:36Z sting
-
- Copyright (C) 2002 Michael J. Fromberger, All Rights Reserved.
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation files
- (the "Software"), to deal in the Software without restriction,
- including without limitation the rights to use, copy, modify, merge,
- publish, distribute, sublicense, and/or sell copies of the Software,
- and to permit persons to whom the Software is furnished to do so,
- subject to the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ Name: imath.h
+ Purpose: Arbitrary precision integer arithmetic routines.
+ Author: M. J. Fromberger
+
+ Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
-/* contrib/pgcrypto/imath.h */
#ifndef IMATH_H_
#define IMATH_H_
-/* use always 32bit digits - should some arch use 16bit digits? */
-#define USE_LONG_LONG
-
#include <limits.h>
typedef unsigned char mp_sign;
typedef unsigned int mp_size;
typedef int mp_result;
+typedef long mp_small; /* must be a signed type */
+typedef unsigned long mp_usmall; /* must be an unsigned type */
-#ifdef USE_LONG_LONG
-typedef uint32 mp_digit;
-typedef uint64 mp_word;
-#define MP_DIGIT_MAX 0xFFFFFFFFULL
-#define MP_WORD_MAX 0xFFFFFFFFFFFFFFFFULL
-#else
+/* Build with words as uint64 by default. */
+#ifdef USE_32BIT_WORDS
typedef uint16 mp_digit;
typedef uint32 mp_word;
-
-#define MP_DIGIT_MAX 0xFFFFUL
-#define MP_WORD_MAX 0xFFFFFFFFUL
+#define MP_DIGIT_MAX (PG_UINT16_MAX * 1UL)
+#define MP_WORD_MAX (PG_UINT32_MAX * 1UL)
+#else
+typedef uint32 mp_digit;
+typedef uint64 mp_word;
+#define MP_DIGIT_MAX (PG_UINT32_MAX * UINT64CONST(1))
+#define MP_WORD_MAX (PG_UINT64_MAX)
#endif
-typedef struct mpz
+typedef struct
{
+ mp_digit single;
mp_digit *digits;
mp_size alloc;
mp_size used;
@@ -64,10 +60,26 @@ typedef struct mpz
*mp_int;
-#define MP_DIGITS(Z) ((Z)->digits)
-#define MP_ALLOC(Z) ((Z)->alloc)
-#define MP_USED(Z) ((Z)->used)
-#define MP_SIGN(Z) ((Z)->sign)
+static inline mp_digit *
+MP_DIGITS(mp_int Z)
+{
+ return Z->digits;
+}
+static inline mp_size
+MP_ALLOC(mp_int Z)
+{
+ return Z->alloc;
+}
+static inline mp_size
+MP_USED(mp_int Z)
+{
+ return Z->used;
+}
+static inline mp_sign
+MP_SIGN(mp_int Z)
+{
+ return Z->sign;
+}
extern const mp_result MP_OK;
extern const mp_result MP_FALSE;
@@ -77,134 +89,357 @@ extern const mp_result MP_RANGE;
extern const mp_result MP_UNDEF;
extern const mp_result MP_TRUNC;
extern const mp_result MP_BADARG;
+extern const mp_result MP_MINERR;
+
+#define MP_DIGIT_BIT (sizeof(mp_digit) * CHAR_BIT)
+#define MP_WORD_BIT (sizeof(mp_word) * CHAR_BIT)
+#define MP_SMALL_MIN LONG_MIN
+#define MP_SMALL_MAX LONG_MAX
+#define MP_USMALL_MAX ULONG_MAX
+
+#define MP_MIN_RADIX 2
+#define MP_MAX_RADIX 36
-#define MP_DIGIT_BIT (sizeof(mp_digit) * CHAR_BIT)
-#define MP_WORD_BIT (sizeof(mp_word) * CHAR_BIT)
+/** Sets the default number of digits allocated to an `mp_int` constructed by
+ `mp_int_init_size()` with `prec == 0`. Allocations are rounded up to
+ multiples of this value. `MP_DEFAULT_PREC` is the default value. Requires
+ `ndigits > 0`. */
+void mp_int_default_precision(mp_size ndigits);
-#define MP_MIN_RADIX 2
-#define MP_MAX_RADIX 36
+/** Sets the number of digits below which multiplication will use the standard
+ quadratic "schoolbook" multiplcation algorithm rather than Karatsuba-Ofman.
+ Requires `ndigits >= sizeof(mp_word)`. */
+void mp_int_multiply_threshold(mp_size ndigits);
+/** A sign indicating a (strictly) negative value. */
extern const mp_sign MP_NEG;
+
+/** A sign indicating a zero or positive value. */
extern const mp_sign MP_ZPOS;
-#define mp_int_is_odd(Z) ((Z)->digits[0] & 1)
-#define mp_int_is_even(Z) !((Z)->digits[0] & 1)
+/** Reports whether `z` is odd, having remainder 1 when divided by 2. */
+static inline bool
+mp_int_is_odd(mp_int z)
+{
+ return (z->digits[0] & 1) != 0;
+}
-mp_size mp_get_default_precision(void);
-void mp_set_default_precision(mp_size s);
-mp_size mp_get_multiply_threshold(void);
-void mp_set_multiply_threshold(mp_size s);
+/** Reports whether `z` is even, having remainder 0 when divided by 2. */
+static inline bool
+mp_int_is_even(mp_int z)
+{
+ return (z->digits[0] & 1) == 0;
+}
+/** Initializes `z` with 1-digit precision and sets it to zero. This function
+ cannot fail unless `z == NULL`. */
mp_result mp_int_init(mp_int z);
+
+/** Allocates a fresh zero-valued `mpz_t` on the heap, returning NULL in case
+ of error. The only possible error is out-of-memory. */
mp_int mp_int_alloc(void);
+
+/** Initializes `z` with at least `prec` digits of storage, and sets it to
+ zero. If `prec` is zero, the default precision is used. In either case the
+ size is rounded up to the nearest multiple of the word size. */
mp_result mp_int_init_size(mp_int z, mp_size prec);
+
+/** Initializes `z` to be a copy of an already-initialized value in `old`. The
+ new copy does not share storage with the original. */
mp_result mp_int_init_copy(mp_int z, mp_int old);
-mp_result mp_int_init_value(mp_int z, int value);
-mp_result mp_int_set_value(mp_int z, int value);
+
+/** Initializes `z` to the specified signed `value` at default precision. */
+mp_result mp_int_init_value(mp_int z, mp_small value);
+
+/** Initializes `z` to the specified unsigned `value` at default precision. */
+mp_result mp_int_init_uvalue(mp_int z, mp_usmall uvalue);
+
+/** Sets `z` to the value of the specified signed `value`. */
+mp_result mp_int_set_value(mp_int z, mp_small value);
+
+/** Sets `z` to the value of the specified unsigned `value`. */
+mp_result mp_int_set_uvalue(mp_int z, mp_usmall uvalue);
+
+/** Releases the storage used by `z`. */
void mp_int_clear(mp_int z);
+
+/** Releases the storage used by `z` and also `z` itself.
+ This should only be used for `z` allocated by `mp_int_alloc()`. */
void mp_int_free(mp_int z);
-mp_result mp_int_copy(mp_int a, mp_int c); /* c = a */
-void mp_int_swap(mp_int a, mp_int c); /* swap a, c */
-void mp_int_zero(mp_int z); /* z = 0 */
-mp_result mp_int_abs(mp_int a, mp_int c); /* c = |a| */
-mp_result mp_int_neg(mp_int a, mp_int c); /* c = -a */
-mp_result mp_int_add(mp_int a, mp_int b, mp_int c); /* c = a + b */
-mp_result mp_int_add_value(mp_int a, int value, mp_int c);
-mp_result mp_int_sub(mp_int a, mp_int b, mp_int c); /* c = a - b */
-mp_result mp_int_sub_value(mp_int a, int value, mp_int c);
-mp_result mp_int_mul(mp_int a, mp_int b, mp_int c); /* c = a * b */
-mp_result mp_int_mul_value(mp_int a, int value, mp_int c);
-mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c);
-mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */
-
-mp_result mp_int_div(mp_int a, mp_int b, /* q = a / b */
- mp_int q, mp_int r); /* r = a % b */
-mp_result mp_int_div_value(mp_int a, int value, /* q = a / value */
- mp_int q, int *r); /* r = a % value */
-mp_result mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
- mp_int q, mp_int r); /* r = q % 2^p2 */
-mp_result mp_int_mod(mp_int a, mp_int m, mp_int c); /* c = a % m */
-
-#define mp_int_mod_value(A, V, R) mp_int_div_value((A), (V), 0, (R))
-mp_result mp_int_expt(mp_int a, int b, mp_int c); /* c = a^b */
-mp_result mp_int_expt_value(int a, int b, mp_int c); /* c = a^b */
-
-int mp_int_compare(mp_int a, mp_int b); /* a <=> b */
-int mp_int_compare_unsigned(mp_int a, mp_int b); /* |a| <=> |b| */
-int mp_int_compare_zero(mp_int z); /* a <=> 0 */
-int mp_int_compare_value(mp_int z, int value); /* a <=> v */
-
-/* Returns true if v|a, false otherwise (including errors) */
-int mp_int_divisible_value(mp_int a, int v);
-
-/* Returns k >= 0 such that z = 2^k, if one exists; otherwise < 0 */
+/** Replaces the value of `c` with a copy of the value of `a`. No new memory is
+ allocated unless `a` has more significant digits than `c` has allocated. */
+mp_result mp_int_copy(mp_int a, mp_int c);
+
+/** Swaps the values and storage between `a` and `c`. */
+void mp_int_swap(mp_int a, mp_int c);
+
+/** Sets `z` to zero. The allocated storage of `z` is not changed. */
+void mp_int_zero(mp_int z);
+
+/** Sets `c` to the absolute value of `a`. */
+mp_result mp_int_abs(mp_int a, mp_int c);
+
+/** Sets `c` to the additive inverse (negation) of `a`. */
+mp_result mp_int_neg(mp_int a, mp_int c);
+
+/** Sets `c` to the sum of `a` and `b`. */
+mp_result mp_int_add(mp_int a, mp_int b, mp_int c);
+
+/** Sets `c` to the sum of `a` and `value`. */
+mp_result mp_int_add_value(mp_int a, mp_small value, mp_int c);
+
+/** Sets `c` to the difference of `a` less `b`. */
+mp_result mp_int_sub(mp_int a, mp_int b, mp_int c);
+
+/** Sets `c` to the difference of `a` less `value`. */
+mp_result mp_int_sub_value(mp_int a, mp_small value, mp_int c);
+
+/** Sets `c` to the product of `a` and `b`. */
+mp_result mp_int_mul(mp_int a, mp_int b, mp_int c);
+
+/** Sets `c` to the product of `a` and `value`. */
+mp_result mp_int_mul_value(mp_int a, mp_small value, mp_int c);
+
+/** Sets `c` to the product of `a` and `2^p2`. Requires `p2 >= 0`. */
+mp_result mp_int_mul_pow2(mp_int a, mp_small p2, mp_int c);
+
+/** Sets `c` to the square of `a`. */
+mp_result mp_int_sqr(mp_int a, mp_int c);
+
+/** Sets `q` and `r` to the quotient and remainder of `a / b`. Division by
+ powers of 2 is detected and handled efficiently. The remainder is pinned
+ to `0 <= r < b`.
+
+ Either of `q` or `r` may be NULL, but not both, and `q` and `r` may not
+ point to the same value. */
+mp_result mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r);
+
+/** Sets `q` and `*r` to the quotient and remainder of `a / value`. Division by
+ powers of 2 is detected and handled efficiently. The remainder is pinned to
+ `0 <= *r < value`. Either of `q` or `r` may be NULL. */
+mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
+
+/** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
+ special case for division by powers of two that is more efficient than
+ using ordinary division. Note that `mp_int_div()` will automatically handle
+ this case, this function is for cases where you have only the exponent. */
+mp_result mp_int_div_pow2(mp_int a, mp_small p2, mp_int q, mp_int r);
+
+/** Sets `c` to the remainder of `a / m`.
+ The remainder is pinned to `0 <= c < m`. */
+mp_result mp_int_mod(mp_int a, mp_int m, mp_int c);
+
+/** Sets `c` to the value of `a` raised to the `b` power.
+ It returns `MP_RANGE` if `b < 0`. */
+mp_result mp_int_expt(mp_int a, mp_small b, mp_int c);
+
+/** Sets `c` to the value of `a` raised to the `b` power.
+ It returns `MP_RANGE` if `b < 0`. */
+mp_result mp_int_expt_value(mp_small a, mp_small b, mp_int c);
+
+/** Sets `c` to the value of `a` raised to the `b` power.
+ It returns `MP_RANGE` if `b < 0`. */
+mp_result mp_int_expt_full(mp_int a, mp_int b, mp_int c);
+
+/** Sets `*r` to the remainder of `a / value`.
+ The remainder is pinned to `0 <= r < value`. */
+static inline
+mp_result
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
+{
+ return mp_int_div_value(a, value, 0, r);
+}
+
+/** Returns the comparator of `a` and `b`. */
+int mp_int_compare(mp_int a, mp_int b);
+
+/** Returns the comparator of the magnitudes of `a` and `b`, disregarding their
+ signs. Neither `a` nor `b` is modified by the comparison. */
+int mp_int_compare_unsigned(mp_int a, mp_int b);
+
+/** Returns the comparator of `z` and zero. */
+int mp_int_compare_zero(mp_int z);
+
+/** Returns the comparator of `z` and the signed value `v`. */
+int mp_int_compare_value(mp_int z, mp_small v);
+
+/** Returns the comparator of `z` and the unsigned value `uv`. */
+int mp_int_compare_uvalue(mp_int z, mp_usmall uv);
+
+/** Reports whether `a` is divisible by `v`. */
+bool mp_int_divisible_value(mp_int a, mp_small v);
+
+/** Returns `k >= 0` such that `z` is `2^k`, if such a `k` exists. If no such
+ `k` exists, the function returns -1. */
int mp_int_is_pow2(mp_int z);
-mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m,
- mp_int c); /* c = a^b (mod m) */
-mp_result mp_int_exptmod_evalue(mp_int a, int value,
- mp_int m, mp_int c); /* c = a^v (mod m) */
-mp_result mp_int_exptmod_bvalue(int value, mp_int b,
- mp_int m, mp_int c); /* c = v^b (mod m) */
-mp_result mp_int_exptmod_known(mp_int a, mp_int b,
- mp_int m, mp_int mu,
- mp_int c); /* c = a^b (mod m) */
+/** Sets `c` to the value of `a` raised to the `b` power, reduced modulo `m`.
+ It returns `MP_RANGE` if `b < 0` or `MP_UNDEF` if `m == 0`. */
+mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c);
+
+/** Sets `c` to the value of `a` raised to the `value` power, modulo `m`.
+ It returns `MP_RANGE` if `value < 0` or `MP_UNDEF` if `m == 0`. */
+mp_result mp_int_exptmod_evalue(mp_int a, mp_small value, mp_int m, mp_int c);
+
+/** Sets `c` to the value of `value` raised to the `b` power, modulo `m`.
+ It returns `MP_RANGE` if `b < 0` or `MP_UNDEF` if `m == 0`. */
+mp_result mp_int_exptmod_bvalue(mp_small value, mp_int b, mp_int m, mp_int c);
+
+/** Sets `c` to the value of `a` raised to the `b` power, reduced modulo `m`,
+ given a precomputed reduction constant `mu` defined for Barrett's modular
+ reduction algorithm.
+
+ It returns `MP_RANGE` if `b < 0` or `MP_UNDEF` if `m == 0`. */
+mp_result mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c);
+
+/** Sets `c` to the reduction constant for Barrett reduction by modulus `m`.
+ Requires that `c` and `m` point to distinct locations. */
mp_result mp_int_redux_const(mp_int m, mp_int c);
-mp_result mp_int_invmod(mp_int a, mp_int m, mp_int c); /* c = 1/a (mod m) */
+/** Sets `c` to the multiplicative inverse of `a` modulo `m`, if it exists.
+ The least non-negative representative of the congruence class is computed.
+
+ It returns `MP_UNDEF` if the inverse does not exist, or `MP_RANGE` if `a ==
+ 0` or `m <= 0`. */
+mp_result mp_int_invmod(mp_int a, mp_int m, mp_int c);
-mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c); /* c = gcd(a, b) */
+/** Sets `c` to the greatest common divisor of `a` and `b`.
-mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */
- mp_int x, mp_int y); /* c = ax + by */
+ It returns `MP_UNDEF` if the GCD is undefined, for example if `a`
+ and `b` are both zero. */
+mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c);
-mp_result mp_int_sqrt(mp_int a, mp_int c); /* c = floor(sqrt(q)) */
+/** Sets `c` to the greatest common divisor of `a` and `b`, and sets `x` and
+ `y` to values satisfying Bezout's identity `gcd(a, b) = ax + by`.
-/* Convert to an int, if representable (returns MP_RANGE if not). */
-mp_result mp_int_to_int(mp_int z, int *out);
+ It returns `MP_UNDEF` if the GCD is undefined, for example if `a`
+ and `b` are both zero. */
+mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, mp_int x, mp_int y);
-/* Convert to nul-terminated string with the specified radix, writing at
- most limit characters including the nul terminator */
-mp_result mp_int_to_string(mp_int z, mp_size radix,
- char *str, int limit);
+/** Sets `c` to the least common multiple of `a` and `b`.
-/* Return the number of characters required to represent
- z in the given radix. May over-estimate. */
+ It returns `MP_UNDEF` if the LCM is undefined, for example if `a`
+ and `b` are both zero. */
+mp_result mp_int_lcm(mp_int a, mp_int b, mp_int c);
+
+/** Sets `c` to the greatest integer not less than the `b`th root of `a`,
+ using Newton's root-finding algorithm.
+ It returns `MP_UNDEF` if `a < 0` and `b` is even. */
+mp_result mp_int_root(mp_int a, mp_small b, mp_int c);
+
+/** Sets `c` to the greatest integer not less than the square root of `a`.
+ This is a special case of `mp_int_root()`. */
+static inline
+mp_result
+mp_int_sqrt(mp_int a, mp_int c)
+{
+ return mp_int_root(a, 2, c);
+}
+
+/** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
+ If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
+mp_result mp_int_to_int(mp_int z, mp_small *out);
+
+/** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
+ If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
+mp_result mp_int_to_uint(mp_int z, mp_usmall *out);
+
+/** Converts `z` to a zero-terminated string of characters in the specified
+ `radix`, writing at most `limit` characters to `str` including the
+ terminating NUL value. A leading `-` is used to indicate a negative value.
+
+ Returns `MP_TRUNC` if `limit` was too small to write all of `z`.
+ Requires `MP_MIN_RADIX <= radix <= MP_MAX_RADIX`. */
+mp_result mp_int_to_string(mp_int z, mp_size radix, char *str, int limit);
+
+/** Reports the minimum number of characters required to represent `z` as a
+ zero-terminated string in the given `radix`.
+ Requires `MP_MIN_RADIX <= radix <= MP_MAX_RADIX`. */
mp_result mp_int_string_len(mp_int z, mp_size radix);
-/* Read zero-terminated string into z */
+/** Reads a string of ASCII digits in the specified `radix` from the zero
+ terminated `str` provided into `z`. For values of `radix > 10`, the letters
+ `A`..`Z` or `a`..`z` are accepted. Letters are interpreted without respect
+ to case.
+
+ Leading whitespace is ignored, and a leading `+` or `-` is interpreted as a
+ sign flag. Processing stops when a NUL or any other character out of range
+ for a digit in the given radix is encountered.
+
+ If the whole string was consumed, `MP_OK` is returned; otherwise
+ `MP_TRUNC` is returned.
+
+ Requires `MP_MIN_RADIX <= radix <= MP_MAX_RADIX`. */
mp_result mp_int_read_string(mp_int z, mp_size radix, const char *str);
-mp_result mp_int_read_cstring(mp_int z, mp_size radix, const char *str,
- char **end);
-/* Return the number of significant bits in z */
+/** Reads a string of ASCII digits in the specified `radix` from the zero
+ terminated `str` provided into `z`. For values of `radix > 10`, the letters
+ `A`..`Z` or `a`..`z` are accepted. Letters are interpreted without respect
+ to case.
+
+ Leading whitespace is ignored, and a leading `+` or `-` is interpreted as a
+ sign flag. Processing stops when a NUL or any other character out of range
+ for a digit in the given radix is encountered.
+
+ If the whole string was consumed, `MP_OK` is returned; otherwise
+ `MP_TRUNC` is returned. If `end` is not NULL, `*end` is set to point to
+ the first unconsumed byte of the input string (the NUL byte if the whole
+ string was consumed). This emulates the behavior of the standard C
+ `strtol()` function.
+
+ Requires `MP_MIN_RADIX <= radix <= MP_MAX_RADIX`. */
+mp_result mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end);
+
+/** Returns the number of significant bits in `z`. */
mp_result mp_int_count_bits(mp_int z);
-/* Convert z to two's complement binary, writing at most limit bytes */
+/** Converts `z` to 2's complement binary, writing at most `limit` bytes into
+ the given `buf`. Returns `MP_TRUNC` if the buffer limit was too small to
+ contain the whole value. If this occurs, the contents of buf will be
+ effectively garbage, as the function uses the buffer as scratch space.
+
+ The binary representation of `z` is in base-256 with digits ordered from
+ most significant to least significant (network byte ordering). The
+ high-order bit of the first byte is set for negative values, clear for
+ non-negative values.
+
+ As a result, non-negative values will be padded with a leading zero byte if
+ the high-order byte of the base-256 magnitude is set. This extra byte is
+ accounted for by the `mp_int_binary_len()` function. */
mp_result mp_int_to_binary(mp_int z, unsigned char *buf, int limit);
-/* Read a two's complement binary value into z from the given buffer */
+/** Reads a 2's complement binary value from `buf` into `z`, where `len` is the
+ length of the buffer. The contents of `buf` may be overwritten during
+ processing, although they will be restored when the function returns. */
mp_result mp_int_read_binary(mp_int z, unsigned char *buf, int len);
-/* Return the number of bytes required to represent z in binary. */
+/** Returns the number of bytes to represent `z` in 2's complement binary. */
mp_result mp_int_binary_len(mp_int z);
-/* Convert z to unsigned binary, writing at most limit bytes */
+/** Converts the magnitude of `z` to unsigned binary, writing at most `limit`
+ bytes into the given `buf`. The sign of `z` is ignored, but `z` is not
+ modified. Returns `MP_TRUNC` if the buffer limit was too small to contain
+ the whole value. If this occurs, the contents of `buf` will be effectively
+ garbage, as the function uses the buffer as scratch space during
+ conversion.
+
+ The binary representation of `z` is in base-256 with digits ordered from
+ most significant to least significant (network byte ordering). */
mp_result mp_int_to_unsigned(mp_int z, unsigned char *buf, int limit);
-/* Read an unsigned binary value into z from the given buffer */
+/** Reads an unsigned binary value from `buf` into `z`, where `len` is the
+ length of the buffer. The contents of `buf` are not modified during
+ processing. */
mp_result mp_int_read_unsigned(mp_int z, unsigned char *buf, int len);
-/* Return the number of bytes required to represent z as unsigned output */
+/** Returns the number of bytes required to represent `z` as an unsigned binary
+ value in base 256. */
mp_result mp_int_unsigned_len(mp_int z);
-/* Return a statically allocated string describing error code res */
+/** Returns a pointer to a brief, human-readable, zero-terminated string
+ describing `res`. The returned string is statically allocated and must not
+ be freed by the caller. */
const char *mp_error_string(mp_result res);
-#if 0
-void s_print(char *tag, mp_int z);
-void s_print_buf(char *tag, mp_digit *buf, mp_size num);
-#endif
-
#endif /* end IMATH_H_ */
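Taken together, the new header comments document a conventional init/use/clear lifecycle. A hypothetical caller using only functions declared above; note how the pinned remainder makes division of -7 by 3 yield q = -3, r = 2:

    #include "imath.h"

    /* Hypothetical usage sketch; error checks abbreviated. */
    static void
    imath_demo(void)
    {
        mpz_t       a, b, q, r;
        char        buf[64];

        mp_int_init(&a);
        mp_int_init(&b);
        mp_int_init(&q);
        mp_int_init(&r);

        mp_int_read_string(&a, 10, "-7");
        mp_int_read_string(&b, 10, "3");

        /* remainder pinned to 0 <= r < b, so q = -3 and r = 2 */
        mp_int_div(&a, &b, &q, &r);

        mp_int_to_string(&q, 10, buf, sizeof(buf));  /* "-3" */

        mp_int_clear(&a);
        mp_int_clear(&b);
        mp_int_clear(&q);
        mp_int_clear(&r);
    }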
diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c
index 16dfe725eae..db58408d4c7 100644
--- a/contrib/pgcrypto/internal.c
+++ b/contrib/pgcrypto/internal.c
@@ -39,25 +39,6 @@
#include "blf.h"
#include "rijndael.h"
-/*
- * System reseeds should be separated at least this much.
- */
-#define SYSTEM_RESEED_MIN (20*60) /* 20 min */
-/*
- * How often to roll dice.
- */
-#define SYSTEM_RESEED_CHECK_TIME (10*60) /* 10 min */
-/*
- * The chance is x/256 that the reseed happens.
- */
-#define SYSTEM_RESEED_CHANCE (4) /* 256/4 * 10min ~ 10h */
-
-/*
- * If this much time has passed, force reseed.
- */
-#define SYSTEM_RESEED_MAX (12*60*60) /* 12h */
-
-
#ifndef MD5_DIGEST_LENGTH
#define MD5_DIGEST_LENGTH 16
#endif
diff --git a/contrib/pgcrypto/mbuf.h b/contrib/pgcrypto/mbuf.h
index 50a989f059d..e6d754e8695 100644
--- a/contrib/pgcrypto/mbuf.h
+++ b/contrib/pgcrypto/mbuf.h
@@ -90,8 +90,8 @@ int mbuf_free(MBuf *mbuf);
/*
* Push filter
*/
-int pushf_create(PushFilter **res, const PushFilterOps *ops, void *init_arg,
- PushFilter *next);
+int pushf_create(PushFilter **res, const PushFilterOps *ops, void *init_arg,
+ PushFilter *next);
int pushf_write(PushFilter *mp, const uint8 *data, int len);
void pushf_free_all(PushFilter *mp);
void pushf_free(PushFilter *mp);
@@ -102,11 +102,11 @@ int pushf_create_mbuf_writer(PushFilter **mp_p, MBuf *mbuf);
/*
* Pull filter
*/
-int pullf_create(PullFilter **res, const PullFilterOps *ops,
- void *init_arg, PullFilter *src);
+int pullf_create(PullFilter **res, const PullFilterOps *ops,
+ void *init_arg, PullFilter *src);
int pullf_read(PullFilter *mp, int len, uint8 **data_p);
-int pullf_read_max(PullFilter *mp, int len,
- uint8 **data_p, uint8 *tmpbuf);
+int pullf_read_max(PullFilter *mp, int len,
+ uint8 **data_p, uint8 *tmpbuf);
void pullf_free(PullFilter *mp);
int pullf_read_fixed(PullFilter *src, int len, uint8 *dst);
diff --git a/contrib/pgcrypto/md5.c b/contrib/pgcrypto/md5.c
index cac4e408ab4..15d7c9bcdc5 100644
--- a/contrib/pgcrypto/md5.c
+++ b/contrib/pgcrypto/md5.c
@@ -132,7 +132,7 @@ static const uint8 md5_paddat[MD5_BUFLEN] = {
0, 0, 0, 0, 0, 0, 0, 0,
};
-static void md5_calc(uint8 *, md5_ctxt *);
+static void md5_calc(const uint8 *, md5_ctxt *);
void
md5_init(md5_ctxt *ctxt)
@@ -161,7 +161,7 @@ md5_loop(md5_ctxt *ctxt, const uint8 *input, unsigned len)
md5_calc(ctxt->md5_buf, ctxt);
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN)
- md5_calc((uint8 *) (input + i), ctxt);
+ md5_calc(input + i, ctxt);
ctxt->md5_i = len - i;
memmove(ctxt->md5_buf, input + i, ctxt->md5_i);
@@ -242,7 +242,7 @@ static uint32 X[16];
#endif
static void
-md5_calc(uint8 *b64, md5_ctxt *ctxt)
+md5_calc(const uint8 *b64, md5_ctxt *ctxt)
{
uint32 A = ctxt->md5_sta;
uint32 B = ctxt->md5_stb;
@@ -250,7 +250,7 @@ md5_calc(uint8 *b64, md5_ctxt *ctxt)
uint32 D = ctxt->md5_std;
#ifndef WORDS_BIGENDIAN
- uint32 *X = (uint32 *) b64;
+ const uint32 *X = (const uint32 *) b64;
#else
/* 4 byte words */
/* what a brute force but fast! */
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index f71a933407d..7d686f39402 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -408,7 +408,7 @@ gen_ossl_encrypt(PX_Cipher *c, const uint8 *data, unsigned dlen,
/* Blowfish */
/*
- * Check if strong crypto is supported. Some openssl installations
+ * Check if strong crypto is supported. Some OpenSSL installations
* support only short keys and unfortunately BF_set_key does not return any
* error value. This function tests if is possible to use strong key.
*/
diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c
index de09ececcfd..f69ae107c3a 100644
--- a/contrib/pgcrypto/pgcrypto.c
+++ b/contrib/pgcrypto/pgcrypto.c
@@ -34,7 +34,6 @@
#include <ctype.h>
#include "parser/scansup.h"
-#include "utils/backend_random.h"
#include "utils/builtins.h"
#include "utils/uuid.h"
@@ -423,7 +422,6 @@ PG_FUNCTION_INFO_V1(pg_random_bytes);
Datum
pg_random_bytes(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_STRONG_RANDOM
int len = PG_GETARG_INT32(0);
bytea *res;
@@ -440,9 +438,6 @@ pg_random_bytes(PG_FUNCTION_ARGS)
px_THROW_ERROR(PXE_NO_RANDOM);
PG_RETURN_BYTEA_P(res);
-#else
- px_THROW_ERROR(PXE_NO_RANDOM);
-#endif
}
/* SQL function: gen_random_uuid() returns uuid */
@@ -451,24 +446,8 @@ PG_FUNCTION_INFO_V1(pg_random_uuid);
Datum
pg_random_uuid(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_STRONG_RANDOM
- uint8 *buf = (uint8 *) palloc(UUID_LEN);
-
- /* Generate random bits. */
- if (!pg_backend_random((char *) buf, UUID_LEN))
- px_THROW_ERROR(PXE_NO_RANDOM);
-
- /*
- * Set magic numbers for a "version 4" (pseudorandom) UUID, see
- * http://tools.ietf.org/html/rfc4122#section-4.4
- */
- buf[6] = (buf[6] & 0x0f) | 0x40; /* "version" field */
- buf[8] = (buf[8] & 0x3f) | 0x80; /* "variant" field */
-
- PG_RETURN_UUID_P((pg_uuid_t *) buf);
-#else
- px_THROW_ERROR(PXE_NO_RANDOM);
-#endif
+ /* redirect to built-in function */
+ return gen_random_uuid(fcinfo);
}
static void *
diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c
index 57efe733386..2adaf2c8755 100644
--- a/contrib/pgcrypto/pgp-compress.c
+++ b/contrib/pgcrypto/pgp-compress.c
@@ -117,7 +117,7 @@ compress_process(PushFilter *next, void *priv, const uint8 *data, int len)
*/
while (len > 0)
{
- st->stream.next_in = (void *) data;
+ st->stream.next_in = unconstify(uint8 *, data);
st->stream.avail_in = len;
st->stream.next_out = st->buf;
st->stream.avail_out = st->buf_len;
@@ -311,7 +311,7 @@ pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src)
{
return pullf_create(res, &decompress_filter, ctx, src);
}
-#else /* !HAVE_ZLIB */
+#else /* !HAVE_LIBZ */
int
pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst)
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index 7d31e5354b8..4c43eb7e3ef 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -132,7 +132,7 @@ pgp_parse_pkt_hdr(PullFilter *src, uint8 *tag, int *len_p, int allow_ctx)
int res;
uint8 *p;
- /* EOF is normal here, thus we dont use GETBYTE */
+ /* EOF is normal here, thus we don't use GETBYTE */
res = pullf_read(src, 1, &p);
if (res < 0)
return res;
@@ -355,7 +355,7 @@ mdc_finish(PGP_Context *ctx, PullFilter *src, int len)
if (len != 20)
return PXE_PGP_CORRUPT_DATA;
- /* mdc_read should not call md_update */
+ /* mdc_read should not call px_md_update */
ctx->in_mdc_pkt = 1;
/* read data */
@@ -423,7 +423,7 @@ static struct PullFilterOps mdc_filter = {
/*
* Combined Pkt reader and MDC hasher.
*
- * For the case of SYMENCRYPTED_MDC packet, where
+ * For the case of SYMENCRYPTED_DATA_MDC packet, where
* the data part has 'context length', which means
* that data packet ends 22 bytes before end of parent
* packet, which is silly.
@@ -811,8 +811,8 @@ parse_literal_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
}
/* process_data_packets and parse_compressed_data call each other */
-static int process_data_packets(PGP_Context *ctx, MBuf *dst,
- PullFilter *src, int allow_compr, int need_mdc);
+static int process_data_packets(PGP_Context *ctx, MBuf *dst,
+ PullFilter *src, int allow_compr, int need_mdc);
static int
parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
@@ -894,7 +894,10 @@ process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src,
break;
}
- /* context length inside SYMENC_MDC needs special handling */
+ /*
+ * Context length inside SYMENCRYPTED_DATA_MDC packet needs special
+ * handling.
+ */
if (need_mdc && res == PKT_CONTEXT)
res = pullf_create(&pkt, &mdcbuf_filter, ctx, src);
else
diff --git a/contrib/pgcrypto/pgp-encrypt.c b/contrib/pgcrypto/pgp-encrypt.c
index d510729e5b4..2938b4b3f5e 100644
--- a/contrib/pgcrypto/pgp-encrypt.c
+++ b/contrib/pgcrypto/pgp-encrypt.c
@@ -37,8 +37,6 @@
#include "px.h"
#include "pgp.h"
-#include "utils/backend_random.h"
-
#define MDC_DIGEST_LEN 20
#define STREAM_ID 0xE0
@@ -481,13 +479,12 @@ init_encdata_packet(PushFilter **pf_res, PGP_Context *ctx, PushFilter *dst)
static int
write_prefix(PGP_Context *ctx, PushFilter *dst)
{
-#ifdef HAVE_STRONG_RANDOM
uint8 prefix[PGP_MAX_BLOCK + 2];
int res,
bs;
bs = pgp_get_cipher_block_size(ctx->cipher_algo);
- if (!pg_backend_random((char *) prefix, bs))
+ if (!pg_strong_random(prefix, bs))
return PXE_NO_RANDOM;
prefix[bs + 0] = prefix[bs - 2];
@@ -496,9 +493,6 @@ write_prefix(PGP_Context *ctx, PushFilter *dst)
res = pushf_write(dst, prefix, bs + 2);
px_memset(prefix, 0, bs + 2);
return res < 0 ? res : 0;
-#else
- return PXE_NO_RANDOM;
-#endif
}
/*
@@ -587,13 +581,9 @@ init_sess_key(PGP_Context *ctx)
{
if (ctx->use_sess_key || ctx->pub_key)
{
-#ifdef HAVE_STRONG_RANDOM
ctx->sess_key_len = pgp_get_cipher_key_size(ctx->cipher_algo);
- if (!pg_strong_random((char *) ctx->sess_key, ctx->sess_key_len))
+ if (!pg_strong_random(ctx->sess_key, ctx->sess_key_len))
return PXE_NO_RANDOM;
-#else
- return PXE_NO_RANDOM;
-#endif
}
else
{
@@ -628,7 +618,7 @@ pgp_encrypt(PGP_Context *ctx, MBuf *src, MBuf *dst)
goto out;
/*
- * initialize symkey
+ * initialize sym_key
*/
if (ctx->sym_key)
{
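The dropped (char *) casts in write_prefix() and init_sess_key() follow from pg_strong_random() now taking a void * buffer. The prefix layout itself is unchanged: bs random bytes followed by a repeat of the last two, which lets decryption detect a wrong key early. A condensed sketch of that construction (hypothetical helper, not the exact pgcrypto code):

    #include "postgres.h"        /* pulls in pg_strong_random() via port.h */

    /* Build the OpenPGP CFB prefix: bs random bytes, then bytes bs-2 and
     * bs-1 repeated. Returns the number of bytes to write, or -1 on RNG
     * failure (PXE_NO_RANDOM in the real code). */
    static int
    build_cfb_prefix(uint8 *prefix, int bs)
    {
        if (!pg_strong_random(prefix, bs))
            return -1;

        prefix[bs + 0] = prefix[bs - 2];
        prefix[bs + 1] = prefix[bs - 1];
        return bs + 2;
    }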
diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c
index 545009ce199..c73f086b0be 100644
--- a/contrib/pgcrypto/pgp-mpi-internal.c
+++ b/contrib/pgcrypto/pgp-mpi-internal.c
@@ -57,13 +57,12 @@ mp_clear_free(mpz_t *a)
static int
mp_px_rand(uint32 bits, mpz_t *res)
{
-#ifdef HAVE_STRONG_RANDOM
unsigned bytes = (bits + 7) / 8;
int last_bits = bits & 7;
uint8 *buf;
buf = px_alloc(bytes);
- if (!pg_strong_random((char *) buf, bytes))
+ if (!pg_strong_random(buf, bytes))
{
px_free(buf);
return PXE_NO_RANDOM;
@@ -83,9 +82,6 @@ mp_px_rand(uint32 bits, mpz_t *res)
px_free(buf);
return 0;
-#else
- return PXE_NO_RANDOM;
-#endif
}
static void
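mp_px_rand() sizes its buffer as ceil(bits/8) bytes; when bits is not a whole number of bytes, the top byte must be masked so the result carries at most `bits` significant bits. The masking itself lies outside the hunk above, so the sketch below is an illustration under that assumption, not the exact code:

    #include "postgres.h"
    #include "px.h"              /* PXE_NO_RANDOM */

    /* Draw at most `bits` random bits into buf (big-endian byte order). */
    static int
    rand_bits(uint8 *buf, uint32 bits)
    {
        unsigned    bytes = (bits + 7) / 8;
        int         last_bits = bits & 7;

        if (!pg_strong_random(buf, bytes))
            return PXE_NO_RANDOM;

        /* drop excess high bits in the most significant byte */
        if (last_bits != 0)
            buf[0] &= (uint8) ((1 << last_bits) - 1);
        return 0;
    }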
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index 0984e01a14b..3feadf7b702 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -761,7 +761,7 @@ pgp_pub_decrypt_text(PG_FUNCTION_ARGS)
*/
/*
- * Helper function for pgp_armor. Converts arrays of keys and values into
+ * Helper function for pg_armor. Converts arrays of keys and values into
* plain C arrays, and checks that they don't contain invalid characters.
*/
static int
diff --git a/contrib/pgcrypto/pgp-pubenc.c b/contrib/pgcrypto/pgp-pubenc.c
index 44398766643..08599f09786 100644
--- a/contrib/pgcrypto/pgp-pubenc.c
+++ b/contrib/pgcrypto/pgp-pubenc.c
@@ -39,7 +39,6 @@
static int
pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p)
{
-#ifdef HAVE_STRONG_RANDOM
uint8 *buf,
*p;
int pad_len = res_len - 2 - data_len;
@@ -50,7 +49,7 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p)
buf = px_alloc(res_len);
buf[0] = 0x02;
- if (!pg_strong_random((char *) buf + 1, pad_len))
+ if (!pg_strong_random(buf + 1, pad_len))
{
px_free(buf);
return PXE_NO_RANDOM;
@@ -62,11 +61,11 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p)
{
if (*p == 0)
{
- if (!pg_strong_random((char *) p, 1))
+ if (!pg_strong_random(p, 1))
{
px_memset(buf, 0, res_len);
px_free(buf);
- break;
+ return PXE_NO_RANDOM;
}
}
if (*p != 0)
@@ -78,10 +77,6 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p)
*res_p = buf;
return 0;
-
-#else
- return PXE_NO_RANDOM;
-#endif
}
static int
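Context for the pad_eme_pkcs1_v15() fix: EME-PKCS#1 v1.5 lays the block out as 0x02 || PS || 0x00 || M, where PS is pad_len strictly nonzero random bytes, so any zero inside PS would prematurely terminate the padding. The old code could break out of the retry loop on RNG failure and leave a zero byte in place; the fix propagates PXE_NO_RANDOM instead. A hypothetical helper capturing the corrected invariant:

    #include "postgres.h"
    #include "px.h"

    /* Re-draw any zero bytes in p[0..pad_len-1]; every PS byte must be
     * nonzero. Fails hard when the RNG does, as the fixed code now does. */
    static int
    fill_nonzero(uint8 *p, int pad_len)
    {
        for (int i = 0; i < pad_len; i++)
        {
            while (p[i] == 0)
            {
                if (!pg_strong_random(&p[i], 1))
                    return PXE_NO_RANDOM;
            }
        }
        return 0;
    }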
diff --git a/contrib/pgcrypto/pgp-s2k.c b/contrib/pgcrypto/pgp-s2k.c
index a0fd8969efe..3f2f442ffcc 100644
--- a/contrib/pgcrypto/pgp-s2k.c
+++ b/contrib/pgcrypto/pgp-s2k.c
@@ -34,7 +34,6 @@
#include "px.h"
#include "pgp.h"
-#include "utils/backend_random.h"
static int
calc_s2k_simple(PGP_S2K *s2k, PX_MD *md, const uint8 *key,
@@ -235,13 +234,13 @@ pgp_s2k_fill(PGP_S2K *s2k, int mode, int digest_algo, int count)
case PGP_S2K_SIMPLE:
break;
case PGP_S2K_SALTED:
- if (!pg_backend_random((char *) s2k->salt, PGP_S2K_SALT))
+ if (!pg_strong_random(s2k->salt, PGP_S2K_SALT))
return PXE_NO_RANDOM;
break;
case PGP_S2K_ISALTED:
- if (!pg_backend_random((char *) s2k->salt, PGP_S2K_SALT))
+ if (!pg_strong_random(s2k->salt, PGP_S2K_SALT))
return PXE_NO_RANDOM;
- if (!pg_backend_random((char *) &tmp, 1))
+ if (!pg_strong_random(&tmp, 1))
return PXE_NO_RANDOM;
s2k->iter = decide_s2k_iter(tmp, count);
break;
diff --git a/contrib/pgcrypto/pgp.c b/contrib/pgcrypto/pgp.c
index 0800fc325d1..dd8dae1b845 100644
--- a/contrib/pgcrypto/pgp.c
+++ b/contrib/pgcrypto/pgp.c
@@ -54,7 +54,6 @@ struct digest_info
{
const char *name;
int code;
- const char *int_name;
};
struct cipher_info
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 1b6ea4c9eaf..f338523b7a6 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -261,8 +261,8 @@ int pgp_set_unicode_mode(PGP_Context *ctx, int mode);
int pgp_get_unicode_mode(PGP_Context *ctx);
int pgp_set_symkey(PGP_Context *ctx, const uint8 *key, int klen);
-int pgp_set_pubkey(PGP_Context *ctx, MBuf *keypkt,
- const uint8 *key, int klen, int pubtype);
+int pgp_set_pubkey(PGP_Context *ctx, MBuf *keypkt,
+ const uint8 *key, int klen, int pubtype);
int pgp_get_keyid(MBuf *pgp_data, char *dst);
@@ -278,17 +278,17 @@ int pgp_s2k_read(PullFilter *src, PGP_S2K *s2k);
int pgp_s2k_process(PGP_S2K *s2k, int cipher, const uint8 *key, int klen);
typedef struct PGP_CFB PGP_CFB;
-int pgp_cfb_create(PGP_CFB **ctx_p, int algo,
- const uint8 *key, int key_len, int recync, uint8 *iv);
+int pgp_cfb_create(PGP_CFB **ctx_p, int algo,
+ const uint8 *key, int key_len, int resync, uint8 *iv);
void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
-void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
- int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+ int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int pgp_extract_armor_headers(const uint8 *src, unsigned len,
- int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+ int *nheaders, char ***keys, char ***values);
int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
@@ -298,10 +298,10 @@ void pgp_key_free(PGP_PubKey *pk);
int _pgp_read_public_key(PullFilter *pkt, PGP_PubKey **pk_p);
int pgp_parse_pubenc_sesskey(PGP_Context *ctx, PullFilter *pkt);
-int pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len,
- int pkttype, PGP_Context *ctx);
-int pgp_parse_pkt_hdr(PullFilter *src, uint8 *tag, int *len_p,
- int allow_ctx);
+int pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len,
+ int pkttype, PGP_Context *ctx);
+int pgp_parse_pkt_hdr(PullFilter *src, uint8 *tag, int *len_p,
+ int allow_ctx);
int pgp_skip_packet(PullFilter *pkt);
int pgp_expect_packet_end(PullFilter *pkt);
@@ -317,10 +317,10 @@ int pgp_mpi_write(PushFilter *dst, PGP_MPI *n);
int pgp_mpi_hash(PX_MD *md, PGP_MPI *n);
unsigned pgp_mpi_cksum(unsigned cksum, PGP_MPI *n);
-int pgp_elgamal_encrypt(PGP_PubKey *pk, PGP_MPI *m,
- PGP_MPI **c1, PGP_MPI **c2);
-int pgp_elgamal_decrypt(PGP_PubKey *pk, PGP_MPI *c1, PGP_MPI *c2,
- PGP_MPI **m);
+int pgp_elgamal_encrypt(PGP_PubKey *pk, PGP_MPI *m,
+ PGP_MPI **c1, PGP_MPI **c2);
+int pgp_elgamal_decrypt(PGP_PubKey *pk, PGP_MPI *c1, PGP_MPI *c2,
+ PGP_MPI **m);
int pgp_rsa_encrypt(PGP_PubKey *pk, PGP_MPI *m, PGP_MPI **c);
int pgp_rsa_decrypt(PGP_PubKey *pk, PGP_MPI *c, PGP_MPI **m);
diff --git a/contrib/pgcrypto/px-crypt.c b/contrib/pgcrypto/px-crypt.c
index ee40788fe71..51be0b7da17 100644
--- a/contrib/pgcrypto/px-crypt.c
+++ b/contrib/pgcrypto/px-crypt.c
@@ -34,7 +34,6 @@
#include "px.h"
#include "px-crypt.h"
-#include "utils/backend_random.h"
static char *
run_crypt_des(const char *psw, const char *salt,
@@ -153,7 +152,7 @@ px_gen_salt(const char *salt_type, char *buf, int rounds)
return PXE_BAD_SALT_ROUNDS;
}
- if (!pg_backend_random(rbuf, g->input_len))
+ if (!pg_strong_random(rbuf, g->input_len))
return PXE_NO_RANDOM;
p = g->gen(rounds, rbuf, g->input_len, buf, PX_MAX_SALT_LEN);
diff --git a/contrib/pgcrypto/px-crypt.h b/contrib/pgcrypto/px-crypt.h
index 696902a17c3..08001a81f5e 100644
--- a/contrib/pgcrypto/px-crypt.h
+++ b/contrib/pgcrypto/px-crypt.h
@@ -56,27 +56,27 @@ int px_gen_salt(const char *salt_type, char *dst, int rounds);
*/
/* crypt-gensalt.c */
-char *_crypt_gensalt_traditional_rn(unsigned long count,
- const char *input, int size, char *output, int output_size);
-char *_crypt_gensalt_extended_rn(unsigned long count,
- const char *input, int size, char *output, int output_size);
-char *_crypt_gensalt_md5_rn(unsigned long count,
- const char *input, int size, char *output, int output_size);
-char *_crypt_gensalt_blowfish_rn(unsigned long count,
- const char *input, int size, char *output, int output_size);
+char *_crypt_gensalt_traditional_rn(unsigned long count,
+ const char *input, int size, char *output, int output_size);
+char *_crypt_gensalt_extended_rn(unsigned long count,
+ const char *input, int size, char *output, int output_size);
+char *_crypt_gensalt_md5_rn(unsigned long count,
+ const char *input, int size, char *output, int output_size);
+char *_crypt_gensalt_blowfish_rn(unsigned long count,
+ const char *input, int size, char *output, int output_size);
/* disable 'extended DES crypt' */
/* #define DISABLE_XDES */
/* crypt-blowfish.c */
-char *_crypt_blowfish_rn(const char *key, const char *setting,
- char *output, int size);
+char *_crypt_blowfish_rn(const char *key, const char *setting,
+ char *output, int size);
/* crypt-des.c */
char *px_crypt_des(const char *key, const char *setting);
/* crypt-md5.c */
-char *px_crypt_md5(const char *pw, const char *salt,
- char *dst, unsigned dstlen);
+char *px_crypt_md5(const char *pw, const char *salt,
+ char *dst, unsigned dstlen);
#endif /* _PX_CRYPT_H */
diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c
index aea8e863af0..0f02fb56c4f 100644
--- a/contrib/pgcrypto/px.c
+++ b/contrib/pgcrypto/px.c
@@ -56,7 +56,7 @@ static const struct error_desc px_err_list[] = {
{PXE_UNKNOWN_SALT_ALGO, "Unknown salt algorithm"},
{PXE_BAD_SALT_ROUNDS, "Incorrect number of rounds"},
{PXE_MCRYPT_INTERNAL, "mcrypt internal error"},
- {PXE_NO_RANDOM, "No strong random source"},
+ {PXE_NO_RANDOM, "Failed to generate strong random bits"},
{PXE_DECRYPT_FAILED, "Decryption failed"},
{PXE_PGP_CORRUPT_DATA, "Wrong key or corrupt data"},
{PXE_PGP_CORRUPT_ARMOR, "Corrupt ascii-armor"},
@@ -97,17 +97,9 @@ px_THROW_ERROR(int err)
{
if (err == PXE_NO_RANDOM)
{
-#ifdef HAVE_STRONG_RANDOM
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not generate a random number")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("generating random data is not supported by this build"),
- errdetail("This functionality requires a source of strong random numbers."),
- errhint("You need to rebuild PostgreSQL using --enable-strong-random.")));
-#endif
}
else
{
diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h
index cef9c4b4565..0d4722a04a0 100644
--- a/contrib/pgcrypto/px.h
+++ b/contrib/pgcrypto/px.h
@@ -50,9 +50,6 @@ void *px_realloc(void *p, size_t s);
void px_free(void *p);
#endif
-/* max len of 'type' parms */
-#define PX_MAX_NAMELEN 128
-
/* max salt returned */
#define PX_MAX_SALT_LEN 128
diff --git a/contrib/pgcrypto/sha1.c b/contrib/pgcrypto/sha1.c
index fb6a57d917c..64671ac64d9 100644
--- a/contrib/pgcrypto/sha1.c
+++ b/contrib/pgcrypto/sha1.c
@@ -59,16 +59,6 @@ static uint32 _K[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
#define BCOUNT (ctxt->c.b64[0] / 8)
#define W(n) (ctxt->m.b32[(n)])
-#define PUTBYTE(x) \
-do { \
- ctxt->m.b8[(COUNT % 64)] = (x); \
- COUNT++; \
- COUNT %= 64; \
- ctxt->c.b64[0] += 8; \
- if (COUNT % 64 == 0) \
- sha1_step(ctxt); \
-} while (0)
-
#define PUTPAD(x) \
do { \
ctxt->m.b8[(COUNT % 64)] = (x); \
diff --git a/contrib/pgcrypto/sql/hmac-sha1.sql b/contrib/pgcrypto/sql/hmac-sha1.sql
index 6f741934bfd..3bc965578c5 100644
--- a/contrib/pgcrypto/sql/hmac-sha1.sql
+++ b/contrib/pgcrypto/sql/hmac-sha1.sql
@@ -1,5 +1,5 @@
--
--- HMAC-MD5
+-- HMAC-SHA1
--
SELECT encode(hmac(
diff --git a/contrib/pgcrypto/sql/pgp-decrypt.sql b/contrib/pgcrypto/sql/pgp-decrypt.sql
index f46a18f8cfd..557948d7c75 100644
--- a/contrib/pgcrypto/sql/pgp-decrypt.sql
+++ b/contrib/pgcrypto/sql/pgp-decrypt.sql
@@ -1,5 +1,5 @@
--
--- pgp_descrypt tests
+-- pgp decrypt tests
--
-- Checking ciphers
diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c
index 94e051d642b..a2c44a916cf 100644
--- a/contrib/pgrowlocks/pgrowlocks.c
+++ b/contrib/pgrowlocks/pgrowlocks.c
@@ -24,10 +24,13 @@
#include "postgres.h"
+#include "access/heapam.h"
#include "access/multixact.h"
#include "access/relscan.h"
+#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
+#include "catalog/pg_am_d.h"
#include "catalog/pg_authid.h"
#include "funcapi.h"
#include "miscadmin.h"
@@ -37,7 +40,6 @@
#include "utils/builtins.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
-#include "utils/tqual.h"
#include "utils/varlena.h"
PG_MODULE_MAGIC;
@@ -55,7 +57,7 @@ PG_FUNCTION_INFO_V1(pgrowlocks);
typedef struct
{
Relation rel;
- HeapScanDesc scan;
+ TableScanDesc scan;
int ncolumns;
} MyData;
@@ -70,7 +72,8 @@ Datum
pgrowlocks(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- HeapScanDesc scan;
+ TableScanDesc scan;
+ HeapScanDesc hscan;
HeapTuple tuple;
TupleDesc tupdesc;
AttInMetadata *attinmeta;
@@ -99,6 +102,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
+ if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("only heap AM is supported")));
+
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -124,7 +131,8 @@ pgrowlocks(PG_FUNCTION_ARGS)
aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
RelationGetRelationName(rel));
- scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
+ scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
+ hscan = (HeapScanDesc) scan;
mydata = palloc(sizeof(*mydata));
mydata->rel = rel;
mydata->scan = scan;
@@ -138,27 +146,28 @@ pgrowlocks(PG_FUNCTION_ARGS)
attinmeta = funcctx->attinmeta;
mydata = (MyData *) funcctx->user_fctx;
scan = mydata->scan;
+ hscan = (HeapScanDesc) scan;
/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- HTSU_Result htsu;
+ TM_Result htsu;
TransactionId xmax;
uint16 infomask;
/* must hold a buffer lock to call HeapTupleSatisfiesUpdate */
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
htsu = HeapTupleSatisfiesUpdate(tuple,
GetCurrentCommandId(false),
- scan->rs_cbuf);
+ hscan->rs_cbuf);
xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
infomask = tuple->t_data->t_infomask;
/*
- * A tuple is locked if HTSU returns BeingUpdated.
+ * A tuple is locked if HTSU returns BeingModified.
*/
- if (htsu == HeapTupleBeingUpdated)
+ if (htsu == TM_BeingModified)
{
char **values;
@@ -284,7 +293,7 @@ pgrowlocks(PG_FUNCTION_ARGS)
BackendXidGetPid(xmax));
}
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
/* build a tuple */
tuple = BuildTupleFromCStrings(attinmeta, values);
@@ -301,12 +310,12 @@ pgrowlocks(PG_FUNCTION_ARGS)
}
else
{
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
}
}
- heap_endscan(scan);
- heap_close(mydata->rel, AccessShareLock);
+ table_endscan(scan);
+ table_close(mydata->rel, AccessShareLock);
SRF_RETURN_DONE(funcctx);
}
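The pgrowlocks conversion above illustrates the PG12 table access method split: scans are started through the generic table_beginscan()/table_endscan() API, while code that still pokes at heap internals (rs_cbuf, rs_nblocks) downcasts to HeapScanDesc after confirming the relation uses the heap AM. A condensed sketch of that pattern, using only calls that appear in the diff:

    #include "postgres.h"

    #include "access/heapam.h"
    #include "access/tableam.h"
    #include "catalog/pg_am_d.h"
    #include "storage/bufmgr.h"
    #include "utils/rel.h"
    #include "utils/snapmgr.h"

    static void
    scan_heap_only(Relation rel)
    {
        TableScanDesc scan;
        HeapScanDesc hscan;
        HeapTuple   tuple;

        if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("only heap AM is supported")));

        scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
        hscan = (HeapScanDesc) scan;    /* heap-specific fields live here */

        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
            LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
            /* ... examine the tuple under the buffer lock ... */
            LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
        }

        table_endscan(scan);
    }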
diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out
index a7087f6d457..9920dbfd408 100644
--- a/contrib/pgstattuple/expected/pgstattuple.out
+++ b/contrib/pgstattuple/expected/pgstattuple.out
@@ -48,7 +48,7 @@ select version, tree_level,
from pgstatindex('test_pkey');
version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
- 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+ 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
(1 row)
select version, tree_level,
@@ -58,7 +58,7 @@ select version, tree_level,
from pgstatindex('test_pkey'::text);
version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
- 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+ 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
(1 row)
select version, tree_level,
@@ -68,7 +68,7 @@ select version, tree_level,
from pgstatindex('test_pkey'::name);
version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
- 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+ 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
(1 row)
select version, tree_level,
@@ -78,7 +78,7 @@ select version, tree_level,
from pgstatindex('test_pkey'::regclass);
version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
- 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+ 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
(1 row)
select pg_relpages('test');
@@ -141,20 +141,23 @@ select * from pgstathashindex('test_hashidx');
select pgstatginindex('test_pkey');
ERROR: relation "test_pkey" is not a GIN index
select pgstathashindex('test_pkey');
-ERROR: relation "test_pkey" is not a HASH index
+ERROR: relation "test_pkey" is not a hash index
select pgstatindex('test_ginidx');
ERROR: relation "test_ginidx" is not a btree index
select pgstathashindex('test_ginidx');
-ERROR: relation "test_ginidx" is not a HASH index
+ERROR: relation "test_ginidx" is not a hash index
select pgstatindex('test_hashidx');
ERROR: relation "test_hashidx" is not a btree index
select pgstatginindex('test_hashidx');
ERROR: relation "test_hashidx" is not a GIN index
-- check that using any of these functions with unsupported relations will fail
create table test_partitioned (a int) partition by range (a);
+create index test_partitioned_index on test_partitioned(a);
-- these should all fail
select pgstattuple('test_partitioned');
ERROR: "test_partitioned" (partitioned table) is not supported
+select pgstattuple('test_partitioned_index');
+ERROR: "test_partitioned_index" (partitioned index) is not supported
select pgstattuple_approx('test_partitioned');
ERROR: "test_partitioned" is not a table or materialized view
select pg_relpages('test_partitioned');
@@ -229,7 +232,7 @@ create index test_partition_hash_idx on test_partition using hash (a);
select pgstatindex('test_partition_idx');
pgstatindex
------------------------------
- (3,0,8192,0,0,0,0,0,NaN,NaN)
+ (4,0,8192,0,0,0,0,0,NaN,NaN)
(1 row)
select pgstathashindex('test_partition_hash_idx');
diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c
index ef33cacec6a..636c8d40aca 100644
--- a/contrib/pgstattuple/pgstatapprox.c
+++ b/contrib/pgstattuple/pgstatapprox.c
@@ -3,7 +3,7 @@
* pgstatapprox.c
* Bloat estimation functions
*
- * Copyright (c) 2014-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2014-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pgstattuple/pgstatapprox.c
@@ -12,12 +12,16 @@
*/
#include "postgres.h"
-#include "access/visibilitymap.h"
+#include "access/heapam.h"
+#include "access/relation.h"
#include "access/transam.h"
+#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/multixact.h"
#include "access/htup_details.h"
#include "catalog/namespace.h"
+#include "catalog/pg_am_d.h"
+#include "commands/vacuum.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
@@ -25,8 +29,6 @@
#include "storage/procarray.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
-#include "utils/tqual.h"
-#include "commands/vacuum.h"
PG_FUNCTION_INFO_V1(pgstattuple_approx);
PG_FUNCTION_INFO_V1(pgstattuple_approx_v1_5);
@@ -287,6 +289,10 @@ pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo)
errmsg("\"%s\" is not a table or materialized view",
RelationGetRelationName(rel))));
+ if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("only heap AM is supported")));
+
statapprox_heap(rel, &stat);
relation_close(rel, AccessShareLock);
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 75317b96a2f..4bae176e09e 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -28,10 +28,11 @@
#include "postgres.h"
#include "access/gin_private.h"
-#include "access/heapam.h"
#include "access/hash.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
+#include "access/relation.h"
+#include "access/table.h"
#include "catalog/namespace.h"
#include "catalog/pg_am.h"
#include "funcapi.h"
@@ -601,10 +602,9 @@ pgstathashindex(PG_FUNCTION_ARGS)
if (!IS_HASH(rel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("relation \"%s\" is not a HASH index",
+ errmsg("relation \"%s\" is not a hash index",
RelationGetRelationName(rel))));
-
/*
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
@@ -727,7 +727,7 @@ pgstathashindex(PG_FUNCTION_ARGS)
}
/* -------------------------------------------------
- * GetHashPageStatis()
+ * GetHashPageStats()
*
* Collect statistics of single hash page
* -------------------------------------------------
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index b599b6ca21a..70af43ebd5a 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -26,16 +26,17 @@
#include "access/gist_private.h"
#include "access/hash.h"
+#include "access/heapam.h"
#include "access/nbtree.h"
#include "access/relscan.h"
+#include "access/tableam.h"
#include "catalog/namespace.h"
-#include "catalog/pg_am.h"
+#include "catalog/pg_am_d.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
-#include "utils/tqual.h"
#include "utils/varlena.h"
PG_MODULE_MAGIC;
@@ -65,22 +66,22 @@ typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber,
BufferAccessStrategy);
static Datum build_pgstattuple_type(pgstattuple_type *stat,
- FunctionCallInfo fcinfo);
+ FunctionCallInfo fcinfo);
static Datum pgstat_relation(Relation rel, FunctionCallInfo fcinfo);
static Datum pgstat_heap(Relation rel, FunctionCallInfo fcinfo);
static void pgstat_btree_page(pgstattuple_type *stat,
- Relation rel, BlockNumber blkno,
- BufferAccessStrategy bstrategy);
+ Relation rel, BlockNumber blkno,
+ BufferAccessStrategy bstrategy);
static void pgstat_hash_page(pgstattuple_type *stat,
- Relation rel, BlockNumber blkno,
- BufferAccessStrategy bstrategy);
+ Relation rel, BlockNumber blkno,
+ BufferAccessStrategy bstrategy);
static void pgstat_gist_page(pgstattuple_type *stat,
- Relation rel, BlockNumber blkno,
- BufferAccessStrategy bstrategy);
+ Relation rel, BlockNumber blkno,
+ BufferAccessStrategy bstrategy);
static Datum pgstat_index(Relation rel, BlockNumber start,
- pgstat_page pagefn, FunctionCallInfo fcinfo);
+ pgstat_page pagefn, FunctionCallInfo fcinfo);
static void pgstat_index_page(pgstattuple_type *stat, Page page,
- OffsetNumber minoff, OffsetNumber maxoff);
+ OffsetNumber minoff, OffsetNumber maxoff);
/*
* build_pgstattuple_type -- build a pgstattuple_type tuple
@@ -296,6 +297,9 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo)
case RELKIND_PARTITIONED_TABLE:
err = "partitioned table";
break;
+ case RELKIND_PARTITIONED_INDEX:
+ err = "partitioned index";
+ break;
default:
err = "unknown";
break;
@@ -314,7 +318,8 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo)
static Datum
pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
{
- HeapScanDesc scan;
+ TableScanDesc scan;
+ HeapScanDesc hscan;
HeapTuple tuple;
BlockNumber nblocks;
BlockNumber block = 0; /* next block to count free space in */
@@ -323,11 +328,18 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
pgstattuple_type stat = {0};
SnapshotData SnapshotDirty;
+ if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("only heap AM is supported")));
+
/* Disable syncscan because we assume we scan from block zero upwards */
- scan = heap_beginscan_strat(rel, SnapshotAny, 0, NULL, true, false);
+ scan = table_beginscan_strat(rel, SnapshotAny, 0, NULL, true, false);
+ hscan = (HeapScanDesc) scan;
+
InitDirtySnapshot(SnapshotDirty);
- nblocks = scan->rs_nblocks; /* # blocks to be scanned */
+ nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -335,9 +347,9 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
CHECK_FOR_INTERRUPTS();
/* must hold a buffer lock to call HeapTupleSatisfiesVisibility */
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
- if (HeapTupleSatisfiesVisibility(tuple, &SnapshotDirty, scan->rs_cbuf))
+ if (HeapTupleSatisfiesVisibility(tuple, &SnapshotDirty, hscan->rs_cbuf))
{
stat.tuple_len += tuple->t_len;
stat.tuple_count++;
@@ -348,7 +360,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
stat.dead_tuple_count++;
}
- LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
/*
* To avoid physically reading the table twice, try to do the
@@ -363,7 +375,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
CHECK_FOR_INTERRUPTS();
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block,
- RBM_NORMAL, scan->rs_strategy);
+ RBM_NORMAL, hscan->rs_strategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer));
UnlockReleaseBuffer(buffer);
@@ -376,14 +388,14 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
CHECK_FOR_INTERRUPTS();
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block,
- RBM_NORMAL, scan->rs_strategy);
+ RBM_NORMAL, hscan->rs_strategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer));
UnlockReleaseBuffer(buffer);
block++;
}
- heap_endscan(scan);
+ table_endscan(scan);
relation_close(rel, AccessShareLock);
stat.table_len = (uint64) nblocks * BLCKSZ;
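
/*
 * Sketch of the scan-API conversion performed in pgstat_heap above
 * (assuming the heap AM, which the function now enforces):
 * table_beginscan_strat() returns the AM-independent TableScanDesc, and
 * heap-specific fields such as rs_cbuf, rs_nblocks and rs_strategy are
 * reached by downcasting to HeapScanDesc.
 */
TableScanDesc scan;
HeapScanDesc hscan;
HeapTuple	tuple;

scan = table_beginscan_strat(rel, SnapshotAny, 0, NULL, true, false);
hscan = (HeapScanDesc) scan;	/* heap-only fields live here */

while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
	LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
	/* ... inspect the tuple while holding the buffer lock ... */
	LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
}
table_endscan(scan);
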
diff --git a/contrib/pgstattuple/sql/pgstattuple.sql b/contrib/pgstattuple/sql/pgstattuple.sql
index a8e341e3518..cfa540302da 100644
--- a/contrib/pgstattuple/sql/pgstattuple.sql
+++ b/contrib/pgstattuple/sql/pgstattuple.sql
@@ -64,8 +64,10 @@ select pgstatginindex('test_hashidx');
-- check that using any of these functions with unsupported relations will fail
create table test_partitioned (a int) partition by range (a);
+create index test_partitioned_index on test_partitioned(a);
-- these should all fail
select pgstattuple('test_partitioned');
+select pgstattuple('test_partitioned_index');
select pgstattuple_approx('test_partitioned');
select pg_relpages('test_partitioned');
select pgstatindex('test_partitioned');
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index fe4893a8e05..57ed5f4b905 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -3,7 +3,7 @@
* connection.c
* Connection management functions for postgres_fdw
*
- * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/connection.c
@@ -15,8 +15,8 @@
#include "postgres_fdw.h"
#include "access/htup_details.h"
-#include "catalog/pg_user_mapping.h"
#include "access/xact.h"
+#include "catalog/pg_user_mapping.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "pgstat.h"
@@ -81,16 +81,16 @@ static void do_sql_command(PGconn *conn, const char *sql);
static void begin_remote_xact(ConnCacheEntry *entry);
static void pgfdw_xact_callback(XactEvent event, void *arg);
static void pgfdw_subxact_callback(SubXactEvent event,
- SubTransactionId mySubid,
- SubTransactionId parentSubid,
- void *arg);
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid,
+ void *arg);
static void pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue);
static void pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry);
static bool pgfdw_cancel_query(PGconn *conn);
static bool pgfdw_exec_cleanup_query(PGconn *conn, const char *query,
- bool ignore_errors);
+ bool ignore_errors);
static bool pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime,
- PGresult **result);
+ PGresult **result);
/*
@@ -546,7 +546,8 @@ pgfdw_get_result(PGconn *conn, const char *query)
/* Sleep until there's something to do */
wc = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_SOCKET_READABLE,
+ WL_LATCH_SET | WL_SOCKET_READABLE |
+ WL_EXIT_ON_PM_DEATH,
PQsocket(conn),
-1L, PG_WAIT_EXTENSION);
ResetLatch(MyLatch);
@@ -1152,7 +1153,8 @@ pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime, PGresult **result)
/* Sleep until there's something to do */
wc = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_SOCKET_READABLE | WL_TIMEOUT,
+ WL_LATCH_SET | WL_SOCKET_READABLE |
+ WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
PQsocket(conn),
cur_timeout, PG_WAIT_EXTENSION);
ResetLatch(MyLatch);
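
/*
 * Sketch of the wait-loop idiom after the two hunks above: the
 * WL_EXIT_ON_PM_DEATH event makes the latch machinery terminate the
 * process on postmaster death, so callers no longer need to watch for
 * WL_POSTMASTER_DEATH themselves.
 */
int			wc;

wc = WaitLatchOrSocket(MyLatch,
					   WL_LATCH_SET | WL_SOCKET_READABLE |
					   WL_EXIT_ON_PM_DEATH,
					   PQsocket(conn),
					   -1L,			/* or cur_timeout plus WL_TIMEOUT */
					   PG_WAIT_EXTENSION);
ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
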
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 6e2fa1420c4..431c34a4246 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -24,7 +24,7 @@
* with collations that match the remote table's columns, which we can
* consider to be user error.
*
- * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/deparse.c
@@ -35,9 +35,9 @@
#include "postgres_fdw.h"
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_namespace.h"
@@ -48,10 +48,9 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/plannodes.h"
-#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
#include "optimizer/prep.h"
#include "optimizer/tlist.h"
-#include "optimizer/var.h"
#include "parser/parsetree.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@@ -117,81 +116,84 @@ typedef struct deparse_expr_cxt
* remote server.
*/
static bool foreign_expr_walker(Node *node,
- foreign_glob_cxt *glob_cxt,
- foreign_loc_cxt *outer_cxt);
+ foreign_glob_cxt *glob_cxt,
+ foreign_loc_cxt *outer_cxt);
static char *deparse_type_name(Oid type_oid, int32 typemod);
/*
* Functions to construct string representation of a node tree.
*/
static void deparseTargetList(StringInfo buf,
- PlannerInfo *root,
- Index rtindex,
- Relation rel,
- bool is_returning,
- Bitmapset *attrs_used,
- bool qualify_col,
- List **retrieved_attrs);
+ RangeTblEntry *rte,
+ Index rtindex,
+ Relation rel,
+ bool is_returning,
+ Bitmapset *attrs_used,
+ bool qualify_col,
+ List **retrieved_attrs);
static void deparseExplicitTargetList(List *tlist,
- bool is_returning,
- List **retrieved_attrs,
- deparse_expr_cxt *context);
+ bool is_returning,
+ List **retrieved_attrs,
+ deparse_expr_cxt *context);
static void deparseSubqueryTargetList(deparse_expr_cxt *context);
-static void deparseReturningList(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- bool trig_after_row,
- List *returningList,
- List **retrieved_attrs);
+static void deparseReturningList(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ bool trig_after_row,
+ List *withCheckOptionList,
+ List *returningList,
+ List **retrieved_attrs);
static void deparseColumnRef(StringInfo buf, int varno, int varattno,
- PlannerInfo *root, bool qualify_col);
+ RangeTblEntry *rte, bool qualify_col);
static void deparseRelation(StringInfo buf, Relation rel);
static void deparseExpr(Expr *expr, deparse_expr_cxt *context);
static void deparseVar(Var *node, deparse_expr_cxt *context);
static void deparseConst(Const *node, deparse_expr_cxt *context, int showtype);
static void deparseParam(Param *node, deparse_expr_cxt *context);
-static void deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context);
+static void deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context);
static void deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context);
static void deparseOpExpr(OpExpr *node, deparse_expr_cxt *context);
static void deparseOperatorName(StringInfo buf, Form_pg_operator opform);
static void deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context);
static void deparseScalarArrayOpExpr(ScalarArrayOpExpr *node,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
static void deparseRelabelType(RelabelType *node, deparse_expr_cxt *context);
static void deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context);
static void deparseNullTest(NullTest *node, deparse_expr_cxt *context);
static void deparseArrayExpr(ArrayExpr *node, deparse_expr_cxt *context);
static void printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
static void printRemotePlaceholder(Oid paramtype, int32 paramtypmod,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
static void deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
static void deparseLockingClause(deparse_expr_cxt *context);
-static void appendOrderByClause(List *pathkeys, deparse_expr_cxt *context);
+static void appendOrderByClause(List *pathkeys, bool has_final_sort,
+ deparse_expr_cxt *context);
+static void appendLimitClause(deparse_expr_cxt *context);
static void appendConditions(List *exprs, deparse_expr_cxt *context);
static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root,
- RelOptInfo *foreignrel, bool use_alias,
- Index ignore_rel, List **ignore_conds,
- List **params_list);
+ RelOptInfo *foreignrel, bool use_alias,
+ Index ignore_rel, List **ignore_conds,
+ List **params_list);
static void deparseFromExpr(List *quals, deparse_expr_cxt *context);
static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root,
- RelOptInfo *foreignrel, bool make_subquery,
- Index ignore_rel, List **ignore_conds, List **params_list);
+ RelOptInfo *foreignrel, bool make_subquery,
+ Index ignore_rel, List **ignore_conds, List **params_list);
static void deparseAggref(Aggref *node, deparse_expr_cxt *context);
static void appendGroupByClause(List *tlist, deparse_expr_cxt *context);
static void appendAggOrderBy(List *orderList, List *targetList,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
static void appendFunctionName(Oid funcid, deparse_expr_cxt *context);
static Node *deparseSortGroupClause(Index ref, List *tlist, bool force_colno,
- deparse_expr_cxt *context);
+ deparse_expr_cxt *context);
/*
* Helper functions
*/
static bool is_subquery_var(Var *node, RelOptInfo *foreignrel,
- int *relno, int *colno);
+ int *relno, int *colno);
static void get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
- int *relno, int *colno);
+ int *relno, int *colno);
/*
@@ -331,14 +333,13 @@ foreign_expr_walker(Node *node,
/* Var belongs to foreign table */
/*
- * System columns other than ctid and oid should not be
- * sent to the remote, since we don't make any effort to
- * ensure that local and remote values match (tableoid, in
+ * System columns other than ctid should not be sent to
+ * the remote, since we don't make any effort to ensure
+ * that local and remote values match (tableoid, in
* particular, almost certainly doesn't match).
*/
if (var->varattno < 0 &&
- var->varattno != SelfItemPointerAttributeNumber &&
- var->varattno != ObjectIdAttributeNumber)
+ var->varattno != SelfItemPointerAttributeNumber)
return false;
/* Else check the collation */
@@ -402,34 +403,34 @@ foreign_expr_walker(Node *node,
state = FDW_COLLATE_UNSAFE;
}
break;
- case T_ArrayRef:
+ case T_SubscriptingRef:
{
- ArrayRef *ar = (ArrayRef *) node;
+ SubscriptingRef *sr = (SubscriptingRef *) node;
/* Assignment should not be in restrictions. */
- if (ar->refassgnexpr != NULL)
+ if (sr->refassgnexpr != NULL)
return false;
/*
- * Recurse to remaining subexpressions. Since the array
+ * Recurse to remaining subexpressions. Since the container
* subscripts must yield (noncollatable) integers, they won't
* affect the inner_cxt state.
*/
- if (!foreign_expr_walker((Node *) ar->refupperindexpr,
+ if (!foreign_expr_walker((Node *) sr->refupperindexpr,
glob_cxt, &inner_cxt))
return false;
- if (!foreign_expr_walker((Node *) ar->reflowerindexpr,
+ if (!foreign_expr_walker((Node *) sr->reflowerindexpr,
glob_cxt, &inner_cxt))
return false;
- if (!foreign_expr_walker((Node *) ar->refexpr,
+ if (!foreign_expr_walker((Node *) sr->refexpr,
glob_cxt, &inner_cxt))
return false;
/*
- * Array subscripting should yield same collation as input,
- * but for safety use same logic as for function nodes.
+ * Container subscripting should yield same collation as
+ * input, but for safety use same logic as for function nodes.
*/
- collation = ar->refcollid;
+ collation = sr->refcollid;
if (collation == InvalidOid)
state = FDW_COLLATE_NONE;
else if (inner_cxt.state == FDW_COLLATE_SAFE &&
@@ -840,6 +841,55 @@ foreign_expr_walker(Node *node,
return true;
}
+/*
+ * Returns true if given expr is something we'd have to send the value of
+ * to the foreign server.
+ *
+ * This should return true when the expression is a shippable node that
+ * deparseExpr would add to context->params_list. Note that we don't care
+ * if the expression *contains* such a node, only whether one appears at top
+ * level. We need this to detect cases where setrefs.c would recognize a
+ * false match between an fdw_exprs item (which came from the params_list)
+ * and an entry in fdw_scan_tlist (which we're considering putting the given
+ * expression into).
+ */
+bool
+is_foreign_param(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr)
+{
+ if (expr == NULL)
+ return false;
+
+ switch (nodeTag(expr))
+ {
+ case T_Var:
+ {
+ /* It would have to be sent unless it's a foreign Var */
+ Var *var = (Var *) expr;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) (baserel->fdw_private);
+ Relids relids;
+
+ if (IS_UPPER_REL(baserel))
+ relids = fpinfo->outerrel->relids;
+ else
+ relids = baserel->relids;
+
+ if (bms_is_member(var->varno, relids) && var->varlevelsup == 0)
+ return false; /* foreign Var, so not a param */
+ else
+ return true; /* it'd have to be a param */
+ break;
+ }
+ case T_Param:
+ /* Params always have to be sent to the foreign server */
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
/*
* Convert type OID + typmod info into a type name we can ship to the remote
* server. Someplace else had better have verified that this type name is
@@ -930,8 +980,8 @@ build_tlist_to_deparse(RelOptInfo *foreignrel)
void
deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *rel,
List *tlist, List *remote_conds, List *pathkeys,
- bool is_subquery, List **retrieved_attrs,
- List **params_list)
+ bool has_final_sort, bool has_limit, bool is_subquery,
+ List **retrieved_attrs, List **params_list)
{
deparse_expr_cxt context;
PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) rel->fdw_private;
@@ -986,7 +1036,11 @@ deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *rel,
/* Add ORDER BY clause if we found any useful pathkeys */
if (pathkeys)
- appendOrderByClause(pathkeys, &context);
+ appendOrderByClause(pathkeys, has_final_sort, &context);
+
+ /* Add LIMIT clause if necessary */
+ if (has_limit)
+ appendLimitClause(&context);
/* Add any necessary FOR UPDATE/SHARE. */
deparseLockingClause(&context);
@@ -1048,11 +1102,11 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
* Core code already has some lock on each rel being planned, so we
* can use NoLock here.
*/
- Relation rel = heap_open(rte->relid, NoLock);
+ Relation rel = table_open(rte->relid, NoLock);
- deparseTargetList(buf, root, foreignrel->relid, rel, false,
+ deparseTargetList(buf, rte, foreignrel->relid, rel, false,
fpinfo->attrs_used, false, retrieved_attrs);
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
}
}
@@ -1076,7 +1130,7 @@ deparseFromExpr(List *quals, deparse_expr_cxt *context)
/* Construct FROM clause */
appendStringInfoString(buf, " FROM ");
deparseFromExprForRel(buf, context->root, scanrel,
- (bms_num_members(scanrel->relids) > 1),
+ (bms_membership(scanrel->relids) == BMS_MULTIPLE),
(Index) 0, NULL, context->params_list);
/* Construct WHERE clause */
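
/*
 * The bms_num_members() -> bms_membership() substitutions in this file
 * are a small idiom change: when the only question is "more than one
 * member?", bms_membership() can answer without counting every bit.
 */
bool		multiple = (bms_membership(scanrel->relids) == BMS_MULTIPLE);
/* equivalent to, but cheaper than: bms_num_members(scanrel->relids) > 1 */
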
@@ -1099,7 +1153,7 @@ deparseFromExpr(List *quals, deparse_expr_cxt *context)
*/
static void
deparseTargetList(StringInfo buf,
- PlannerInfo *root,
+ RangeTblEntry *rte,
Index rtindex,
Relation rel,
bool is_returning,
@@ -1137,15 +1191,15 @@ deparseTargetList(StringInfo buf,
appendStringInfoString(buf, " RETURNING ");
first = false;
- deparseColumnRef(buf, rtindex, i, root, qualify_col);
+ deparseColumnRef(buf, rtindex, i, rte, qualify_col);
*retrieved_attrs = lappend_int(*retrieved_attrs, i);
}
}
/*
- * Add ctid and oid if needed. We currently don't support retrieving any
- * other system columns.
+ * Add ctid if needed. We currently don't support retrieving any other
+ * system columns.
*/
if (bms_is_member(SelfItemPointerAttributeNumber - FirstLowInvalidHeapAttributeNumber,
attrs_used))
@@ -1163,22 +1217,6 @@ deparseTargetList(StringInfo buf,
*retrieved_attrs = lappend_int(*retrieved_attrs,
SelfItemPointerAttributeNumber);
}
- if (bms_is_member(ObjectIdAttributeNumber - FirstLowInvalidHeapAttributeNumber,
- attrs_used))
- {
- if (!first)
- appendStringInfoString(buf, ", ");
- else if (is_returning)
- appendStringInfoString(buf, " RETURNING ");
- first = false;
-
- if (qualify_col)
- ADD_REL_QUALIFIER(buf, rtindex);
- appendStringInfoString(buf, "oid");
-
- *retrieved_attrs = lappend_int(*retrieved_attrs,
- ObjectIdAttributeNumber);
- }
/* Don't generate bad syntax if no undropped columns */
if (first && !is_returning)
@@ -1262,7 +1300,7 @@ deparseLockingClause(deparse_expr_cxt *context)
}
/* Add the relation alias if we are here for a join relation */
- if (bms_num_members(rel->relids) > 1 &&
+ if (bms_membership(rel->relids) == BMS_MULTIPLE &&
rc->strength != LCS_NONE)
appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid);
}
@@ -1459,7 +1497,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
if (fpinfo->jointype == JOIN_INNER)
{
*ignore_conds = list_concat(*ignore_conds,
- list_copy(fpinfo->joinclauses));
+ fpinfo->joinclauses);
fpinfo->joinclauses = NIL;
}
@@ -1493,7 +1531,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
{
Assert(fpinfo->jointype == JOIN_INNER);
Assert(fpinfo->joinclauses == NIL);
- appendStringInfo(buf, "%s", join_sql_o.data);
+ appendBinaryStringInfo(buf, join_sql_o.data, join_sql_o.len);
return;
}
}
@@ -1514,7 +1552,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
{
Assert(fpinfo->jointype == JOIN_INNER);
Assert(fpinfo->joinclauses == NIL);
- appendStringInfo(buf, "%s", join_sql_i.data);
+ appendBinaryStringInfo(buf, join_sql_i.data, join_sql_i.len);
return;
}
}
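
/*
 * Likewise a micro-idiom in the two hunks above: when the string's length
 * is already tracked by the StringInfo, appendBinaryStringInfo() copies
 * the bytes directly instead of routing them through appendStringInfo()'s
 * printf-style format parsing.
 */
appendBinaryStringInfo(buf, join_sql_o.data, join_sql_o.len);
/* replaces: appendStringInfo(buf, "%s", join_sql_o.data); */
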
@@ -1559,7 +1597,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
* Core code already has some lock on each rel being planned, so we
* can use NoLock here.
*/
- Relation rel = heap_open(rte->relid, NoLock);
+ Relation rel = table_open(rte->relid, NoLock);
deparseRelation(buf, rel);
@@ -1571,7 +1609,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
if (use_alias)
appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, foreignrel->relid);
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
}
}
@@ -1607,7 +1645,8 @@ deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
/* Deparse the subquery representing the relation. */
appendStringInfoChar(buf, '(');
deparseSelectStmtForRel(buf, root, foreignrel, NIL,
- fpinfo->remote_conds, NIL, true,
+ fpinfo->remote_conds, NIL,
+ false, false, true,
&retrieved_attrs, params_list);
appendStringInfoChar(buf, ')');
@@ -1645,14 +1684,15 @@ deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
* deparse remote INSERT statement
*
* The statement text is appended to buf, and we also create an integer List
- * of the columns being retrieved by RETURNING (if any), which is returned
- * to *retrieved_attrs.
+ * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any),
+ * which is returned to *retrieved_attrs.
*/
void
-deparseInsertSql(StringInfo buf, PlannerInfo *root,
+deparseInsertSql(StringInfo buf, RangeTblEntry *rte,
Index rtindex, Relation rel,
List *targetAttrs, bool doNothing,
- List *returningList, List **retrieved_attrs)
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs)
{
AttrNumber pindex;
bool first;
@@ -1674,7 +1714,7 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root,
appendStringInfoString(buf, ", ");
first = false;
- deparseColumnRef(buf, rtindex, attnum, root, false);
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
}
appendStringInfoString(buf, ") VALUES (");
@@ -1699,22 +1739,23 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root,
if (doNothing)
appendStringInfoString(buf, " ON CONFLICT DO NOTHING");
- deparseReturningList(buf, root, rtindex, rel,
+ deparseReturningList(buf, rte, rtindex, rel,
rel->trigdesc && rel->trigdesc->trig_insert_after_row,
- returningList, retrieved_attrs);
+ withCheckOptionList, returningList, retrieved_attrs);
}
/*
* deparse remote UPDATE statement
*
* The statement text is appended to buf, and we also create an integer List
- * of the columns being retrieved by RETURNING (if any), which is returned
- * to *retrieved_attrs.
+ * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any),
+ * which is returned to *retrieved_attrs.
*/
void
-deparseUpdateSql(StringInfo buf, PlannerInfo *root,
+deparseUpdateSql(StringInfo buf, RangeTblEntry *rte,
Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
+ List *targetAttrs,
+ List *withCheckOptionList, List *returningList,
List **retrieved_attrs)
{
AttrNumber pindex;
@@ -1735,15 +1776,15 @@ deparseUpdateSql(StringInfo buf, PlannerInfo *root,
appendStringInfoString(buf, ", ");
first = false;
- deparseColumnRef(buf, rtindex, attnum, root, false);
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
appendStringInfo(buf, " = $%d", pindex);
pindex++;
}
appendStringInfoString(buf, " WHERE ctid = $1");
- deparseReturningList(buf, root, rtindex, rel,
+ deparseReturningList(buf, rte, rtindex, rel,
rel->trigdesc && rel->trigdesc->trig_update_after_row,
- returningList, retrieved_attrs);
+ withCheckOptionList, returningList, retrieved_attrs);
}
/*
@@ -1777,6 +1818,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
int nestlevel;
bool first;
ListCell *lc;
+ RangeTblEntry *rte = planner_rt_fetch(rtindex, root);
/* Set up context struct for recursion */
context.root = root;
@@ -1808,7 +1850,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
appendStringInfoString(buf, ", ");
first = false;
- deparseColumnRef(buf, rtindex, attnum, root, false);
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
appendStringInfoString(buf, " = ");
deparseExpr((Expr *) tle->expr, &context);
}
@@ -1819,7 +1861,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
{
List *ignore_conds = NIL;
- appendStringInfo(buf, " FROM ");
+ appendStringInfoString(buf, " FROM ");
deparseFromExprForRel(buf, root, foreignrel, true, rtindex,
&ignore_conds, params_list);
remote_conds = list_concat(remote_conds, ignore_conds);
@@ -1835,8 +1877,8 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
deparseExplicitTargetList(returningList, true, retrieved_attrs,
&context);
else
- deparseReturningList(buf, root, rtindex, rel, false,
- returningList, retrieved_attrs);
+ deparseReturningList(buf, rte, rtindex, rel, false,
+ NIL, returningList, retrieved_attrs);
}
/*
@@ -1847,7 +1889,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
* to *retrieved_attrs.
*/
void
-deparseDeleteSql(StringInfo buf, PlannerInfo *root,
+deparseDeleteSql(StringInfo buf, RangeTblEntry *rte,
Index rtindex, Relation rel,
List *returningList,
List **retrieved_attrs)
@@ -1856,9 +1898,9 @@ deparseDeleteSql(StringInfo buf, PlannerInfo *root,
deparseRelation(buf, rel);
appendStringInfoString(buf, " WHERE ctid = $1");
- deparseReturningList(buf, root, rtindex, rel,
+ deparseReturningList(buf, rte, rtindex, rel,
rel->trigdesc && rel->trigdesc->trig_delete_after_row,
- returningList, retrieved_attrs);
+ NIL, returningList, retrieved_attrs);
}
/*
@@ -1902,7 +1944,7 @@ deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
{
List *ignore_conds = NIL;
- appendStringInfo(buf, " USING ");
+ appendStringInfoString(buf, " USING ");
deparseFromExprForRel(buf, root, foreignrel, true, rtindex,
&ignore_conds, params_list);
remote_conds = list_concat(remote_conds, ignore_conds);
@@ -1918,17 +1960,19 @@ deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
deparseExplicitTargetList(returningList, true, retrieved_attrs,
&context);
else
- deparseReturningList(buf, root, rtindex, rel, false,
- returningList, retrieved_attrs);
+ deparseReturningList(buf, planner_rt_fetch(rtindex, root),
+ rtindex, rel, false,
+ NIL, returningList, retrieved_attrs);
}
/*
* Add a RETURNING clause, if needed, to an INSERT/UPDATE/DELETE.
*/
static void
-deparseReturningList(StringInfo buf, PlannerInfo *root,
+deparseReturningList(StringInfo buf, RangeTblEntry *rte,
Index rtindex, Relation rel,
bool trig_after_row,
+ List *withCheckOptionList,
List *returningList,
List **retrieved_attrs)
{
@@ -1941,6 +1985,21 @@ deparseReturningList(StringInfo buf, PlannerInfo *root,
bms_make_singleton(0 - FirstLowInvalidHeapAttributeNumber);
}
+ if (withCheckOptionList != NIL)
+ {
+ /*
+ * We need the attrs, non-system and system, mentioned in the local
+ * query's WITH CHECK OPTION list.
+ *
+ * Note: we do this to ensure that WCO constraints will be evaluated
+ * on the data actually inserted/updated on the remote side, which
+ * might differ from the data supplied by the core code, for example
+ * as a result of remote triggers.
+ */
+ pull_varattnos((Node *) withCheckOptionList, rtindex,
+ &attrs_used);
+ }
+
if (returningList != NIL)
{
/*
@@ -1952,7 +2011,7 @@ deparseReturningList(StringInfo buf, PlannerInfo *root,
}
if (attrs_used != NULL)
- deparseTargetList(buf, root, rtindex, rel, true, attrs_used, false,
+ deparseTargetList(buf, rte, rtindex, rel, true, attrs_used, false,
retrieved_attrs);
else
*retrieved_attrs = NIL;
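
/*
 * Sketch of the WITH CHECK OPTION column collection added above:
 * pull_varattnos() folds every attribute referenced by the local WCO
 * expressions into a Bitmapset (offset by
 * FirstLowInvalidHeapAttributeNumber), so those columns land in the remote
 * RETURNING list and the constraints are evaluated against what the remote
 * side actually stored, e.g. after remote triggers have fired.
 */
Bitmapset  *attrs_used = NULL;

if (withCheckOptionList != NIL)
	pull_varattnos((Node *) withCheckOptionList, rtindex, &attrs_used);
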
@@ -2048,11 +2107,9 @@ deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs)
* If qualify_col is true, qualify column name with the alias of relation.
*/
static void
-deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
+deparseColumnRef(StringInfo buf, int varno, int varattno, RangeTblEntry *rte,
bool qualify_col)
{
- RangeTblEntry *rte;
-
/* We support fetching the remote side's CTID and OID. */
if (varattno == SelfItemPointerAttributeNumber)
{
@@ -2060,12 +2117,6 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
ADD_REL_QUALIFIER(buf, varno);
appendStringInfoString(buf, "ctid");
}
- else if (varattno == ObjectIdAttributeNumber)
- {
- if (qualify_col)
- ADD_REL_QUALIFIER(buf, varno);
- appendStringInfoString(buf, "oid");
- }
else if (varattno < 0)
{
/*
@@ -2077,10 +2128,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
Oid fetchval = 0;
if (varattno == TableOidAttributeNumber)
- {
- rte = planner_rt_fetch(varno, root);
fetchval = rte->relid;
- }
if (qualify_col)
{
@@ -2100,14 +2148,11 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
/* Required only to be passed down to deparseTargetList(). */
List *retrieved_attrs;
- /* Get RangeTblEntry from array in PlannerInfo. */
- rte = planner_rt_fetch(varno, root);
-
/*
* The lock on the relation will be held by upper callers, so it's
* fine to open it with no lock here.
*/
- rel = heap_open(rte->relid, NoLock);
+ rel = table_open(rte->relid, NoLock);
/*
* The local name of the foreign table can not be recognized by the
@@ -2134,7 +2179,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
}
appendStringInfoString(buf, "ROW(");
- deparseTargetList(buf, root, varno, rel, false, attrs_used, qualify_col,
+ deparseTargetList(buf, rte, varno, rel, false, attrs_used, qualify_col,
&retrieved_attrs);
appendStringInfoChar(buf, ')');
@@ -2142,7 +2187,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
if (qualify_col)
appendStringInfoString(buf, " END");
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
bms_free(attrs_used);
}
else
@@ -2154,9 +2199,6 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
/* varno must not be any of OUTER_VAR, INNER_VAR and INDEX_VAR. */
Assert(!IS_SPECIAL_VARNO(varno));
- /* Get RangeTblEntry from array in PlannerInfo. */
- rte = planner_rt_fetch(varno, root);
-
/*
* If it's a column of a foreign table, and it has the column_name FDW
* option, use that value.
@@ -2284,8 +2326,8 @@ deparseExpr(Expr *node, deparse_expr_cxt *context)
case T_Param:
deparseParam((Param *) node, context);
break;
- case T_ArrayRef:
- deparseArrayRef((ArrayRef *) node, context);
+ case T_SubscriptingRef:
+ deparseSubscriptingRef((SubscriptingRef *) node, context);
break;
case T_FuncExpr:
deparseFuncExpr((FuncExpr *) node, context);
@@ -2337,7 +2379,7 @@ deparseVar(Var *node, deparse_expr_cxt *context)
int colno;
/* Qualify columns when multiple relations are involved. */
- bool qualify_col = (bms_num_members(relids) > 1);
+ bool qualify_col = (bms_membership(relids) == BMS_MULTIPLE);
/*
* If the Var belongs to the foreign relation that is deparsed as a
@@ -2354,7 +2396,8 @@ deparseVar(Var *node, deparse_expr_cxt *context)
if (bms_is_member(node->varno, relids) && node->varlevelsup == 0)
deparseColumnRef(context->buf, node->varno, node->varattno,
- context->root, qualify_col);
+ planner_rt_fetch(node->varno, context->root),
+ qualify_col);
else
{
/* Treat like a Param */
@@ -2531,10 +2574,10 @@ deparseParam(Param *node, deparse_expr_cxt *context)
}
/*
- * Deparse an array subscript expression.
+ * Deparse a container subscript expression.
*/
static void
-deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context)
+deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context)
{
StringInfo buf = context->buf;
ListCell *lowlist_item;
@@ -2567,7 +2610,7 @@ deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context)
{
deparseExpr(lfirst(lowlist_item), context);
appendStringInfoChar(buf, ':');
- lowlist_item = lnext(lowlist_item);
+ lowlist_item = lnext(node->reflowerindexpr, lowlist_item);
}
deparseExpr(lfirst(uplist_item), context);
appendStringInfoChar(buf, ']');
@@ -2630,7 +2673,7 @@ deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context)
{
if (!first)
appendStringInfoString(buf, ", ");
- if (use_variadic && lnext(arg) == NULL)
+ if (use_variadic && lnext(node->args, arg) == NULL)
appendStringInfoString(buf, "VARIADIC ");
deparseExpr((Expr *) lfirst(arg), context);
first = false;
@@ -2958,7 +3001,7 @@ deparseAggref(Aggref *node, deparse_expr_cxt *context)
first = false;
/* Add VARIADIC */
- if (use_variadic && lnext(arg) == NULL)
+ if (use_variadic && lnext(node->args, arg) == NULL)
appendStringInfoString(buf, "VARIADIC ");
deparseExpr((Expr *) n, context);
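
/*
 * The lnext() calls in the hunks above reflect the reworked List
 * representation: Lists are now expansible arrays, a ListCell no longer
 * stores a pointer to its successor, and lnext() therefore needs the List
 * itself to find the next cell.  The "is this the last argument?" test
 * becomes (sketch, names as in deparseFuncExpr above):
 */
foreach(arg, node->args)
{
	if (use_variadic && lnext(node->args, arg) == NULL)
		appendStringInfoString(buf, "VARIADIC ");	/* last argument only */
	deparseExpr((Expr *) lfirst(arg), context);
}
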
@@ -3122,7 +3165,8 @@ appendGroupByClause(List *tlist, deparse_expr_cxt *context)
* base relation are obtained and deparsed.
*/
static void
-appendOrderByClause(List *pathkeys, deparse_expr_cxt *context)
+appendOrderByClause(List *pathkeys, bool has_final_sort,
+ deparse_expr_cxt *context)
{
ListCell *lcell;
int nestlevel;
@@ -3139,7 +3183,19 @@ appendOrderByClause(List *pathkeys, deparse_expr_cxt *context)
PathKey *pathkey = lfirst(lcell);
Expr *em_expr;
- em_expr = find_em_expr_for_rel(pathkey->pk_eclass, baserel);
+ if (has_final_sort)
+ {
+ /*
+ * By construction, context->foreignrel is the input relation to
+ * the final sort.
+ */
+ em_expr = find_em_expr_for_input_target(context->root,
+ pathkey->pk_eclass,
+ context->foreignrel->reltarget);
+ }
+ else
+ em_expr = find_em_expr_for_rel(pathkey->pk_eclass, baserel);
+
Assert(em_expr != NULL);
appendStringInfoString(buf, delim);
@@ -3159,6 +3215,33 @@ appendOrderByClause(List *pathkeys, deparse_expr_cxt *context)
reset_transmission_modes(nestlevel);
}
+/*
+ * Deparse LIMIT/OFFSET clause.
+ */
+static void
+appendLimitClause(deparse_expr_cxt *context)
+{
+ PlannerInfo *root = context->root;
+ StringInfo buf = context->buf;
+ int nestlevel;
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ if (root->parse->limitCount)
+ {
+ appendStringInfoString(buf, " LIMIT ");
+ deparseExpr((Expr *) root->parse->limitCount, context);
+ }
+ if (root->parse->limitOffset)
+ {
+ appendStringInfoString(buf, " OFFSET ");
+ deparseExpr((Expr *) root->parse->limitOffset, context);
+ }
+
+ reset_transmission_modes(nestlevel);
+}
+
/*
* appendFunctionName
* Deparses function name from given function oid.
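
/*
 * A sketch of how the deparse.c pieces above combine (names as in the
 * hunks): when the planner decides a final sort and/or limit can run
 * remotely, the caller passes the new flags and appendLimitClause()
 * deparses the parse tree's limitCount/limitOffset expressions onto the
 * remote query.
 */
deparseSelectStmtForRel(buf, root, rel, tlist, remote_conds, pathkeys,
						has_final_sort, has_limit, false /* is_subquery */ ,
						&retrieved_attrs, &params_list);
/* producing remote SQL of the form seen in the regression output below:
 *   SELECT ... ORDER BY c3 ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
 */
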
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index e4d9469fdd3..f0c842a6078 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -129,13 +129,6 @@ CREATE FOREIGN TABLE ft6 (
c2 int NOT NULL,
c3 text
) SERVER loopback2 OPTIONS (schema_name 'S 1', table_name 'T 4');
--- A table with oids. CREATE FOREIGN TABLE doesn't support the
--- WITH OIDS option, but ALTER does.
-CREATE FOREIGN TABLE ft_pg_type (
- typname name,
- typlen smallint
-) SERVER loopback OPTIONS (schema_name 'pg_catalog', table_name 'pg_type');
-ALTER TABLE ft_pg_type SET WITH OIDS;
-- ===================================================================
-- tests for validator
-- ===================================================================
@@ -158,6 +151,7 @@ ALTER SERVER testserver1 OPTIONS (
keepalives 'value',
keepalives_idle 'value',
keepalives_interval 'value',
+ tcp_user_timeout 'value',
-- requiressl 'value',
sslcompression 'value',
sslmode 'value',
@@ -185,16 +179,15 @@ ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
\det+
- List of foreign tables
- Schema | Table | Server | FDW options | Description
---------+------------+-----------+--------------------------------------------------+-------------
- public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
- public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
- public | ft4 | loopback | (schema_name 'S 1', table_name 'T 3') |
- public | ft5 | loopback | (schema_name 'S 1', table_name 'T 4') |
- public | ft6 | loopback2 | (schema_name 'S 1', table_name 'T 4') |
- public | ft_pg_type | loopback | (schema_name 'pg_catalog', table_name 'pg_type') |
-(6 rows)
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------+-------+-----------+---------------------------------------+-------------
+ public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
+ public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
+ public | ft4 | loopback | (schema_name 'S 1', table_name 'T 3') |
+ public | ft5 | loopback | (schema_name 'S 1', table_name 'T 4') |
+ public | ft6 | loopback2 | (schema_name 'S 1', table_name 'T 4') |
+(5 rows)
-- Test that alteration of server options causes reconnection
-- Remote's errors might be non-English, so hide them to ensure stable results
@@ -244,11 +237,10 @@ ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
-- ===================================================================
-- single table without alias
EXPLAIN (COSTS OFF) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
----------------------------
- Limit
- -> Foreign Scan on ft1
-(2 rows)
+ QUERY PLAN
+---------------------
+ Foreign Scan on ft1
+(1 row)
SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -296,14 +288,12 @@ SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1, t1.tableoid OFFSET 100 LIMIT 10;
-- whole-row reference
EXPLAIN (VERBOSE, COSTS OFF) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
Output: t1.*, c3, c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.*, c3, c1
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c3 ASC NULLS LAST, "C 1" ASC NULLS LAST
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c3 ASC NULLS LAST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(3 rows)
SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
t1
@@ -343,14 +333,12 @@ SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
-- with FOR UPDATE/SHARE
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
- LockRows
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- -> Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE
+(3 rows)
SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -359,14 +347,12 @@ SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
(1 row)
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------
- LockRows
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- -> Foreign Scan on public.ft1 t1
- Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE
+(3 rows)
SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -679,7 +665,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1
Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)])))
(3 rows)
-EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
Foreign Scan on public.ft1 t1
@@ -976,6 +962,25 @@ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
9
(1 row)
+-- ORDER BY can be shipped, though
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Limit
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ -> Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c1 === t1.c2)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c2 ASC NULLS LAST
+(6 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
-- but let's put them in an extension ...
ALTER EXTENSION postgres_fdw ADD FUNCTION postgres_fdw_abs(int);
ALTER EXTENSION postgres_fdw ADD OPERATOR === (int, int);
@@ -1013,6 +1018,22 @@ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
9
(1 row)
+-- and both ORDER BY and LIMIT can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(public.===) c2)) ORDER BY c2 ASC NULLS LAST LIMIT 1::bigint
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
-- ===================================================================
-- JOIN queries
-- ===================================================================
@@ -1023,15 +1044,13 @@ ANALYZE ft5;
-- join two tables
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t1.c3
- -> Foreign Scan
- Output: t1.c1, t2.c1, t1.c3
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
c1 | c1
@@ -1051,18 +1070,13 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
-- join three tables
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3, t1.c3
- -> Sort
- Output: t1.c1, t2.c2, t3.c3, t1.c3
- Sort Key: t1.c3, t1.c1
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3, t1.c3
- Relations: ((public.ft1 t1) INNER JOIN (public.ft2 t2)) INNER JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3, r1.c3 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) INNER JOIN "S 1"."T 3" r4 ON (((r1."C 1" = r4.c1))))
-(9 rows)
+ Relations: ((public.ft1 t1) INNER JOIN (public.ft2 t2)) INNER JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3, r1.c3 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) INNER JOIN "S 1"."T 3" r4 ON (((r1."C 1" = r4.c1)))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1082,15 +1096,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t
-- left outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1
- -> Foreign Scan
- Output: t1.c1, t2.c1
- Relations: (public.ft4 t1) LEFT JOIN (public.ft5 t2)
- Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 LEFT JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft4 t1) LEFT JOIN (public.ft5 t2)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 LEFT JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
c1 | c1
@@ -1110,15 +1122,13 @@ SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.
-- left outer join three tables
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1183,15 +1193,13 @@ SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE
-- right outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1
- -> Foreign Scan
- Output: t1.c1, t2.c1
- Relations: (public.ft4 t2) LEFT JOIN (public.ft5 t1)
- Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r1 ON (((r1.c1 = r2.c1)))) ORDER BY r2.c1 ASC NULLS LAST, r1.c1 ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft4 t2) LEFT JOIN (public.ft5 t1)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r1 ON (((r1.c1 = r2.c1)))) ORDER BY r2.c1 ASC NULLS LAST, r1.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
c1 | c1
@@ -1211,15 +1219,13 @@ SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2
-- right outer join three tables
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1"))))
-(6 rows)
+ Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1239,15 +1245,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGH
-- full outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1
- -> Foreign Scan
- Output: t1.c1, t2.c1
- Relations: (public.ft4 t1) FULL JOIN (public.ft5 t2)
- Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft4 t1) FULL JOIN (public.ft5 t2)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 45::bigint
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
c1 | c1
@@ -1291,15 +1295,13 @@ SELECT t1.c1, t2.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL
EXPLAIN (VERBOSE, COSTS OFF)
SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: 1
- -> Foreign Scan
- Output: 1
- Relations: (public.ft4) FULL JOIN (public.ft5)
- Remote SQL: SELECT NULL FROM ((SELECT NULL FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4 FULL JOIN (SELECT NULL FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s5 ON (TRUE))
-(6 rows)
+ Relations: (public.ft4) FULL JOIN (public.ft5)
+ Remote SQL: SELECT NULL FROM ((SELECT NULL FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4 FULL JOIN (SELECT NULL FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s5 ON (TRUE)) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
?column?
@@ -1414,15 +1416,13 @@ SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNE
-- full outer join + inner join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t3.c1
- -> Foreign Scan
- Output: t1.c1, t2.c1, t3.c1
- Relations: ((public.ft4 t1) INNER JOIN (public.ft5 t2)) FULL JOIN (public.ft4 t3)
- Remote SQL: SELECT r1.c1, r2.c1, r4.c1 FROM (("S 1"."T 3" r1 INNER JOIN "S 1"."T 4" r2 ON (((r1.c1 = (r2.c1 + 1))) AND ((r1.c1 >= 50)) AND ((r1.c1 <= 60)))) FULL JOIN "S 1"."T 3" r4 ON (((r2.c1 = r4.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST, r4.c1 ASC NULLS LAST
-(6 rows)
+ Relations: ((public.ft4 t1) INNER JOIN (public.ft5 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1.c1, r2.c1, r4.c1 FROM (("S 1"."T 3" r1 INNER JOIN "S 1"."T 4" r2 ON (((r1.c1 = (r2.c1 + 1))) AND ((r1.c1 >= 50)) AND ((r1.c1 <= 60)))) FULL JOIN "S 1"."T 3" r4 ON (((r2.c1 = r4.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST, r4.c1 ASC NULLS LAST LIMIT 10::bigint
+(4 rows)
SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
c1 | c1 | c1
@@ -1442,15 +1442,13 @@ SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 a
-- full outer join three tables
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1470,15 +1468,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL
-- full outer join + right outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1"))))
-(6 rows)
+ Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1498,15 +1494,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT
-- right outer join + full outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) FULL JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1526,15 +1520,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL
-- full outer join + left outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1554,15 +1546,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT
-- left outer join + full outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1582,15 +1572,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL
-- right outer join + left outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) LEFT JOIN (public.ft4 t3)
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1610,15 +1598,13 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT
-- left outer join + right outer join
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t3.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t3.c3
- Relations: (public.ft4 t3) LEFT JOIN ((public.ft2 t1) INNER JOIN (public.ft2 t2))
- Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM ("S 1"."T 3" r4 LEFT JOIN ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ON (((r2."C 1" = r4.c1))))
-(6 rows)
+ Relations: (public.ft4 t3) LEFT JOIN ((public.ft2 t1) INNER JOIN (public.ft2 t2))
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM ("S 1"."T 3" r4 LEFT JOIN ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
c1 | c2 | c3
@@ -1669,15 +1655,13 @@ SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) WHERE (t1.c1
-- full outer join + WHERE clause with shippable extensions set
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE postgres_fdw_abs(t1.c1) > 0 OFFSET 10 LIMIT 10;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c2, t1.c3
- -> Foreign Scan
- Output: t1.c1, t2.c2, t1.c3
- Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) WHERE ((public.postgres_fdw_abs(r1."C 1") > 0))
-(6 rows)
+ Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) WHERE ((public.postgres_fdw_abs(r1."C 1") > 0)) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
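
The shippable-extensions tests hinge on the server-level "extensions" option: only functions and operators belonging to a listed extension are considered safe to send, which is why postgres_fdw_abs() appears in the Remote SQL above but stops being shipped once the option is dropped. A sketch of toggling it, with an assumed server name:

    ALTER SERVER myserver OPTIONS (ADD extensions 'postgres_fdw');
    -- members of the listed extension are now shippable in WHERE clauses
    ALTER SERVER myserver OPTIONS (DROP extensions);
    -- such calls are evaluated locally again
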
ALTER SERVER loopback OPTIONS (DROP extensions);
-- full outer join + WHERE clause with shippable extensions not set
@@ -1699,35 +1683,13 @@ ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
-- tests whole-row reference for row marks
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> LockRows
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> Foreign Scan
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1
- -> Sort
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Sort Key: t1.c3 USING <, t1.c1
- -> Merge Join
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Merge Cond: (t1.c1 = t2.c1)
- -> Sort
- Output: t1.c1, t1.c3, t1.*
- Sort Key: t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c3, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE
- -> Sort
- Output: t2.c1, t2.*
- Sort Key: t2.c1
- -> Foreign Scan on public.ft2 t2
- Output: t2.c1, t2.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(26 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR UPDATE OF r1
+(4 rows)
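
Row marks no longer force a local LockRows node when the entire join is remote: the FOR UPDATE/FOR SHARE clause is appended per relation after the shipped LIMIT/OFFSET, as in the plan above. A sketch with hypothetical foreign tables ft_a and ft_b:

    EXPLAIN (VERBOSE, COSTS OFF)
    SELECT a.x FROM ft_a a JOIN ft_b b ON (a.x = b.x)
      ORDER BY a.x LIMIT 10 FOR UPDATE OF a;
    -- Remote SQL should end with something like:
    --   ... ORDER BY r1.x ASC NULLS LAST LIMIT 10::bigint FOR UPDATE OF r1
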
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
c1 | c1
@@ -1746,35 +1708,13 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> LockRows
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> Foreign Scan
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1 FOR UPDATE OF r2
- -> Sort
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Sort Key: t1.c3 USING <, t1.c1
- -> Merge Join
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Merge Cond: (t1.c1 = t2.c1)
- -> Sort
- Output: t1.c1, t1.c3, t1.*
- Sort Key: t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c3, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE
- -> Sort
- Output: t2.c1, t2.*
- Sort Key: t2.c1
- -> Foreign Scan on public.ft2 t2
- Output: t2.c1, t2.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE
-(26 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR UPDATE OF r1 FOR UPDATE OF r2
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
c1 | c1
@@ -1794,35 +1734,13 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
-- join two tables with FOR SHARE clause
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> LockRows
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> Foreign Scan
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1
- -> Sort
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Sort Key: t1.c3 USING <, t1.c1
- -> Merge Join
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Merge Cond: (t1.c1 = t2.c1)
- -> Sort
- Output: t1.c1, t1.c3, t1.*
- Sort Key: t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c3, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE
- -> Sort
- Output: t2.c1, t2.*
- Sort Key: t2.c1
- -> Foreign Scan on public.ft2 t2
- Output: t2.c1, t2.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(26 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR SHARE OF r1
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
c1 | c1
@@ -1841,35 +1759,13 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> LockRows
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- -> Foreign Scan
- Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1 FOR SHARE OF r2
- -> Sort
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Sort Key: t1.c3 USING <, t1.c1
- -> Merge Join
- Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
- Merge Cond: (t1.c1 = t2.c1)
- -> Sort
- Output: t1.c1, t1.c3, t1.*
- Sort Key: t1.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1, t1.c3, t1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE
- -> Sort
- Output: t2.c1, t2.*
- Sort Key: t2.c1
- -> Foreign Scan on public.ft2 t2
- Output: t2.c1, t2.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE
-(26 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR SHARE OF r1 FOR SHARE OF r2
+(4 rows)
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
c1 | c1
@@ -1888,7 +1784,7 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
-- join in CTE
EXPLAIN (VERBOSE, COSTS OFF)
-WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------
Limit
@@ -1905,7 +1801,7 @@ WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2
Output: t.c1_1, t.c2_1, t.c1_3
(12 rows)
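
The WITH tests gain AS MATERIALIZED because a plain, once-referenced CTE is now inlined into the outer query; MATERIALIZED keeps the CTE as a separate scan and thus preserves the plan shape being checked. The general form, with a hypothetical table:

    WITH t AS MATERIALIZED (SELECT c1 FROM ft_demo)
    SELECT count(*) FROM t;  -- t is planned as its own CTE scan
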
-WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
c1_1 | c2_1
------+------
101 | 101
@@ -1923,15 +1819,13 @@ WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2
-- ctid with whole-row reference
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.ctid, t1, t2, t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.ctid, t1.*, t2.*, t1.c1, t1.c3
- -> Foreign Scan
- Output: t1.ctid, t1.*, t2.*, t1.c1, t1.c3
- Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
- Remote SQL: SELECT r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r1."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r1."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
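
Note how whole-row references cross the wire in the joined case: each becomes a ROW(...) constructor guarded by CASE WHEN (rN.*)::text IS NOT NULL, so a null-extended join row comes back as a NULL composite rather than a row of NULLs, while ctid ships as an ordinary column. Sketch with assumed two-column tables:

    SELECT t1, t2.b FROM ft_a t1 JOIN ft_b t2 ON (t1.a = t2.a);
    -- Remote SQL carries, per whole-row reference:
    --   CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1.a, r1.b) END
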
-- SEMI JOIN, not pushed down
EXPLAIN (VERBOSE, COSTS OFF)
@@ -1999,27 +1893,16 @@ SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2
119
(10 rows)
--- CROSS JOIN, not pushed down
+-- CROSS JOIN can be pushed down
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
- QUERY PLAN
----------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: t1.c1, t2.c1
- -> Sort
- Output: t1.c1, t2.c1
- Sort Key: t1.c1, t2.c1
- -> Nested Loop
- Output: t1.c1, t2.c1
- -> Foreign Scan on public.ft1 t1
- Output: t1.c1
- Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
- -> Materialize
- Output: t2.c1
- -> Foreign Scan on public.ft2 t2
- Output: t2.c1
- Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
-(15 rows)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (TRUE)) ORDER BY r1."C 1" ASC NULLS LAST, r2."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
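
A CROSS JOIN has no join clause to test for shippability, so it is sent as an inner join on a constant-true condition; with the ORDER BY and LIMIT/OFFSET also shipped, the whole query collapses into one Foreign Scan. Sketch (names assumed):

    SELECT a.x, b.y FROM ft_a a CROSS JOIN ft_b b;
    -- Remote SQL: SELECT r1.x, r2.y
    --             FROM (tbl_a r1 INNER JOIN tbl_b r2 ON (TRUE))
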
SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
c1 | c1
@@ -2336,74 +2219,84 @@ SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5
-- multi-way join involving multiple merge joins
-- (this case used to have EPQ-related planning problems)
+CREATE TABLE local_tbl (c1 int NOT NULL, c2 int NOT NULL, c3 text, CONSTRAINT local_tbl_pkey PRIMARY KEY (c1));
+INSERT INTO local_tbl SELECT id, id % 10, to_char(id, 'FM0000') FROM generate_series(1, 1000) id;
+ANALYZE local_tbl;
SET enable_nestloop TO false;
SET enable_hashjoin TO false;
EXPLAIN (VERBOSE, COSTS OFF)
-SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
- AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
LockRows
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.*
- -> Foreign Scan
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.*
- Relations: (((public.ft1) INNER JOIN (public.ft2)) INNER JOIN (public.ft4)) INNER JOIN (public.ft5)
- Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8, r3.c1, r3.c2, r3.c3, r4.c1, r4.c2, r4.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, CASE WHEN (r4.*)::text IS NOT NULL THEN ROW(r4.c1, r4.c2, r4.c3) END FROM ((("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r2."C 1" < 100)) AND ((r1."C 1" < 100)))) INNER JOIN "S 1"."T 3" r3 ON (((r1.c2 = r3.c1)))) INNER JOIN "S 1"."T 4" r4 ON (((r1.c2 = r4.c1)))) FOR UPDATE OF r1 FOR UPDATE OF r2 FOR UPDATE OF r3 FOR UPDATE OF r4
- -> Merge Join
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.*
- Merge Cond: (ft1.c2 = ft5.c1)
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft2.*, ft4.*, ft5.*, local_tbl.ctid
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft2.*, ft4.*, ft5.*, local_tbl.ctid
+ Inner Unique: true
+ Merge Cond: (ft1.c2 = local_tbl.c1)
+ -> Foreign Scan
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Relations: (((public.ft1) INNER JOIN (public.ft2)) INNER JOIN (public.ft4)) INNER JOIN (public.ft5)
+ Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r3.c1, r3.c2, r3.c3, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r4.c1, r4.c2, r4.c3, CASE WHEN (r4.*)::text IS NOT NULL THEN ROW(r4.c1, r4.c2, r4.c3) END FROM ((("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r2."C 1" < 100)) AND ((r1."C 1" < 100)))) INNER JOIN "S 1"."T 3" r3 ON (((r1.c2 = r3.c1)))) INNER JOIN "S 1"."T 4" r4 ON (((r1.c2 = r4.c1)))) ORDER BY r1.c2 ASC NULLS LAST FOR UPDATE OF r1 FOR UPDATE OF r2 FOR UPDATE OF r3 FOR UPDATE OF r4
-> Merge Join
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*
- Merge Cond: (ft1.c2 = ft4.c1)
- -> Sort
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
- Sort Key: ft1.c2
- -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Merge Cond: (ft1.c2 = ft5.c1)
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*
+ Merge Cond: (ft1.c2 = ft4.c1)
+ -> Sort
Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
- Merge Cond: (ft1.c1 = ft2.c1)
- -> Sort
- Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
- Sort Key: ft1.c1
- -> Foreign Scan on public.ft1
+ Sort Key: ft1.c2
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ Merge Cond: (ft1.c1 = ft2.c1)
+ -> Sort
Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) FOR UPDATE
- -> Materialize
- Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
- -> Foreign Scan on public.ft2
+ Sort Key: ft1.c1
+ -> Foreign Scan on public.ft1
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) FOR UPDATE
+ -> Materialize
Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) ORDER BY "C 1" ASC NULLS LAST FOR UPDATE
- -> Sort
- Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
- Sort Key: ft4.c1
- -> Foreign Scan on public.ft4
+ -> Foreign Scan on public.ft2
+ Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) ORDER BY "C 1" ASC NULLS LAST FOR UPDATE
+ -> Sort
Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" FOR UPDATE
- -> Sort
- Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
- Sort Key: ft5.c1
- -> Foreign Scan on public.ft5
+ Sort Key: ft4.c1
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" FOR UPDATE
+ -> Sort
Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
- Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" FOR UPDATE
-(41 rows)
-
-SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
- AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
- c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3
-----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+--------+----+----+--------
- 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
- 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006
+ Sort Key: ft5.c1
+ -> Foreign Scan on public.ft5
+ Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" FOR UPDATE
+ -> Index Scan using local_tbl_pkey on public.local_tbl
+ Output: local_tbl.c1, local_tbl.c2, local_tbl.c3, local_tbl.ctid
+(47 rows)
+
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3 | c1 | c2 | c3
+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+--------+----+----+--------+----+----+------
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
(10 rows)
RESET enable_nestloop;
RESET enable_hashjoin;
+DROP TABLE local_tbl;
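
Joining the four foreign tables to a genuinely local table keeps the top-level Merge Join on the local side, while the all-remote input still ships its sort (ORDER BY r1.c2 ... FOR UPDATE in the plan above), so that input needs no local Sort. The pattern, with assumed names:

    -- one local relation is enough to pin the final join locally:
    SELECT * FROM ft_a a JOIN local_t l ON (a.k = l.k) FOR UPDATE;
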
-- check join pushdown in situations where multiple userids are involved
CREATE ROLE regress_view_owner SUPERUSER;
CREATE USER MAPPING FOR regress_view_owner SERVER loopback;
@@ -2452,15 +2345,13 @@ SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1
ALTER VIEW v4 OWNER TO regress_view_owner;
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: ft4.c1, ft5.c2, ft5.c1
- -> Foreign Scan
- Output: ft4.c1, ft5.c2, ft5.c1
- Relations: (public.ft4) LEFT JOIN (public.ft5)
- Remote SQL: SELECT r6.c1, r9.c2, r9.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r9 ON (((r6.c1 = r9.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r9.c1 ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft4) LEFT JOIN (public.ft5)
+ Remote SQL: SELECT r6.c1, r9.c2, r9.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r9 ON (((r6.c1 = r9.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r9.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
c1 | c2
@@ -2517,15 +2408,13 @@ SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c
ALTER VIEW v4 OWNER TO CURRENT_USER;
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: ft4.c1, t2.c2, t2.c1
- -> Foreign Scan
- Output: ft4.c1, t2.c2, t2.c1
- Relations: (public.ft4) LEFT JOIN (public.ft5 t2)
- Remote SQL: SELECT r6.c1, r2.c2, r2.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r2 ON (((r6.c1 = r2.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST
-(6 rows)
+ Relations: (public.ft4) LEFT JOIN (public.ft5 t2)
+ Remote SQL: SELECT r6.c1, r2.c2, r2.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r2 ON (((r6.c1 = r2.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
c1 | c2
@@ -2552,18 +2441,13 @@ DROP ROLE regress_view_owner;
-- Simple aggregates
explain (verbose, costs off)
select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------
- Result
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c2
- -> Sort
- Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), c2
- Sort Key: (count(ft1.c6)), (sum(ft1.c1))
- -> Foreign Scan
- Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), c2
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7
-(9 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7 ORDER BY count(c6) ASC NULLS LAST, sum("C 1") ASC NULLS LAST
+(4 rows)
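
Aggregate pushdown now also ships the query's final ORDER BY, including sort keys that are themselves aggregates, so the Sort (or Result) node above the Foreign Scan goes away; only the locally evaluated (random() <= 1)::int factor stays in the local target list. Sketch (assumed names):

    SELECT g, sum(v) FROM ft_demo GROUP BY g ORDER BY sum(v);
    -- Remote SQL: SELECT g, sum(v) FROM tbl GROUP BY 1
    --             ORDER BY sum(v) ASC NULLS LAST
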
select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
count | sum | avg | min | max | stddev | sum2
@@ -2575,6 +2459,22 @@ select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (ran
100 | 50500 | 505.0000000000000000 | 0 | 1000 | 0 | 50500
(5 rows)
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7 ORDER BY count(c6) ASC NULLS LAST, sum("C 1") ASC NULLS LAST LIMIT 1::bigint
+(4 rows)
+
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+ count | sum | avg | min | max | stddev | sum2
+-------+-------+----------------------+-----+-----+--------+-------
+ 100 | 49600 | 496.0000000000000000 | 1 | 991 | 0 | 49600
+(1 row)
+
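This added test pins down the full combination: GROUP BY, an aggregate-valued ORDER BY, and LIMIT all ride in a single remote query, with the limit last as LIMIT 1::bigint. Reduced to a sketch:

    SELECT g, sum(v) FROM ft_demo GROUP BY g ORDER BY 2 LIMIT 1;
    -- Remote SQL: ... GROUP BY 1 ORDER BY sum(v) ASC NULLS LAST LIMIT 1::bigint
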
-- Aggregate is not pushed down as aggregation contains random()
explain (verbose, costs off)
select sum(c1 * (random() <= 1)::int) as sum, avg(c1) from ft1;
@@ -2621,16 +2521,13 @@ select sum(t1.c1), count(t2.c1) from ft1 t1 inner join ft2 t2 on (t1.c1 = t2.c1)
-- GROUP BY clause having expressions
explain (verbose, costs off)
select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
- QUERY PLAN
----------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: ((c2 / 2)), ((sum(c2) * (c2 / 2)))
- Sort Key: ((ft1.c2 / 2))
- -> Foreign Scan
- Output: ((c2 / 2)), ((sum(c2) * (c2 / 2)))
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT (c2 / 2), (sum(c2) * (c2 / 2)) FROM "S 1"."T 1" GROUP BY 1
-(7 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT (c2 / 2), (sum(c2) * (c2 / 2)) FROM "S 1"."T 1" GROUP BY 1 ORDER BY (c2 / 2) ASC NULLS LAST
+(4 rows)
select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
?column? | ?column?
@@ -2645,18 +2542,15 @@ select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
-- Aggregates in subquery are pushed down.
explain (verbose, costs off)
select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
- QUERY PLAN
----------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------
Aggregate
Output: count(ft1.c2), sum(ft1.c2)
- -> Sort
+ -> Foreign Scan
Output: ft1.c2, (sum(ft1.c1)), (sqrt((ft1.c1)::double precision))
- Sort Key: ft1.c2, (sum(ft1.c1))
- -> Foreign Scan
- Output: ft1.c2, (sum(ft1.c1)), (sqrt((ft1.c1)::double precision))
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT c2, sum("C 1"), sqrt("C 1") FROM "S 1"."T 1" GROUP BY 1, 3
-(9 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, sum("C 1"), sqrt("C 1") FROM "S 1"."T 1" GROUP BY 1, 3 ORDER BY c2 ASC NULLS LAST, sum("C 1") ASC NULLS LAST
+(6 rows)
select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
count | sum
@@ -2742,16 +2636,13 @@ select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2
-- Also, ORDER BY contains an aggregate function
explain (verbose, costs off)
select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: c2, c2, (sum(c1))
- Sort Key: (sum(ft1.c1))
- -> Foreign Scan
- Output: c2, c2, (sum(c1))
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT c2, c2, sum("C 1") FROM "S 1"."T 1" WHERE ((c2 > 6)) GROUP BY 1, 2
-(7 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, c2, sum("C 1") FROM "S 1"."T 1" WHERE ((c2 > 6)) GROUP BY 1, 2 ORDER BY sum("C 1") ASC NULLS LAST
+(4 rows)
select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
c2 | c2
@@ -2764,16 +2655,13 @@ select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
-- Testing HAVING clause shippability
explain (verbose, costs off)
select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: c2, (sum(c1))
- Sort Key: ft2.c2
- -> Foreign Scan
- Output: c2, (sum(c1))
- Relations: Aggregate on (public.ft2)
- Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1 HAVING ((avg("C 1") < 500::numeric)) AND ((sum("C 1") < 49800))
-(7 rows)
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1 HAVING ((avg("C 1") < 500::numeric)) AND ((sum("C 1") < 49800)) ORDER BY c2 ASC NULLS LAST
+(4 rows)
select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
c2 | sum
@@ -2819,20 +2707,57 @@ select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100
Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
(10 rows)
+-- Remote aggregate in combination with a local Param (for the output
+-- of an initplan) can be trouble, per bug #15781
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+ QUERY PLAN
+--------------------------------------------------
+ Foreign Scan
+ Output: $0, (sum(ft1.c1))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum("C 1") FROM "S 1"."T 1"
+ InitPlan 1 (returns $0)
+ -> Seq Scan on pg_catalog.pg_enum
+(6 rows)
+
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+ exists | sum
+--------+--------
+ t | 500500
+(1 row)
+
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+ QUERY PLAN
+---------------------------------------------------
+ GroupAggregate
+ Output: ($0), sum(ft1.c1)
+ Group Key: $0
+ InitPlan 1 (returns $0)
+ -> Seq Scan on pg_catalog.pg_enum
+ -> Foreign Scan on public.ft1
+ Output: $0, ft1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
+(8 rows)
+
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+ exists | sum
+--------+--------
+ t | 500500
+(1 row)
+
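The two plans above show both sides of the bug-#15781 fix: a locally computed initplan Param ($0) can simply be prefixed to the pushed-down aggregate's output, but once it becomes a grouping key the aggregation must stay local, since the remote server cannot evaluate $0. In sketch form:

    SELECT exists(SELECT 1 FROM pg_enum), sum(a) FROM ft_demo;
    -- aggregate pushed down; $0 added to the Foreign Scan's Output locally
    SELECT exists(SELECT 1 FROM pg_enum), sum(a) FROM ft_demo GROUP BY 1;
    -- grouping on $0 => local GroupAggregate over a plain remote fetch
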
-- Testing ORDER BY, DISTINCT, FILTER, Ordered-sets and VARIADIC within aggregates
-- ORDER BY within aggregate, same column used to order
explain (verbose, costs off)
select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (array_agg(c1 ORDER BY c1)), c2
- Sort Key: (array_agg(ft1.c1 ORDER BY ft1.c1))
- -> Foreign Scan
- Output: (array_agg(c1 ORDER BY c1)), c2
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) GROUP BY 2
-(7 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) GROUP BY 2 ORDER BY array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST) ASC NULLS LAST
+(4 rows)
select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
array_agg
@@ -2869,16 +2794,13 @@ select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50;
-- DISTINCT within aggregate
explain (verbose, costs off)
select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (array_agg(DISTINCT (t1.c1 % 5))), ((t2.c1 % 3))
- Sort Key: (array_agg(DISTINCT (t1.c1 % 5)))
- -> Foreign Scan
- Output: (array_agg(DISTINCT (t1.c1 % 5))), ((t2.c1 % 3))
- Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
- Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5)), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2
-(7 rows)
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5)), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5)) ASC NULLS LAST
+(4 rows)
select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
array_agg
@@ -2890,16 +2812,13 @@ select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2
-- DISTINCT combined with ORDER BY within aggregate
explain (verbose, costs off)
select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))), ((t2.c1 % 3))
- Sort Key: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5)))
- -> Foreign Scan
- Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))), ((t2.c1 % 3))
- Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
- Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2
-(7 rows)
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST) ASC NULLS LAST
+(4 rows)
select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
array_agg
@@ -2910,16 +2829,13 @@ select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft
explain (verbose, costs off)
select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)), ((t2.c1 % 3))
- Sort Key: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST))
- -> Foreign Scan
- Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)), ((t2.c1 % 3))
- Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
- Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2
-(7 rows)
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST) ASC NULLS LAST
+(4 rows)
select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
array_agg
@@ -2931,16 +2847,13 @@ select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4
-- FILTER within aggregate
explain (verbose, costs off)
select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (sum(c1) FILTER (WHERE ((c1 < 100) AND (c2 > 5)))), c2
- Sort Key: (sum(ft1.c1) FILTER (WHERE ((ft1.c1 < 100) AND (ft1.c2 > 5))))
- -> Foreign Scan
- Output: (sum(c1) FILTER (WHERE ((c1 < 100) AND (c2 > 5)))), c2
- Relations: Aggregate on (public.ft1)
- Remote SQL: SELECT sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))), c2 FROM "S 1"."T 1" GROUP BY 2
-(7 rows)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))), c2 FROM "S 1"."T 1" GROUP BY 2 ORDER BY sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))) ASC NULLS LAST
+(4 rows)
select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
sum
@@ -3217,6 +3130,8 @@ select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6
Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6))
(6 rows)
+-- Update local stats on ft2
+ANALYZE ft2;
-- Add into extension
alter extension postgres_fdw add operator class my_op_class using btree;
alter extension postgres_fdw add function my_op_cmp(a int, b int);
@@ -3337,16 +3252,13 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w
-- FULL join with IS NULL check in HAVING
explain (verbose, costs off)
select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Sort
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1
- Sort Key: (avg(t1.c1)), (sum(t2.c1))
- -> Foreign Scan
- Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1
- Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
- Remote SQL: SELECT avg(r1.c1), sum(r2.c1), r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) GROUP BY 3 HAVING ((((avg(r1.c1) IS NULL) AND (sum(r2.c1) < 10)) OR (sum(r2.c1) IS NULL)))
-(7 rows)
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT avg(r1.c1), sum(r2.c1), r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) GROUP BY 3 HAVING ((((avg(r1.c1) IS NULL) AND (sum(r2.c1) < 10)) OR (sum(r2.c1) IS NULL))) ORDER BY avg(r1.c1) ASC NULLS LAST, sum(r2.c1) ASC NULLS LAST
+(4 rows)
select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
avg | sum
@@ -3427,6 +3339,62 @@ select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum fr
(2 rows)
reset enable_hashagg;
+-- bug #15613: bad plan for foreign table scan with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: ref_0.c2, ref_0."C 1", (ref_0.c2), ref_1.c3, ref_0."C 1"
+ -> Nested Loop
+ Output: ref_0.c2, ref_0."C 1", ref_1.c3, (ref_0.c2)
+ -> Index Scan using t1_pkey on "S 1"."T 1" ref_0
+ Output: ref_0."C 1", ref_0.c2, ref_0.c3, ref_0.c4, ref_0.c5, ref_0.c6, ref_0.c7, ref_0.c8
+ Index Cond: (ref_0."C 1" < 10)
+ -> Foreign Scan on public.ft1 ref_1
+ Output: ref_1.c3, ref_0.c2
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE ((c3 = '00001'::text))
+ -> Materialize
+ Output: ref_3.c3
+ -> Foreign Scan on public.ft2 ref_3
+ Output: ref_3.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE ((c3 = '00001'::text))
+(15 rows)
+
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+ c2 | c1 | c2 | c3
+----+----+----+-------
+ 1 | 1 | 1 | 00001
+ 2 | 2 | 2 | 00001
+ 3 | 3 | 3 | 00001
+ 4 | 4 | 4 | 00001
+ 5 | 5 | 5 | 00001
+ 6 | 6 | 6 | 00001
+ 7 | 7 | 7 | 00001
+ 8 | 8 | 8 | 00001
+ 9 | 9 | 9 | 00001
+(9 rows)
+
-- Check with placeHolderVars
explain (verbose, costs off)
select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
@@ -4001,14 +3969,12 @@ SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1;
EXPLAIN (VERBOSE, COSTS OFF)
SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
- QUERY PLAN
--------------------------------------------------------------------------------
- Limit
- Output: ((tableoid)::regclass), c1, c2, c3, c4, c5, c6, c7, c8
- -> Foreign Scan on public.ft1 t1
- Output: (tableoid)::regclass, c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
-(5 rows)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: (tableoid)::regclass, c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" LIMIT 1::bigint
+(3 rows)
SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
tableoid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -4033,34 +3999,17 @@ SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
EXPLAIN (VERBOSE, COSTS OFF)
SELECT ctid, * FROM ft1 t1 LIMIT 1;
- QUERY PLAN
--------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8
- -> Foreign Scan on public.ft1 t1
- Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1"
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" LIMIT 1::bigint
+(3 rows)
SELECT ctid, * FROM ft1 t1 LIMIT 1;
ctid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
-------+----+----+-------+------------------------------+--------------------------+----+------------+-----
- (0,1) | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT oid, * FROM ft_pg_type WHERE typname = 'int4';
- QUERY PLAN
-----------------------------------------------------------------------------------------------------
- Foreign Scan on public.ft_pg_type
- Output: oid, typname, typlen
- Remote SQL: SELECT typname, typlen, oid FROM pg_catalog.pg_type WHERE ((typname = 'int4'::name))
-(3 rows)
-
-SELECT oid, * FROM ft_pg_type WHERE typname = 'int4';
- oid | typname | typlen
------+---------+--------
- 23 | int4 | 4
+ (0,1) | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
(1 row)
-- ===================================================================
@@ -4087,16 +4036,16 @@ DROP FUNCTION f_test(int);
-- ===================================================================
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
SELECT * FROM ft1 WHERE c1 = 1; -- ERROR
-ERROR: invalid input syntax for integer: "foo"
+ERROR: invalid input syntax for type integer: "foo"
CONTEXT: column "c8" of foreign table "ft1"
SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR
-ERROR: invalid input syntax for integer: "foo"
+ERROR: invalid input syntax for type integer: "foo"
CONTEXT: column "c8" of foreign table "ft1"
SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR
-ERROR: invalid input syntax for integer: "foo"
+ERROR: invalid input syntax for type integer: "foo"
CONTEXT: whole-row reference to foreign table "ft1"
SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR
-ERROR: invalid input syntax for integer: "foo"
+ERROR: invalid input syntax for type integer: "foo"
CONTEXT: processing expression at position 2 in select list
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
-- ===================================================================
@@ -4233,18 +4182,21 @@ explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
explain (verbose, costs off) select * from ft3 f, loct3 l
where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo';
- QUERY PLAN
----------------------------------------------------------
- Nested Loop
+ QUERY PLAN
+-------------------------------------------------------------
+ Hash Join
Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3
- Join Filter: ((f.f3)::text = (l.f3)::text)
- -> Index Scan using loct3_f1_key on public.loct3 l
- Output: l.f1, l.f2, l.f3
- Index Cond: (l.f1 = 'foo'::text)
+ Inner Unique: true
+ Hash Cond: ((f.f3)::text = (l.f3)::text)
-> Foreign Scan on public.ft3 f
Output: f.f1, f.f2, f.f3
Remote SQL: SELECT f1, f2, f3 FROM public.loct3
-(9 rows)
+ -> Hash
+ Output: l.f1, l.f2, l.f3
+ -> Index Scan using loct3_f1_key on public.loct3 l
+ Output: l.f1, l.f2, l.f3
+ Index Cond: (l.f1 = 'foo'::text)
+(12 rows)
-- ===================================================================
-- test writable foreign table stuff
@@ -4257,12 +4209,10 @@ INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-> Subquery Scan on "*SELECT*"
Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1", NULL::integer, "*SELECT*"."?column?_2", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
- -> Limit
- Output: ((ft2_1.c1 + 1000)), ((ft2_1.c2 + 100)), ((ft2_1.c3 || ft2_1.c3))
- -> Foreign Scan on public.ft2 ft2_1
- Output: (ft2_1.c1 + 1000), (ft2_1.c2 + 100), (ft2_1.c3 || ft2_1.c3)
- Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
-(9 rows)
+ -> Foreign Scan on public.ft2 ft2_1
+ Output: (ft2_1.c1 + 1000), (ft2_1.c2 + 100), (ft2_1.c3 || ft2_1.c3)
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1" LIMIT 20::bigint
+(7 rows)
INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
INSERT INTO ft2 (c1,c2,c3)
@@ -5367,49 +5317,49 @@ SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
(819 rows)
EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Insert on public.ft2
- Output: (tableoid)::regclass
+ Output: (ft2.tableoid)::regclass
Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-> Result
- Output: 9999, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
+ Output: 1200, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
(5 rows)
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
tableoid
----------
ft2
(1 row)
EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
QUERY PLAN
------------------------------------------------------------------------------------
Update on public.ft2
Output: (tableoid)::regclass
-> Foreign Update on public.ft2
- Remote SQL: UPDATE "S 1"."T 1" SET c3 = 'bar'::text WHERE (("C 1" = 9999))
+ Remote SQL: UPDATE "S 1"."T 1" SET c3 = 'bar'::text WHERE (("C 1" = 1200))
(4 rows)
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass;
tableoid
----------
ft2
(1 row)
EXPLAIN (verbose, costs off)
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
QUERY PLAN
--------------------------------------------------------------------
Delete on public.ft2
Output: (tableoid)::regclass
-> Foreign Delete on public.ft2
- Remote SQL: DELETE FROM "S 1"."T 1" WHERE (("C 1" = 9999))
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE (("C 1" = 1200))
(4 rows)
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass;
tableoid
----------
ft2
@@ -6009,14 +5959,12 @@ VACUUM ANALYZE "S 1"."T 1";
-- FIRST behavior here.
-- ORDER BY DESC NULLS LAST options
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
Output: c1, c2, c3, c4, c5, c6, c7, c8
- -> Foreign Scan on public.ft1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS LAST, "C 1" ASC NULLS LAST
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS LAST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 795::bigint
+(3 rows)
SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -6035,14 +5983,12 @@ SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
-- ORDER BY DESC NULLS FIRST options
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
Output: c1, c2, c3, c4, c5, c6, c7, c8
- -> Foreign Scan on public.ft1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS FIRST, "C 1" ASC NULLS LAST
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS FIRST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 15::bigint
+(3 rows)
SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -6061,14 +6007,12 @@ SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
-- ORDER BY ASC NULLS FIRST options
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------
- Limit
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
Output: c1, c2, c3, c4, c5, c6, c7, c8
- -> Foreign Scan on public.ft1
- Output: c1, c2, c3, c4, c5, c6, c7, c8
- Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 ASC NULLS FIRST, "C 1" ASC NULLS LAST
-(5 rows)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 ASC NULLS FIRST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 15::bigint
+(3 rows)
SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
@@ -6173,10 +6117,12 @@ ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative;
-- ===================================================================
-- test WITH CHECK OPTION constraints
-- ===================================================================
+CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql;
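+-- The trigger adds 10 to a on the remote side, so the view's CHECK OPTION
+-- must be re-checked against the row as actually stored; hence the remote
+-- DML statements below carry a RETURNING list.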
CREATE TABLE base_tbl (a int, b int);
ALTER TABLE base_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
CREATE FOREIGN TABLE foreign_tbl (a int, b int)
- SERVER loopback OPTIONS(table_name 'base_tbl');
+ SERVER loopback OPTIONS (table_name 'base_tbl');
CREATE VIEW rw_view AS SELECT * FROM foreign_tbl
WHERE a < b WITH CHECK OPTION;
\d+ rw_view
@@ -6192,45 +6138,162 @@ View definition:
WHERE foreign_tbl.a < foreign_tbl.b;
Options: check_option=cascaded
-INSERT INTO rw_view VALUES (0, 10); -- ok
-INSERT INTO rw_view VALUES (10, 0); -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ -> Result
+ Output: 0, 5
+(4 rows)
+
+INSERT INTO rw_view VALUES (0, 5); -- should fail
ERROR: new row violates check option for view "rw_view"
-DETAIL: Failing row contains (10, 0).
+DETAIL: Failing row contains (10, 5).
EXPLAIN (VERBOSE, COSTS OFF)
-UPDATE rw_view SET b = 20 WHERE a = 0; -- not pushed down
- QUERY PLAN
---------------------------------------------------------------------------------------------------
+INSERT INTO rw_view VALUES (0, 15);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ -> Result
+ Output: 0, 15
+(4 rows)
+
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 10 | 15
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
Update on public.foreign_tbl
- Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1
+ Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
-> Foreign Scan on public.foreign_tbl
- Output: foreign_tbl.a, 20, foreign_tbl.ctid
- Remote SQL: SELECT a, ctid FROM public.base_tbl WHERE ((a < b)) AND ((a = 0)) FOR UPDATE
+ Output: foreign_tbl.a, (foreign_tbl.b + 5), foreign_tbl.ctid
+ Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE
(5 rows)
-UPDATE rw_view SET b = 20 WHERE a = 0; -- ok
+UPDATE rw_view SET b = b + 5; -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (20, 20).
EXPLAIN (VERBOSE, COSTS OFF)
-UPDATE rw_view SET b = -20 WHERE a = 0; -- not pushed down
- QUERY PLAN
---------------------------------------------------------------------------------------------------
+UPDATE rw_view SET b = b + 15;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
Update on public.foreign_tbl
- Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1
+ Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
-> Foreign Scan on public.foreign_tbl
- Output: foreign_tbl.a, '-20'::integer, foreign_tbl.ctid
- Remote SQL: SELECT a, ctid FROM public.base_tbl WHERE ((a < b)) AND ((a = 0)) FOR UPDATE
+ Output: foreign_tbl.a, (foreign_tbl.b + 15), foreign_tbl.ctid
+ Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE
(5 rows)
-UPDATE rw_view SET b = -20 WHERE a = 0; -- should fail
-ERROR: new row violates check option for view "rw_view"
-DETAIL: Failing row contains (0, -20).
+UPDATE rw_view SET b = b + 15; -- ok
SELECT * FROM foreign_tbl;
- a | b
----+----
- 0 | 20
+ a | b
+----+----
+ 20 | 30
(1 row)
DROP FOREIGN TABLE foreign_tbl CASCADE;
NOTICE: drop cascades to view rw_view
+DROP TRIGGER row_before_insupd_trigger ON base_tbl;
DROP TABLE base_tbl;
+-- test WCO for partitions
+CREATE TABLE child_tbl (a int, b int);
+ALTER TABLE child_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'child_tbl');
+CREATE TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+CREATE VIEW rw_view AS SELECT * FROM parent_tbl
+ WHERE a < b WITH CHECK OPTION;
+\d+ rw_view
+ View "public.rw_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+---------+-------------
+ a | integer | | | | plain |
+ b | integer | | | | plain |
+View definition:
+ SELECT parent_tbl.a,
+ parent_tbl.b
+ FROM parent_tbl
+ WHERE parent_tbl.a < parent_tbl.b;
+Options: check_option=cascaded
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+ QUERY PLAN
+-----------------------------
+ Insert on public.parent_tbl
+ -> Result
+ Output: 0, 5
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+ QUERY PLAN
+-----------------------------
+ Insert on public.parent_tbl
+ -> Result
+ Output: 0, 15
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 10 | 15
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Foreign Update on public.foreign_tbl
+ Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl
+ Output: foreign_tbl.a, (foreign_tbl.b + 5), foreign_tbl.ctid
+ Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE
+(6 rows)
+
+UPDATE rw_view SET b = b + 5; -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (20, 20).
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Foreign Update on public.foreign_tbl
+ Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl
+ Output: foreign_tbl.a, (foreign_tbl.b + 15), foreign_tbl.ctid
+ Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE
+(6 rows)
+
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON child_tbl;
+DROP TABLE parent_tbl CASCADE;
+NOTICE: drop cascades to view rw_view
+DROP FUNCTION row_before_insupd_trigfunc;
-- ===================================================================
-- test serial columns (ie, sequence-based defaults)
-- ===================================================================
@@ -6266,6 +6329,31 @@ select * from rem1;
11 | bye remote
(4 rows)
+-- ===================================================================
+-- test generated columns
+-- ===================================================================
+create table gloc1 (a int, b int);
+alter table gloc1 set (autovacuum_enabled = 'false');
+create foreign table grem1 (
+ a int,
+ b int generated always as (a * 2) stored)
+ server loopback options(table_name 'gloc1');
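+-- The generated column is computed locally and its value shipped to the
+-- remote plain column, so both tables should show identical data below.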
+insert into grem1 (a) values (1), (2);
+update grem1 set a = 22 where a = 2;
+select * from gloc1;
+ a | b
+----+----
+ 1 | 2
+ 22 | 44
+(2 rows)
+
+select * from grem1;
+ a | b
+----+----
+ 1 | 2
+ 22 | 44
+(2 rows)
+
-- ===================================================================
-- test local triggers
-- ===================================================================
@@ -6470,6 +6558,25 @@ SELECT * from loc1;
2 | skidoo triggered !
(2 rows)
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f1 = 10; -- all columns should be transmitted
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f1 = $2, f2 = $3 WHERE ctid = $1
+ -> Foreign Scan on public.rem1
+ Output: 10, f2, ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+UPDATE rem1 set f1 = 10;
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 10 | skidoo triggered ! triggered !
+ 10 | skidoo triggered ! triggered !
+(2 rows)
+
DELETE FROM rem1;
-- Add a second trigger, to check that the changes are propagated correctly
-- from trigger to trigger
@@ -6582,7 +6689,7 @@ NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
NOTICE: NEW: (13,"test triggered !")
ctid
--------
- (0,27)
+ (0,29)
(1 row)
-- cleanup
@@ -6686,10 +6793,10 @@ BEFORE UPDATE ON rem1
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
EXPLAIN (verbose, costs off)
UPDATE rem1 set f2 = ''; -- can't be pushed down
- QUERY PLAN
----------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------
Update on public.rem1
- Remote SQL: UPDATE public.loc1 SET f2 = $2 WHERE ctid = $1
+ Remote SQL: UPDATE public.loc1 SET f1 = $2, f2 = $3 WHERE ctid = $1
-> Foreign Scan on public.rem1
Output: f1, ''::text, ctid, rem1.*
Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
@@ -6951,9 +7058,9 @@ select * from bar where f1 in (select f1 from foo) for update;
QUERY PLAN
----------------------------------------------------------------------------------------------
LockRows
- Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
-> Hash Join
- Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
Inner Unique: true
Hash Cond: (bar.f1 = foo.f1)
-> Append
@@ -6963,15 +7070,15 @@ select * from bar where f1 in (select f1 from foo) for update;
Output: bar2.f1, bar2.f2, bar2.ctid, bar2.*, bar2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
-> Hash
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> HashAggregate
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
+ Output: foo2.ctid, foo2.f1, foo2.*, foo2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(23 rows)
@@ -6989,9 +7096,9 @@ select * from bar where f1 in (select f1 from foo) for share;
QUERY PLAN
----------------------------------------------------------------------------------------------
LockRows
- Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
-> Hash Join
- Output: bar.f1, bar.f2, bar.ctid, bar.*, bar.tableoid, foo.ctid, foo.*, foo.tableoid
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
Inner Unique: true
Hash Cond: (bar.f1 = foo.f1)
-> Append
@@ -7001,15 +7108,15 @@ select * from bar where f1 in (select f1 from foo) for share;
Output: bar2.f1, bar2.f2, bar2.ctid, bar2.*, bar2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE
-> Hash
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> HashAggregate
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
+ Output: foo2.ctid, foo2.f1, foo2.*, foo2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(23 rows)
@@ -7038,15 +7145,15 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
-> Seq Scan on public.bar
Output: bar.f1, bar.f2, bar.ctid
-> Hash
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> HashAggregate
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
+ Output: foo2.ctid, foo2.f1, foo2.*, foo2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-> Hash Join
Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, foo.ctid, foo.*, foo.tableoid
@@ -7056,15 +7163,15 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
Output: bar2.f1, bar2.f2, bar2.f3, bar2.ctid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
-> Hash
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> HashAggregate
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
Group Key: foo.f1
-> Append
-> Seq Scan on public.foo
- Output: foo.ctid, foo.*, foo.tableoid, foo.f1
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
-> Foreign Scan on public.foo2
- Output: foo2.ctid, foo2.*, foo2.tableoid, foo2.f1
+ Output: foo2.ctid, foo2.f1, foo2.*, foo2.tableoid
Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
(39 rows)
@@ -7316,12 +7423,12 @@ AFTER UPDATE OR DELETE ON bar2
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
explain (verbose, costs off)
update bar set f2 = f2 + 100;
- QUERY PLAN
---------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------
Update on public.bar
Update on public.bar
Foreign Update on public.bar2
- Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1 RETURNING f1, f2, f3
+ Remote SQL: UPDATE public.loct2 SET f1 = $2, f2 = $3, f3 = $4 WHERE ctid = $1 RETURNING f1, f2, f3
-> Seq Scan on public.bar
Output: bar.f1, (bar.f2 + 100), bar.ctid
-> Foreign Scan on public.bar2
@@ -7370,6 +7477,81 @@ drop table bar cascade;
NOTICE: drop cascades to foreign table bar2
drop table loct1;
drop table loct2;
+-- Test pushing down UPDATE/DELETE joins to the remote server
+create table parent (a int, b text);
+create table loct1 (a int, b text);
+create table loct2 (a int, b text);
+create foreign table remt1 (a int, b text)
+ server loopback options (table_name 'loct1');
+create foreign table remt2 (a int, b text)
+ server loopback options (table_name 'loct2');
+alter foreign table remt1 inherit parent;
+insert into remt1 values (1, 'foo');
+insert into remt1 values (2, 'bar');
+insert into remt2 values (1, 'foo');
+insert into remt2 values (2, 'bar');
+analyze remt1;
+analyze remt2;
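+-- Both statements below are executed as a single joined remote statement
+-- (UPDATE ... FROM / DELETE ... USING), per the Remote SQL shown under
+-- the Foreign Update/Delete nodes.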
+explain (verbose, costs off)
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.parent
+ Output: parent.a, parent.b, remt2.a, remt2.b
+ Update on public.parent
+ Foreign Update on public.remt1
+ -> Nested Loop
+ Output: parent.a, (parent.b || remt2.b), parent.ctid, remt2.*, remt2.a, remt2.b
+ Join Filter: (parent.a = remt2.a)
+ -> Seq Scan on public.parent
+ Output: parent.a, parent.b, parent.ctid
+ -> Foreign Scan on public.remt2
+ Output: remt2.b, remt2.*, remt2.a
+ Remote SQL: SELECT a, b FROM public.loct2
+ -> Foreign Update
+ Remote SQL: UPDATE public.loct1 r4 SET b = (r4.b || r2.b) FROM public.loct2 r2 WHERE ((r4.a = r2.a)) RETURNING r4.a, r4.b, r2.a, r2.b
+(14 rows)
+
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+ a | b | a | b
+---+--------+---+-----
+ 1 | foofoo | 1 | foo
+ 2 | barbar | 2 | bar
+(2 rows)
+
+explain (verbose, costs off)
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------
+ Delete on public.parent
+ Output: parent.*
+ Delete on public.parent
+ Foreign Delete on public.remt1
+ -> Nested Loop
+ Output: parent.ctid, remt2.*
+ Join Filter: (parent.a = remt2.a)
+ -> Seq Scan on public.parent
+ Output: parent.ctid, parent.a
+ -> Foreign Scan on public.remt2
+ Output: remt2.*, remt2.a
+ Remote SQL: SELECT a, b FROM public.loct2
+ -> Foreign Delete
+ Remote SQL: DELETE FROM public.loct1 r4 USING public.loct2 r2 WHERE ((r4.a = r2.a)) RETURNING r4.a, r4.b
+(14 rows)
+
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+ parent
+------------
+ (1,foofoo)
+ (2,barbar)
+(2 rows)
+
+-- cleanup
+drop foreign table remt1;
+drop foreign table remt2;
+drop table loct1;
+drop table loct2;
+drop table parent;
-- ===================================================================
-- test tuple routing for foreign-table partitions
-- ===================================================================
@@ -7454,6 +7636,48 @@ select tableoid::regclass, * FROM itrtest;
remp1 | 1 | foo
(1 row)
+delete from itrtest;
+drop index loct1_idx;
+-- Test that remote triggers work with insert tuple routing
+create function br_insert_trigfunc() returns trigger as $$
+begin
+ new.b := new.b || ' triggered !';
+ return new;
+end
+$$ language plpgsql;
+create trigger loct1_br_insert_trigger before insert on loct1
+ for each row execute procedure br_insert_trigfunc();
+create trigger loct2_br_insert_trigger before insert on loct2
+ for each row execute procedure br_insert_trigfunc();
+-- The new values are concatenated with ' triggered !'
+insert into itrtest values (1, 'foo') returning *;
+ a | b
+---+-----------------
+ 1 | foo triggered !
+(1 row)
+
+insert into itrtest values (2, 'qux') returning *;
+ a | b
+---+-----------------
+ 2 | qux triggered !
+(1 row)
+
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+ a | b
+---+-------------------
+ 1 | test1 triggered !
+ 2 | test2 triggered !
+(2 rows)
+
+with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result;
+ a | b
+---+-------------------
+ 1 | test1 triggered !
+ 2 | test2 triggered !
+(2 rows)
+
+drop trigger loct1_br_insert_trigger on loct1;
+drop trigger loct2_br_insert_trigger on loct2;
drop table itrtest;
drop table loct1;
drop table loct2;
@@ -7518,6 +7742,188 @@ select tableoid::regclass, * FROM locp;
-- The executor should not let unexercised FDWs shut down
update utrtest set a = 1 where b = 'foo';
+-- Test that remote triggers work with update tuple routing
+create trigger loct_br_insert_trigger before insert on loct
+ for each row execute procedure br_insert_trigfunc();
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+-- Check case where the foreign partition is a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: remp.a, remp.b
+ Foreign Update on public.remp
+ Update on public.locp
+ -> Foreign Update on public.remp
+ Remote SQL: UPDATE public.loct SET a = 1 WHERE (((a = 1) OR (a = 2))) RETURNING a, b
+ -> Seq Scan on public.locp
+ Output: 1, locp.b, locp.ctid
+ Filter: ((locp.a = 1) OR (locp.a = 2))
+(9 rows)
+
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+ a | b
+---+-----------------
+ 1 | qux triggered !
+(1 row)
+
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+-- Check case where the foreign partition isn't a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 2 returning *;
+ QUERY PLAN
+--------------------------------------
+ Update on public.utrtest
+ Output: locp.a, locp.b
+ Update on public.locp
+ -> Seq Scan on public.locp
+ Output: 1, locp.b, locp.ctid
+ Filter: (locp.a = 2)
+(6 rows)
+
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 2 returning *;
+ a | b
+---+-----------------
+ 1 | qux triggered !
+(1 row)
+
+drop trigger loct_br_insert_trigger on loct;
+-- We can move rows to a foreign partition that has been updated already,
+-- but can't move rows to a foreign partition that hasn't been updated yet
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+-- Test the former case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 returning *;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Update on public.utrtest
+ Output: remp.a, remp.b
+ Foreign Update on public.remp
+ Update on public.locp
+ -> Foreign Update on public.remp
+ Remote SQL: UPDATE public.loct SET a = 1 RETURNING a, b
+ -> Seq Scan on public.locp
+ Output: 1, locp.b, locp.ctid
+(8 rows)
+
+update utrtest set a = 1 returning *;
+ a | b
+---+-----
+ 1 | foo
+ 1 | qux
+(2 rows)
+
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: remp.a, remp.b, "*VALUES*".column1
+ Foreign Update on public.remp
+ Remote SQL: UPDATE public.loct SET a = $2 WHERE ctid = $1 RETURNING a, b
+ Update on public.locp
+ -> Hash Join
+ Output: 1, remp.b, remp.ctid, "*VALUES*".*, "*VALUES*".column1
+ Hash Cond: (remp.a = "*VALUES*".column1)
+ -> Foreign Scan on public.remp
+ Output: remp.b, remp.ctid, remp.a
+ Remote SQL: SELECT a, b, ctid FROM public.loct FOR UPDATE
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Hash Join
+ Output: 1, locp.b, locp.ctid, "*VALUES*".*, "*VALUES*".column1
+ Hash Cond: (locp.a = "*VALUES*".column1)
+ -> Seq Scan on public.locp
+ Output: locp.b, locp.ctid, locp.a
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+(24 rows)
+
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+ a | b | x
+---+-----+---
+ 1 | foo | 1
+ 1 | qux | 2
+(2 rows)
+
+-- Change the definition of utrtest so that the foreign partition gets updated
+-- after the local partition
+delete from utrtest;
+alter table utrtest detach partition remp;
+drop foreign table remp;
+alter table loct drop constraint loct_a_check;
+alter table loct add check (a in (3));
+create foreign table remp (a int check (a in (3)), b text) server loopback options (table_name 'loct');
+alter table utrtest attach partition remp for values in (3);
+insert into utrtest values (2, 'qux');
+insert into utrtest values (3, 'xyzzy');
+-- Test the latter case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 returning *;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Update on public.utrtest
+ Output: locp.a, locp.b
+ Update on public.locp
+ Foreign Update on public.remp
+ -> Seq Scan on public.locp
+ Output: 3, locp.b, locp.ctid
+ -> Foreign Update on public.remp
+ Remote SQL: UPDATE public.loct SET a = 3 RETURNING a, b
+(8 rows)
+
+update utrtest set a = 3 returning *; -- ERROR
+ERROR: cannot route tuples into foreign table to be updated "remp"
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: locp.a, locp.b, "*VALUES*".column1
+ Update on public.locp
+ Foreign Update on public.remp
+ Remote SQL: UPDATE public.loct SET a = $2 WHERE ctid = $1 RETURNING a, b
+ -> Hash Join
+ Output: 3, locp.b, locp.ctid, "*VALUES*".*, "*VALUES*".column1
+ Hash Cond: (locp.a = "*VALUES*".column1)
+ -> Seq Scan on public.locp
+ Output: locp.b, locp.ctid, locp.a
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Hash Join
+ Output: 3, remp.b, remp.ctid, "*VALUES*".*, "*VALUES*".column1
+ Hash Cond: (remp.a = "*VALUES*".column1)
+ -> Foreign Scan on public.remp
+ Output: remp.b, remp.ctid, remp.a
+ Remote SQL: SELECT a, b, ctid FROM public.loct FOR UPDATE
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+(24 rows)
+
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *; -- ERROR
+ERROR: cannot route tuples into foreign table to be updated "remp"
drop table utrtest;
drop table loct;
-- Test copy tuple routing
@@ -7704,6 +8110,22 @@ drop trigger rem2_trig_row_before on rem2;
drop trigger rem2_trig_row_after on rem2;
drop trigger loc2_trig_row_before_insert on loc2;
delete from rem2;
+-- test COPY FROM with foreign table created in the same transaction
+create table loc3 (f1 int, f2 text);
+begin;
+create foreign table rem3 (f1 int, f2 text)
+ server loopback options(table_name 'loc3');
+copy rem3 from stdin;
+commit;
+select * from rem3;
+ f1 | f2
+----+-----
+ 1 | foo
+ 2 | bar
+(2 rows)
+
+drop foreign table rem3;
+drop table loc3;
-- ===================================================================
-- test IMPORT FOREIGN SCHEMA
-- ===================================================================
@@ -7948,7 +8370,7 @@ CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors");
CREATE SCHEMA import_dest5;
BEGIN;
DROP TYPE "Colors" CASCADE;
-NOTICE: drop cascades to table import_source.t5 column Col
+NOTICE: drop cascades to column Col of table import_source.t5
IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5)
FROM SERVER loopback INTO import_dest5; -- ERROR
ERROR: type "public.Colors" does not exist
@@ -8047,8 +8469,9 @@ ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false');
ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false');
INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i;
INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i;
-CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (0) TO (250)
+CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int)
SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true');
+ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250);
CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500)
SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true');
ANALYZE fprt2;
@@ -8077,17 +8500,16 @@ SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER J
400 | 400 | 0008
(4 rows)
--- left outer join + nullable clasue
-EXPLAIN (COSTS OFF)
+-- left outer join + nullable clause
+EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
- QUERY PLAN
------------------------------------------------------------------------------------
- Sort
- Sort Key: t1.a, ftprt2_p1.b, ftprt2_p1.c
- -> Append
- -> Foreign Scan
- Relations: (public.ftprt1_p1 t1) LEFT JOIN (public.ftprt2_p1 fprt2)
-(5 rows)
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.a, ftprt2_p1.b, ftprt2_p1.c
+ Relations: (public.ftprt1_p1 t1) LEFT JOIN (public.ftprt2_p1 fprt2)
+ Remote SQL: SELECT r5.a, r6.b, r6.c FROM (public.fprt1_p1 r5 LEFT JOIN public.fprt2_p1 r6 ON (((r5.a = r6.b)) AND ((r5.b = r6.a)) AND ((r6.a < 10)))) WHERE ((r5.a < 10)) ORDER BY r5.a ASC NULLS LAST, r6.b ASC NULLS LAST, r6.c ASC NULLS LAST
+(4 rows)
SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
a | b | c
@@ -8099,28 +8521,42 @@ SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10)
8 | |
(5 rows)
--- with whole-row reference
+-- with whole-row reference; partitionwise join does not apply
EXPLAIN (COSTS OFF)
-SELECT t1,t2 FROM fprt1 t1 JOIN fprt2 t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a % 25 =0 ORDER BY 1,2;
- QUERY PLAN
----------------------------------------------------------------------------------
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+ QUERY PLAN
+--------------------------------------------------------
Sort
Sort Key: ((t1.*)::fprt1), ((t2.*)::fprt2)
- -> Append
- -> Foreign Scan
- Relations: (public.ftprt1_p1 t1) INNER JOIN (public.ftprt2_p1 t2)
- -> Foreign Scan
- Relations: (public.ftprt1_p2 t1) INNER JOIN (public.ftprt2_p2 t2)
-(7 rows)
+ -> Hash Full Join
+ Hash Cond: (t1.a = t2.b)
+ -> Append
+ -> Foreign Scan on ftprt1_p1 t1
+ -> Foreign Scan on ftprt1_p2 t1_1
+ -> Hash
+ -> Append
+ -> Foreign Scan on ftprt2_p1 t2
+ -> Foreign Scan on ftprt2_p2 t2_1
+(11 rows)
-SELECT t1,t2 FROM fprt1 t1 JOIN fprt2 t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a % 25 =0 ORDER BY 1,2;
- t1 | t2
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+ wr | wr
----------------+----------------
(0,0,0000) | (0,0,0000)
+ (50,50,0001) |
+ (100,100,0002) |
(150,150,0003) | (150,150,0003)
+ (200,200,0004) |
(250,250,0005) | (250,250,0005)
+ (300,300,0006) |
+ (350,350,0007) |
(400,400,0008) | (400,400,0008)
-(4 rows)
+ (450,450,0009) |
+ | (75,75,0001)
+ | (225,225,0004)
+ | (325,325,0006)
+ | (475,475,0009)
+(14 rows)
-- join with lateral reference
EXPLAIN (COSTS OFF)
@@ -8145,7 +8581,7 @@ SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t
400 | 400
(4 rows)
--- with PHVs, partition-wise join selected but no join pushdown
+-- with PHVs, partitionwise join selected but no join pushdown
EXPLAIN (COSTS OFF)
SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
QUERY PLAN
@@ -8184,6 +8620,34 @@ SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE
| | 475 | t2_phv
(14 rows)
+-- test FOR UPDATE; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+ QUERY PLAN
+--------------------------------------------------------------
+ LockRows
+ -> Sort
+ Sort Key: t1.a
+ -> Hash Join
+ Hash Cond: (t2.b = t1.a)
+ -> Append
+ -> Foreign Scan on ftprt2_p1 t2
+ -> Foreign Scan on ftprt2_p2 t2_1
+ -> Hash
+ -> Append
+ -> Foreign Scan on ftprt1_p1 t1
+ -> Foreign Scan on ftprt1_p2 t1_1
+(12 rows)
+
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+ a | b
+-----+-----
+ 0 | 0
+ 150 | 150
+ 250 | 250
+ 400 | 400
+(4 rows)
+
RESET enable_partitionwise_join;
-- ===================================================================
-- test partitionwise aggregates
diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c
index 6854f1bd91e..7ea68c3ce3d 100644
--- a/contrib/postgres_fdw/option.c
+++ b/contrib/postgres_fdw/option.c
@@ -3,7 +3,7 @@
* option.c
* FDW option handling for postgres_fdw
*
- * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/option.c
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 30e572632ee..82d8140ba25 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -3,7 +3,7 @@
* postgres_fdw.c
* Foreign-data wrapper for remote PostgreSQL servers
*
- * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/postgres_fdw.c
@@ -16,6 +16,7 @@
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/pg_class.h"
#include "commands/defrem.h"
#include "commands/explain.h"
@@ -25,16 +26,17 @@
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
-#include "optimizer/cost.h"
#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
-#include "optimizer/var.h"
#include "optimizer/tlist.h"
#include "parser/parsetree.h"
#include "utils/builtins.h"
+#include "utils/float.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
@@ -183,6 +185,10 @@ typedef struct PgFdwModifyState
/* working memory context */
MemoryContext temp_cxt; /* context for per-tuple temporary data */
+
+ /* for update row movement if subplan result rel */
+ struct PgFdwModifyState *aux_fmstate; /* foreign-insert state, if
+ * created */
} PgFdwModifyState;
/*
@@ -244,6 +250,32 @@ typedef struct PgFdwAnalyzeState
MemoryContext temp_cxt; /* context for per-tuple temporary data */
} PgFdwAnalyzeState;
+/*
+ * This enum describes what's kept in the fdw_private list for a ForeignPath.
+ * We store:
+ *
+ * 1) Boolean flag showing whether the remote query has a final sort
+ * 2) Boolean flag showing whether the remote query has a LIMIT clause
+ */
+enum FdwPathPrivateIndex
+{
+ /* has-final-sort flag (as an integer Value node) */
+ FdwPathPrivateHasFinalSort,
+ /* has-limit flag (as an integer Value node) */
+ FdwPathPrivateHasLimit
+};
+
+/* Struct for extra information passed to estimate_path_cost_size() */
+typedef struct
+{
+ PathTarget *target;
+ bool has_final_sort;
+ bool has_limit;
+ double limit_tuples;
+ int64 count_est;
+ int64 offset_est;
+} PgFdwPathExtraData;
+
/*
* Identify the attribute where data conversion fails.
*/
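A note on how these two additions fit together: the flags travel from path creation to plan creation inside the ForeignPath's fdw_private list. The sketch below illustrates the convention; it is not part of the patch. The packing side is inferred from the add_foreign_ordered_paths()/add_foreign_final_paths() prototypes added further down, while the unpacking side appears verbatim in the postgresGetForeignPlan() hunk below. Only existing PostgreSQL list/Value APIs are used (list_make2, makeInteger, list_nth, intVal).

    /* Path creation: pack the flags, positions per FdwPathPrivateIndex. */
    fdw_private = list_make2(makeInteger(has_final_sort),
                             makeInteger(has_limit));

    /* Plan creation: unpack them again. */
    has_final_sort = intVal(list_nth(best_path->fdw_private,
                                     FdwPathPrivateHasFinalSort));
    has_limit = intVal(list_nth(best_path->fdw_private,
                                FdwPathPrivateHasLimit));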
@@ -277,178 +309,199 @@ PG_FUNCTION_INFO_V1(postgres_fdw_handler);
* FDW callback routines
*/
static void postgresGetForeignRelSize(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static void postgresGetForeignPaths(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static ForeignScan *postgresGetForeignPlan(PlannerInfo *root,
- RelOptInfo *foreignrel,
- Oid foreigntableid,
- ForeignPath *best_path,
- List *tlist,
- List *scan_clauses,
- Plan *outer_plan);
+ RelOptInfo *foreignrel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses,
+ Plan *outer_plan);
static void postgresBeginForeignScan(ForeignScanState *node, int eflags);
static TupleTableSlot *postgresIterateForeignScan(ForeignScanState *node);
static void postgresReScanForeignScan(ForeignScanState *node);
static void postgresEndForeignScan(ForeignScanState *node);
static void postgresAddForeignUpdateTargets(Query *parsetree,
- RangeTblEntry *target_rte,
- Relation target_relation);
+ RangeTblEntry *target_rte,
+ Relation target_relation);
static List *postgresPlanForeignModify(PlannerInfo *root,
- ModifyTable *plan,
- Index resultRelation,
- int subplan_index);
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
static void postgresBeginForeignModify(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- List *fdw_private,
- int subplan_index,
- int eflags);
+ ResultRelInfo *resultRelInfo,
+ List *fdw_private,
+ int subplan_index,
+ int eflags);
static TupleTableSlot *postgresExecForeignInsert(EState *estate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot,
- TupleTableSlot *planSlot);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
static TupleTableSlot *postgresExecForeignUpdate(EState *estate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot,
- TupleTableSlot *planSlot);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
static TupleTableSlot *postgresExecForeignDelete(EState *estate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot,
- TupleTableSlot *planSlot);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
static void postgresEndForeignModify(EState *estate,
- ResultRelInfo *resultRelInfo);
+ ResultRelInfo *resultRelInfo);
static void postgresBeginForeignInsert(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo);
+ ResultRelInfo *resultRelInfo);
static void postgresEndForeignInsert(EState *estate,
- ResultRelInfo *resultRelInfo);
+ ResultRelInfo *resultRelInfo);
static int postgresIsForeignRelUpdatable(Relation rel);
static bool postgresPlanDirectModify(PlannerInfo *root,
- ModifyTable *plan,
- Index resultRelation,
- int subplan_index);
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
static void postgresBeginDirectModify(ForeignScanState *node, int eflags);
static TupleTableSlot *postgresIterateDirectModify(ForeignScanState *node);
static void postgresEndDirectModify(ForeignScanState *node);
static void postgresExplainForeignScan(ForeignScanState *node,
- ExplainState *es);
+ ExplainState *es);
static void postgresExplainForeignModify(ModifyTableState *mtstate,
- ResultRelInfo *rinfo,
- List *fdw_private,
- int subplan_index,
- ExplainState *es);
+ ResultRelInfo *rinfo,
+ List *fdw_private,
+ int subplan_index,
+ ExplainState *es);
static void postgresExplainDirectModify(ForeignScanState *node,
- ExplainState *es);
+ ExplainState *es);
static bool postgresAnalyzeForeignTable(Relation relation,
- AcquireSampleRowsFunc *func,
- BlockNumber *totalpages);
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages);
static List *postgresImportForeignSchema(ImportForeignSchemaStmt *stmt,
- Oid serverOid);
+ Oid serverOid);
static void postgresGetForeignJoinPaths(PlannerInfo *root,
- RelOptInfo *joinrel,
- RelOptInfo *outerrel,
- RelOptInfo *innerrel,
- JoinType jointype,
- JoinPathExtraData *extra);
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
static bool postgresRecheckForeignScan(ForeignScanState *node,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
static void postgresGetForeignUpperPaths(PlannerInfo *root,
- UpperRelationKind stage,
- RelOptInfo *input_rel,
- RelOptInfo *output_rel,
- void *extra);
+ UpperRelationKind stage,
+ RelOptInfo *input_rel,
+ RelOptInfo *output_rel,
+ void *extra);
/*
* Helper functions
*/
static void estimate_path_cost_size(PlannerInfo *root,
- RelOptInfo *foreignrel,
- List *param_join_conds,
- List *pathkeys,
- double *p_rows, int *p_width,
- Cost *p_startup_cost, Cost *p_total_cost);
+ RelOptInfo *foreignrel,
+ List *param_join_conds,
+ List *pathkeys,
+ PgFdwPathExtraData *fpextra,
+ double *p_rows, int *p_width,
+ Cost *p_startup_cost, Cost *p_total_cost);
static void get_remote_estimate(const char *sql,
- PGconn *conn,
- double *rows,
- int *width,
- Cost *startup_cost,
- Cost *total_cost);
+ PGconn *conn,
+ double *rows,
+ int *width,
+ Cost *startup_cost,
+ Cost *total_cost);
+static void adjust_foreign_grouping_path_cost(PlannerInfo *root,
+ List *pathkeys,
+ double retrieved_rows,
+ double width,
+ double limit_tuples,
+ Cost *p_startup_cost,
+ Cost *p_run_cost);
static bool ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
- EquivalenceClass *ec, EquivalenceMember *em,
- void *arg);
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg);
static void create_cursor(ForeignScanState *node);
static void fetch_more_data(ForeignScanState *node);
static void close_cursor(PGconn *conn, unsigned int cursor_number);
static PgFdwModifyState *create_foreign_modify(EState *estate,
- ResultRelInfo *resultRelInfo,
- CmdType operation,
- Plan *subplan,
- char *query,
- List *target_attrs,
- bool has_returning,
- List *retrieved_attrs);
+ RangeTblEntry *rte,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ Plan *subplan,
+ char *query,
+ List *target_attrs,
+ bool has_returning,
+ List *retrieved_attrs);
+static TupleTableSlot *execute_foreign_modify(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
static void prepare_foreign_modify(PgFdwModifyState *fmstate);
static const char **convert_prep_stmt_params(PgFdwModifyState *fmstate,
- ItemPointer tupleid,
- TupleTableSlot *slot);
+ ItemPointer tupleid,
+ TupleTableSlot *slot);
static void store_returning_result(PgFdwModifyState *fmstate,
- TupleTableSlot *slot, PGresult *res);
+ TupleTableSlot *slot, PGresult *res);
static void finish_foreign_modify(PgFdwModifyState *fmstate);
static List *build_remote_returning(Index rtindex, Relation rel,
- List *returningList);
+ List *returningList);
static void rebuild_fdw_scan_tlist(ForeignScan *fscan, List *tlist);
static void execute_dml_stmt(ForeignScanState *node);
static TupleTableSlot *get_returning_data(ForeignScanState *node);
static void init_returning_filter(PgFdwDirectModifyState *dmstate,
- List *fdw_scan_tlist,
- Index rtindex);
+ List *fdw_scan_tlist,
+ Index rtindex);
static TupleTableSlot *apply_returning_filter(PgFdwDirectModifyState *dmstate,
- TupleTableSlot *slot,
- EState *estate);
+ TupleTableSlot *slot,
+ EState *estate);
static void prepare_query_params(PlanState *node,
- List *fdw_exprs,
- int numParams,
- FmgrInfo **param_flinfo,
- List **param_exprs,
- const char ***param_values);
+ List *fdw_exprs,
+ int numParams,
+ FmgrInfo **param_flinfo,
+ List **param_exprs,
+ const char ***param_values);
static void process_query_params(ExprContext *econtext,
- FmgrInfo *param_flinfo,
- List *param_exprs,
- const char **param_values);
-static int postgresAcquireSampleRowsFunc(Relation relation, int elevel,
- HeapTuple *rows, int targrows,
- double *totalrows,
- double *totaldeadrows);
+ FmgrInfo *param_flinfo,
+ List *param_exprs,
+ const char **param_values);
+static int postgresAcquireSampleRowsFunc(Relation relation, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows);
static void analyze_row_processor(PGresult *res, int row,
- PgFdwAnalyzeState *astate);
+ PgFdwAnalyzeState *astate);
static HeapTuple make_tuple_from_result_row(PGresult *res,
- int row,
- Relation rel,
- AttInMetadata *attinmeta,
- List *retrieved_attrs,
- ForeignScanState *fsstate,
- MemoryContext temp_context);
+ int row,
+ Relation rel,
+ AttInMetadata *attinmeta,
+ List *retrieved_attrs,
+ ForeignScanState *fsstate,
+ MemoryContext temp_context);
static void conversion_error_callback(void *arg);
static bool foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel,
- JoinType jointype, RelOptInfo *outerrel, RelOptInfo *innerrel,
- JoinPathExtraData *extra);
+ JoinType jointype, RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinPathExtraData *extra);
static bool foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
- Node *havingQual);
+ Node *havingQual);
static List *get_useful_pathkeys_for_relation(PlannerInfo *root,
- RelOptInfo *rel);
+ RelOptInfo *rel);
static List *get_useful_ecs_for_relation(PlannerInfo *root, RelOptInfo *rel);
static void add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
- Path *epq_path);
+ Path *epq_path);
static void add_foreign_grouping_paths(PlannerInfo *root,
- RelOptInfo *input_rel,
- RelOptInfo *grouped_rel,
- GroupPathExtraData *extra);
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ GroupPathExtraData *extra);
+static void add_foreign_ordered_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *ordered_rel);
+static void add_foreign_final_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *final_rel,
+ FinalPathExtraData *extra);
static void apply_server_options(PgFdwRelationInfo *fpinfo);
static void apply_table_options(PgFdwRelationInfo *fpinfo);
static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
- const PgFdwRelationInfo *fpinfo_o,
- const PgFdwRelationInfo *fpinfo_i);
+ const PgFdwRelationInfo *fpinfo_o,
+ const PgFdwRelationInfo *fpinfo_i);
/*
@@ -608,10 +661,11 @@ postgresGetForeignRelSize(PlannerInfo *root,
cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
/*
- * Set cached relation costs to some negative value, so that we can detect
- * when they are set to some sensible costs during one (usually the first)
- * of the calls to estimate_path_cost_size().
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values
+ * during one (usually the first) of the calls to estimate_path_cost_size().
*/
+ fpinfo->retrieved_rows = -1;
fpinfo->rel_startup_cost = -1;
fpinfo->rel_total_cost = -1;
@@ -629,7 +683,7 @@ postgresGetForeignRelSize(PlannerInfo *root,
* values in fpinfo so we don't need to do it again to generate the
* basic foreign path.
*/
- estimate_path_cost_size(root, baserel, NIL, NIL,
+ estimate_path_cost_size(root, baserel, NIL, NIL, NULL,
&fpinfo->rows, &fpinfo->width,
&fpinfo->startup_cost, &fpinfo->total_cost);
@@ -660,7 +714,7 @@ postgresGetForeignRelSize(PlannerInfo *root,
set_baserel_size_estimates(root, baserel);
/* Fill in basically-bogus cost estimates for use later. */
- estimate_path_cost_size(root, baserel, NIL, NIL,
+ estimate_path_cost_size(root, baserel, NIL, NIL, NULL,
&fpinfo->rows, &fpinfo->width,
&fpinfo->startup_cost, &fpinfo->total_cost);
}
@@ -819,6 +873,7 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
* Pushing the query_pathkeys to the remote server is always worth
* considering, because it might let us avoid a local sort.
*/
+ fpinfo->qp_is_pushdown_safe = false;
if (root->query_pathkeys)
{
bool query_pathkeys_ok = true;
@@ -849,7 +904,10 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
}
if (query_pathkeys_ok)
+ {
useful_pathkeys_list = list_make1(list_copy(root->query_pathkeys));
+ fpinfo->qp_is_pushdown_safe = true;
+ }
}
/*
@@ -929,6 +987,9 @@ postgresGetForeignPaths(PlannerInfo *root,
* baserestrict conditions we were able to send to remote, there might
* actually be an indexscan happening there). We already did all the work
* to estimate cost and size of this path.
+ *
+ * Although this path uses no join clauses, it could still have required
+ * parameterization due to LATERAL refs in its tlist.
*/
path = create_foreignscan_path(root, baserel,
NULL, /* default pathtarget */
@@ -936,7 +997,7 @@ postgresGetForeignPaths(PlannerInfo *root,
fpinfo->startup_cost,
fpinfo->total_cost,
NIL, /* no pathkeys */
- NULL, /* no outer rel either */
+ baserel->lateral_relids,
NULL, /* no extra plan */
NIL); /* no fdw_private list */
add_path(baserel, (Path *) path);
@@ -1091,7 +1152,7 @@ postgresGetForeignPaths(PlannerInfo *root,
/* Get a cost estimate from the remote */
estimate_path_cost_size(root, baserel,
- param_info->ppi_clauses, NIL,
+ param_info->ppi_clauses, NIL, NULL,
&rows, &width,
&startup_cost, &total_cost);
@@ -1138,8 +1199,21 @@ postgresGetForeignPlan(PlannerInfo *root,
List *fdw_recheck_quals = NIL;
List *retrieved_attrs;
StringInfoData sql;
+ bool has_final_sort = false;
+ bool has_limit = false;
ListCell *lc;
+ /*
+ * Get FDW private data created by postgresGetForeignUpperPaths(), if any.
+ */
+ if (best_path->fdw_private)
+ {
+ has_final_sort = intVal(list_nth(best_path->fdw_private,
+ FdwPathPrivateHasFinalSort));
+ has_limit = intVal(list_nth(best_path->fdw_private,
+ FdwPathPrivateHasLimit));
+ }
+
if (IS_SIMPLE_REL(foreignrel))
{
/*
@@ -1227,11 +1301,9 @@ postgresGetForeignPlan(PlannerInfo *root,
/*
* Ensure that the outer plan produces a tuple whose descriptor
- * matches our scan tuple slot. This is safe because all scans and
- * joins support projection, so we never need to insert a Result node.
- * Also, remove the local conditions from outer plan's quals, lest
- * they will be evaluated twice, once by the local plan and once by
- * the scan.
+ * matches our scan tuple slot. Also, remove the local conditions
+ * from outer plan's quals, lest they be evaluated twice, once by the
+ * local plan and once by the scan.
*/
if (outer_plan)
{
@@ -1244,23 +1316,42 @@ postgresGetForeignPlan(PlannerInfo *root,
*/
Assert(!IS_UPPER_REL(foreignrel));
- outer_plan->targetlist = fdw_scan_tlist;
-
+ /*
+ * First, update the plan's qual list if possible. In some cases
+ * the quals might be enforced below the topmost plan level, in
+ * which case we'll fail to remove them; it's not worth working
+ * harder than this.
+ */
foreach(lc, local_exprs)
{
- Join *join_plan = (Join *) outer_plan;
Node *qual = lfirst(lc);
outer_plan->qual = list_delete(outer_plan->qual, qual);
/*
* For an inner join the local conditions of foreign scan plan
- * can be part of the joinquals as well.
+ * can be part of the joinquals as well. (They might also be
+ * in the mergequals or hashquals, but we can't touch those
+ * without breaking the plan.)
*/
- if (join_plan->jointype == JOIN_INNER)
- join_plan->joinqual = list_delete(join_plan->joinqual,
- qual);
+ if (IsA(outer_plan, NestLoop) ||
+ IsA(outer_plan, MergeJoin) ||
+ IsA(outer_plan, HashJoin))
+ {
+ Join *join_plan = (Join *) outer_plan;
+
+ if (join_plan->jointype == JOIN_INNER)
+ join_plan->joinqual = list_delete(join_plan->joinqual,
+ qual);
+ }
}
+
+ /*
+ * Now fix the subplan's tlist --- this might result in inserting
+ * a Result node atop the plan tree.
+ */
+ outer_plan = change_plan_targetlist(outer_plan, fdw_scan_tlist,
+ best_path->path.parallel_safe);
}
}
@@ -1271,7 +1362,8 @@ postgresGetForeignPlan(PlannerInfo *root,
initStringInfo(&sql);
deparseSelectStmtForRel(&sql, root, foreignrel, fdw_scan_tlist,
remote_exprs, best_path->path.pathkeys,
- false, &retrieved_attrs, &params_list);
+ has_final_sort, has_limit, false,
+ &retrieved_attrs, &params_list);
/* Remember remote_exprs for possible use by postgresPlanDirectModify */
fpinfo->final_remote_exprs = remote_exprs;
@@ -1343,7 +1435,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags)
rtindex = fsplan->scan.scanrelid;
else
rtindex = bms_next_member(fsplan->fs_relids, -1);
- rte = rt_fetch(rtindex, estate->es_range_table);
+ rte = exec_rt_fetch(rtindex, estate);
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/* Get info about foreign table. */
@@ -1441,10 +1533,9 @@ postgresIterateForeignScan(ForeignScanState *node)
/*
* Return the next tuple.
*/
- ExecStoreTuple(fsstate->tuples[fsstate->next_tuple++],
- slot,
- InvalidBuffer,
- false);
+ ExecStoreHeapTuple(fsstate->tuples[fsstate->next_tuple++],
+ slot,
+ false);
return slot;
}
@@ -1581,6 +1672,7 @@ postgresPlanForeignModify(PlannerInfo *root,
Relation rel;
StringInfoData sql;
List *targetAttrs = NIL;
+ List *withCheckOptionList = NIL;
List *returningList = NIL;
List *retrieved_attrs = NIL;
bool doNothing = false;
@@ -1591,16 +1683,23 @@ postgresPlanForeignModify(PlannerInfo *root,
* Core code already has some lock on each rel being planned, so we can
* use NoLock here.
*/
- rel = heap_open(rte->relid, NoLock);
+ rel = table_open(rte->relid, NoLock);
/*
* In an INSERT, we transmit all columns that are defined in the foreign
- * table. In an UPDATE, we transmit only columns that were explicitly
- * targets of the UPDATE, so as to avoid unnecessary data transmission.
- * (We can't do that for INSERT since we would miss sending default values
- * for columns not listed in the source statement.)
- */
- if (operation == CMD_INSERT)
+ * table. In an UPDATE, if there are BEFORE ROW UPDATE triggers on the
+ * foreign table, we transmit all columns like INSERT; else we transmit
+ * only columns that were explicitly targets of the UPDATE, so as to avoid
+ * unnecessary data transmission. (We can't do that for INSERT since we
+ * would miss sending default values for columns not listed in the source
+ * statement, and for UPDATE if there are BEFORE ROW UPDATE triggers since
+ * those triggers might change values for non-target columns, in which
+ * case we would miss sending changed values for those columns.)
+ */
+ if (operation == CMD_INSERT ||
+ (operation == CMD_UPDATE &&
+ rel->trigdesc &&
+ rel->trigdesc->trig_update_before_row))
{
TupleDesc tupdesc = RelationGetDescr(rel);
int attnum;
@@ -1616,9 +1715,10 @@ postgresPlanForeignModify(PlannerInfo *root,
else if (operation == CMD_UPDATE)
{
int col;
+ Bitmapset *allUpdatedCols = bms_union(rte->updatedCols, rte->extraUpdatedCols);
col = -1;
- while ((col = bms_next_member(rte->updatedCols, col)) >= 0)
+ while ((col = bms_next_member(allUpdatedCols, col)) >= 0)
{
/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
@@ -1629,6 +1729,13 @@ postgresPlanForeignModify(PlannerInfo *root,
}
}
+ /*
+ * Extract the relevant WITH CHECK OPTION list if any.
+ */
+ if (plan->withCheckOptionLists)
+ withCheckOptionList = (List *) list_nth(plan->withCheckOptionLists,
+ subplan_index);
+
/*
* Extract the relevant RETURNING list if any.
*/
@@ -1653,17 +1760,19 @@ postgresPlanForeignModify(PlannerInfo *root,
switch (operation)
{
case CMD_INSERT:
- deparseInsertSql(&sql, root, resultRelation, rel,
- targetAttrs, doNothing, returningList,
+ deparseInsertSql(&sql, rte, resultRelation, rel,
+ targetAttrs, doNothing,
+ withCheckOptionList, returningList,
&retrieved_attrs);
break;
case CMD_UPDATE:
- deparseUpdateSql(&sql, root, resultRelation, rel,
- targetAttrs, returningList,
+ deparseUpdateSql(&sql, rte, resultRelation, rel,
+ targetAttrs,
+ withCheckOptionList, returningList,
&retrieved_attrs);
break;
case CMD_DELETE:
- deparseDeleteSql(&sql, root, resultRelation, rel,
+ deparseDeleteSql(&sql, rte, resultRelation, rel,
returningList,
&retrieved_attrs);
break;
@@ -1672,7 +1781,7 @@ postgresPlanForeignModify(PlannerInfo *root,
break;
}
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
/*
* Build the fdw_private list that will be available to the executor.
@@ -1700,6 +1809,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
List *target_attrs;
bool has_returning;
List *retrieved_attrs;
+ RangeTblEntry *rte;
/*
* Do nothing in EXPLAIN (no ANALYZE) case. resultRelInfo->ri_FdwState
@@ -1718,8 +1828,13 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
retrieved_attrs = (List *) list_nth(fdw_private,
FdwModifyPrivateRetrievedAttrs);
+ /* Find RTE. */
+ rte = exec_rt_fetch(resultRelInfo->ri_RangeTableIndex,
+ mtstate->ps.state);
+
/* Construct an execution state. */
fmstate = create_foreign_modify(mtstate->ps.state,
+ rte,
resultRelInfo,
mtstate->operation,
mtstate->mt_plans[subplan_index]->plan,
@@ -1742,57 +1857,21 @@ postgresExecForeignInsert(EState *estate,
TupleTableSlot *planSlot)
{
PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
- const char **p_values;
- PGresult *res;
- int n_rows;
-
- /* Set up the prepared statement on the remote server, if we didn't yet */
- if (!fmstate->p_name)
- prepare_foreign_modify(fmstate);
-
- /* Convert parameters needed by prepared statement to text form */
- p_values = convert_prep_stmt_params(fmstate, NULL, slot);
-
- /*
- * Execute the prepared statement.
- */
- if (!PQsendQueryPrepared(fmstate->conn,
- fmstate->p_name,
- fmstate->p_nums,
- p_values,
- NULL,
- NULL,
- 0))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+ TupleTableSlot *rslot;
/*
- * Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
+ * If the fmstate has aux_fmstate set, use the aux_fmstate (see
+ * postgresBeginForeignInsert())
*/
- res = pgfdw_get_result(fmstate->conn, fmstate->query);
- if (PQresultStatus(res) !=
- (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
-
- /* Check number of rows affected, and fetch RETURNING tuple if any */
- if (fmstate->has_returning)
- {
- n_rows = PQntuples(res);
- if (n_rows > 0)
- store_returning_result(fmstate, slot, res);
- }
- else
- n_rows = atoi(PQcmdTuples(res));
-
- /* And clean up */
- PQclear(res);
-
- MemoryContextReset(fmstate->temp_cxt);
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
+ rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
+ slot, planSlot);
+ /* Revert that change */
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate;
- /* Return NULL if nothing was inserted on the remote end */
- return (n_rows > 0) ? slot : NULL;
+ return rslot;
}
/*
@@ -1805,70 +1884,8 @@ postgresExecForeignUpdate(EState *estate,
TupleTableSlot *slot,
TupleTableSlot *planSlot)
{
- PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
- Datum datum;
- bool isNull;
- const char **p_values;
- PGresult *res;
- int n_rows;
-
- /* Set up the prepared statement on the remote server, if we didn't yet */
- if (!fmstate->p_name)
- prepare_foreign_modify(fmstate);
-
- /* Get the ctid that was passed up as a resjunk column */
- datum = ExecGetJunkAttribute(planSlot,
- fmstate->ctidAttno,
- &isNull);
- /* shouldn't ever get a null result... */
- if (isNull)
- elog(ERROR, "ctid is NULL");
-
- /* Convert parameters needed by prepared statement to text form */
- p_values = convert_prep_stmt_params(fmstate,
- (ItemPointer) DatumGetPointer(datum),
- slot);
-
- /*
- * Execute the prepared statement.
- */
- if (!PQsendQueryPrepared(fmstate->conn,
- fmstate->p_name,
- fmstate->p_nums,
- p_values,
- NULL,
- NULL,
- 0))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
-
- /*
- * Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
- */
- res = pgfdw_get_result(fmstate->conn, fmstate->query);
- if (PQresultStatus(res) !=
- (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
-
- /* Check number of rows affected, and fetch RETURNING tuple if any */
- if (fmstate->has_returning)
- {
- n_rows = PQntuples(res);
- if (n_rows > 0)
- store_returning_result(fmstate, slot, res);
- }
- else
- n_rows = atoi(PQcmdTuples(res));
-
- /* And clean up */
- PQclear(res);
-
- MemoryContextReset(fmstate->temp_cxt);
-
- /* Return NULL if nothing was updated on the remote end */
- return (n_rows > 0) ? slot : NULL;
+ return execute_foreign_modify(estate, resultRelInfo, CMD_UPDATE,
+ slot, planSlot);
}
/*
@@ -1881,70 +1898,8 @@ postgresExecForeignDelete(EState *estate,
TupleTableSlot *slot,
TupleTableSlot *planSlot)
{
- PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
- Datum datum;
- bool isNull;
- const char **p_values;
- PGresult *res;
- int n_rows;
-
- /* Set up the prepared statement on the remote server, if we didn't yet */
- if (!fmstate->p_name)
- prepare_foreign_modify(fmstate);
-
- /* Get the ctid that was passed up as a resjunk column */
- datum = ExecGetJunkAttribute(planSlot,
- fmstate->ctidAttno,
- &isNull);
- /* shouldn't ever get a null result... */
- if (isNull)
- elog(ERROR, "ctid is NULL");
-
- /* Convert parameters needed by prepared statement to text form */
- p_values = convert_prep_stmt_params(fmstate,
- (ItemPointer) DatumGetPointer(datum),
- NULL);
-
- /*
- * Execute the prepared statement.
- */
- if (!PQsendQueryPrepared(fmstate->conn,
- fmstate->p_name,
- fmstate->p_nums,
- p_values,
- NULL,
- NULL,
- 0))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
-
- /*
- * Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
- */
- res = pgfdw_get_result(fmstate->conn, fmstate->query);
- if (PQresultStatus(res) !=
- (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
-
- /* Check number of rows affected, and fetch RETURNING tuple if any */
- if (fmstate->has_returning)
- {
- n_rows = PQntuples(res);
- if (n_rows > 0)
- store_returning_result(fmstate, slot, res);
- }
- else
- n_rows = atoi(PQcmdTuples(res));
-
- /* And clean up */
- PQclear(res);
-
- MemoryContextReset(fmstate->temp_cxt);
-
- /* Return NULL if nothing was deleted on the remote end */
- return (n_rows > 0) ? slot : NULL;
+ return execute_foreign_modify(estate, resultRelInfo, CMD_DELETE,
+ slot, planSlot);
}
/*
@@ -1974,11 +1929,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
ResultRelInfo *resultRelInfo)
{
PgFdwModifyState *fmstate;
- Plan *plan = mtstate->ps.plan;
+ ModifyTable *plan = castNode(ModifyTable, mtstate->ps.plan);
+ EState *estate = mtstate->ps.state;
+ Index resultRelation = resultRelInfo->ri_RangeTableIndex;
Relation rel = resultRelInfo->ri_RelationDesc;
RangeTblEntry *rte;
- Query *query;
- PlannerInfo *root;
TupleDesc tupdesc = RelationGetDescr(rel);
int attnum;
StringInfoData sql;
@@ -1986,19 +1941,23 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
List *retrieved_attrs = NIL;
bool doNothing = false;
- initStringInfo(&sql);
+ /*
+ * If the foreign table we are about to insert routed rows into is also an
+ * UPDATE subplan result rel that will be updated later, proceeding with
+ * the INSERT will result in the later UPDATE incorrectly modifying those
+ * routed rows, so prevent the INSERT --- it would be nice if we could
+ * handle this case, but for now, throw an error for safety.
+ */
+ if (plan && plan->operation == CMD_UPDATE &&
+ (resultRelInfo->ri_usesFdwDirectModify ||
+ resultRelInfo->ri_FdwState) &&
+ resultRelInfo > mtstate->resultRelInfo + mtstate->mt_whichplan)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot route tuples into foreign table to be updated \"%s\"",
+ RelationGetRelationName(rel))));
- /* Set up largely-dummy planner state. */
- rte = makeNode(RangeTblEntry);
- rte->rtekind = RTE_RELATION;
- rte->relid = RelationGetRelid(rel);
- rte->relkind = RELKIND_FOREIGN_TABLE;
- query = makeNode(Query);
- query->commandType = CMD_INSERT;
- query->resultRelation = 1;
- query->rtable = list_make1(rte);
- root = makeNode(PlannerInfo);
- root->parse = query;
+ initStringInfo(&sql);
/* We transmit all columns that are defined in the foreign table. */
for (attnum = 1; attnum <= tupdesc->natts; attnum++)
@@ -2012,7 +1971,7 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
/* Check if we add the ON CONFLICT clause to the remote query. */
if (plan)
{
- OnConflictAction onConflictAction = ((ModifyTable *) plan)->onConflictAction;
+ OnConflictAction onConflictAction = plan->onConflictAction;
/* We only support DO NOTHING without an inference specification. */
if (onConflictAction == ONCONFLICT_NOTHING)
@@ -2022,12 +1981,42 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
(int) onConflictAction);
}
+ /*
+ * If the foreign table is a partition, we need to create a new RTE
+ * describing the foreign table for use by deparseInsertSql and
+ * create_foreign_modify() below, after first copying the parent's RTE and
+ * modifying some fields to describe the foreign partition to work on.
+ * However, if this is invoked by UPDATE, the existing RTE may already
+ * correspond to this partition if it is one of the UPDATE subplan target
+ * rels; in that case, we can just use the existing RTE as-is.
+ */
+ rte = exec_rt_fetch(resultRelation, estate);
+ if (rte->relid != RelationGetRelid(rel))
+ {
+ rte = copyObject(rte);
+ rte->relid = RelationGetRelid(rel);
+ rte->relkind = RELKIND_FOREIGN_TABLE;
+
+ /*
+ * For UPDATE, we must use the RT index of the first subplan target
+ * rel's RTE, because the core code would have built expressions for
+ * the partition, such as RETURNING, using that RT index as varno of
+ * Vars contained in those expressions.
+ */
+ if (plan && plan->operation == CMD_UPDATE &&
+ resultRelation == plan->rootRelation)
+ resultRelation = mtstate->resultRelInfo[0].ri_RangeTableIndex;
+ }
+
/* Construct the SQL command string. */
- deparseInsertSql(&sql, root, 1, rel, targetAttrs, doNothing,
- resultRelInfo->ri_returningList, &retrieved_attrs);
+ deparseInsertSql(&sql, rte, resultRelation, rel, targetAttrs, doNothing,
+ resultRelInfo->ri_WithCheckOptions,
+ resultRelInfo->ri_returningList,
+ &retrieved_attrs);
/* Construct an execution state. */
fmstate = create_foreign_modify(mtstate->ps.state,
+ rte,
resultRelInfo,
CMD_INSERT,
NULL,
@@ -2036,7 +2025,19 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
retrieved_attrs != NIL,
retrieved_attrs);
- resultRelInfo->ri_FdwState = fmstate;
+ /*
+ * If the given resultRelInfo already has PgFdwModifyState set, it means
+ * the foreign table is an UPDATE subplan result rel, in which case store
+ * the resulting state into the aux_fmstate of the PgFdwModifyState.
+ */
+ if (resultRelInfo->ri_FdwState)
+ {
+ Assert(plan && plan->operation == CMD_UPDATE);
+ Assert(resultRelInfo->ri_usesFdwDirectModify == false);
+ ((PgFdwModifyState *) resultRelInfo->ri_FdwState)->aux_fmstate = fmstate;
+ }
+ else
+ resultRelInfo->ri_FdwState = fmstate;
}
/*
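The aux_fmstate hand-off above is the subtlest part of this hunk: a result rel keeps exactly one active ri_FdwState, so the tuple-routing INSERT state is parked on the UPDATE state's aux_fmstate field and swapped in only around each routed insert (see postgresExecForeignInsert() earlier in this patch, and postgresEndForeignInsert() in the next hunk). A self-contained toy model of that swap; all scaffolding here is invented for illustration, only the field name and the swap/restore shape come from the patch:

    #include <stdio.h>

    /* Toy stand-in for PgFdwModifyState: one active state per result rel,
     * plus a parked foreign-insert state (field name mirrors the patch). */
    typedef struct ModifyState
    {
        const char *query;               /* remote SQL this state executes */
        struct ModifyState *aux_fmstate; /* parked INSERT state, if created */
    } ModifyState;

    /* Mimics postgresExecForeignInsert(): expose the parked INSERT state
     * for the duration of one insert, then restore the UPDATE state. */
    static void
    routed_insert(ModifyState **ri_FdwState)
    {
        ModifyState *fmstate = *ri_FdwState;

        if (fmstate->aux_fmstate)
            *ri_FdwState = fmstate->aux_fmstate;
        printf("running: %s\n", (*ri_FdwState)->query);
        if (fmstate->aux_fmstate)
            *ri_FdwState = fmstate;
    }

    int
    main(void)
    {
        ModifyState ins = {"INSERT INTO part ...", NULL};
        ModifyState upd = {"UPDATE part SET ...", &ins};
        ModifyState *ri_FdwState = &upd;     /* UPDATE state is active */

        routed_insert(&ri_FdwState);         /* runs the INSERT remotely */
        printf("active: %s\n", ri_FdwState->query);  /* UPDATE state again */
        return 0;
    }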
@@ -2051,6 +2052,13 @@ postgresEndForeignInsert(EState *estate,
Assert(fmstate != NULL);
+ /*
+ * If the fmstate has aux_fmstate set, get the aux_fmstate (see
+ * postgresBeginForeignInsert())
+ */
+ if (fmstate->aux_fmstate)
+ fmstate = fmstate->aux_fmstate;
+
/* Destroy the execution state */
finish_foreign_modify(fmstate);
}
@@ -2237,7 +2245,7 @@ postgresPlanDirectModify(PlannerInfo *root,
* Core code already has some lock on each rel being planned, so we can
* use NoLock here.
*/
- rel = heap_open(rte->relid, NoLock);
+ rel = table_open(rte->relid, NoLock);
/*
* Recall the qual clauses that must be evaluated remotely. (These are
@@ -2323,7 +2331,7 @@ postgresPlanDirectModify(PlannerInfo *root,
rebuild_fdw_scan_tlist(fscan, returningList);
}
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
return true;
}
@@ -2361,7 +2369,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
* ExecCheckRTEPerms() does.
*/
rtindex = estate->es_result_relation_info->ri_RangeTableIndex;
- rte = rt_fetch(rtindex, estate->es_range_table);
+ rte = exec_rt_fetch(rtindex, estate);
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/* Get info about foreign table. */
@@ -2511,10 +2519,6 @@ postgresEndDirectModify(ForeignScanState *node)
ReleaseConnection(dmstate->conn);
dmstate->conn = NULL;
- /* close the target relation. */
- if (dmstate->resultRel)
- ExecCloseScanRelation(dmstate->resultRel);
-
/* MemoryContext will be deleted automatically. */
}
@@ -2599,8 +2603,10 @@ postgresExplainDirectModify(ForeignScanState *node, ExplainState *es)
*
* param_join_conds are the parameterization clauses with outer relations.
* pathkeys specify the expected sort order if any for given path being costed.
+ * fpextra specifies additional post-scan/join-processing steps such as the
+ * final sort and the LIMIT restriction.
*
- * The function returns the cost and size estimates in p_row, p_width,
+ * The function returns the cost and size estimates in p_rows, p_width,
* p_startup_cost and p_total_cost variables.
*/
static void
@@ -2608,6 +2614,7 @@ estimate_path_cost_size(PlannerInfo *root,
RelOptInfo *foreignrel,
List *param_join_conds,
List *pathkeys,
+ PgFdwPathExtraData *fpextra,
double *p_rows, int *p_width,
Cost *p_startup_cost, Cost *p_total_cost)
{
@@ -2617,7 +2624,9 @@ estimate_path_cost_size(PlannerInfo *root,
int width;
Cost startup_cost;
Cost total_cost;
- Cost cpu_per_tuple;
+
+ /* Make sure the core code has set up the relation's reltarget */
+ Assert(foreignrel->reltarget);
/*
* If the table or the server is configured to use remote estimates,
@@ -2658,7 +2667,7 @@ estimate_path_cost_size(PlannerInfo *root,
* baserestrictinfo plus any extra join_conds relevant to this
* particular path.
*/
- remote_conds = list_concat(list_copy(remote_param_join_conds),
+ remote_conds = list_concat(remote_param_join_conds,
fpinfo->remote_conds);
/*
@@ -2669,8 +2678,10 @@ estimate_path_cost_size(PlannerInfo *root,
initStringInfo(&sql);
appendStringInfoString(&sql, "EXPLAIN ");
deparseSelectStmtForRel(&sql, root, foreignrel, fdw_scan_tlist,
- remote_conds, pathkeys, false,
- &retrieved_attrs, NULL);
+ remote_conds, pathkeys,
+ fpextra ? fpextra->has_final_sort : false,
+ fpextra ? fpextra->has_limit : false,
+ false, &retrieved_attrs, NULL);
/* Get the remote estimate */
conn = GetConnection(fpinfo->user, false);
@@ -2696,6 +2707,24 @@ estimate_path_cost_size(PlannerInfo *root,
cost_qual_eval(&local_cost, local_param_join_conds, root);
startup_cost += local_cost.startup;
total_cost += local_cost.per_tuple * retrieved_rows;
+
+ /*
+ * Add in tlist eval cost for each output row. In case of an
+ * aggregate, some of the tlist expressions such as grouping
+ * expressions will be evaluated remotely, so adjust the costs.
+ */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ total_cost += foreignrel->reltarget->cost.startup;
+ total_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ if (IS_UPPER_REL(foreignrel))
+ {
+ QualCost tlist_cost;
+
+ cost_qual_eval(&tlist_cost, fdw_scan_tlist, root);
+ startup_cost -= tlist_cost.startup;
+ total_cost -= tlist_cost.startup;
+ total_cost -= tlist_cost.per_tuple * rows;
+ }
}
else
{
@@ -2708,26 +2737,39 @@ estimate_path_cost_size(PlannerInfo *root,
Assert(param_join_conds == NIL);
/*
- * Use rows/width estimates made by set_baserel_size_estimates() for
- * base foreign relations and set_joinrel_size_estimates() for join
- * between foreign relations.
+ * We will come here again and again with a different set of pathkeys or
+ * additional post-scan/join-processing steps that the caller wants to
+ * cost. We don't need to calculate the cost/size estimates for the
+ * underlying scan, join, or grouping each time. Instead, use those
+ * estimates if we have cached them already.
*/
- rows = foreignrel->rows;
- width = foreignrel->reltarget->width;
-
- /* Back into an estimate of the number of retrieved rows. */
- retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
-
- /*
- * We will come here again and again with different set of pathkeys
- * that caller wants to cost. We don't need to calculate the cost of
- * bare scan each time. Instead, use the costs if we have cached them
- * already.
- */
- if (fpinfo->rel_startup_cost > 0 && fpinfo->rel_total_cost > 0)
+ if (fpinfo->rel_startup_cost >= 0 && fpinfo->rel_total_cost >= 0)
{
+ Assert(fpinfo->retrieved_rows >= 1);
+
+ rows = fpinfo->rows;
+ retrieved_rows = fpinfo->retrieved_rows;
+ width = fpinfo->width;
startup_cost = fpinfo->rel_startup_cost;
run_cost = fpinfo->rel_total_cost - fpinfo->rel_startup_cost;
+
+ /*
+ * If we estimate the costs of a foreign scan or a foreign join
+ * with additional post-scan/join-processing steps, the scan or
+ * join costs obtained from the cache wouldn't yet contain the
+ * eval costs for the final scan/join target, which would've been
+ * updated by apply_scanjoin_target_to_paths(); add the eval costs
+ * now.
+ */
+ if (fpextra && !IS_UPPER_REL(foreignrel))
+ {
+ /* Shouldn't get here unless we have LIMIT */
+ Assert(fpextra->has_limit);
+ Assert(foreignrel->reloptkind == RELOPT_BASEREL ||
+ foreignrel->reloptkind == RELOPT_JOINREL);
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ }
}
else if (IS_JOIN_REL(foreignrel))
{
@@ -2737,6 +2779,10 @@ estimate_path_cost_size(PlannerInfo *root,
QualCost remote_conds_cost;
double nrows;
+ /* Use rows/width estimates made by the core code. */
+ rows = foreignrel->rows;
+ width = foreignrel->reltarget->width;
+
/* For join we expect inner and outer relations set */
Assert(fpinfo->innerrel && fpinfo->outerrel);
@@ -2745,7 +2791,12 @@ estimate_path_cost_size(PlannerInfo *root,
/* Estimate of number of rows in cross product */
nrows = fpinfo_i->rows * fpinfo_o->rows;
- /* Clamp retrieved rows estimate to at most size of cross product */
+
+ /*
+ * Back into an estimate of the number of retrieved rows. Just in
+ * case this is nuts, clamp to at most nrows.
+ */
+ retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
retrieved_rows = Min(retrieved_rows, nrows);
/*
@@ -2794,18 +2845,24 @@ estimate_path_cost_size(PlannerInfo *root,
nrows = clamp_row_est(nrows * fpinfo->joinclause_sel);
run_cost += nrows * remote_conds_cost.per_tuple;
run_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
}
else if (IS_UPPER_REL(foreignrel))
{
+ RelOptInfo *outerrel = fpinfo->outerrel;
PgFdwRelationInfo *ofpinfo;
- PathTarget *ptarget = foreignrel->reltarget;
AggClauseCosts aggcosts;
double input_rows;
int numGroupCols;
double numGroups = 1;
- /* Make sure the core code set the pathtarget. */
- Assert(ptarget != NULL);
+ /* The upper relation should have its outer relation set */
+ Assert(outerrel);
+ /* and that outer relation should have its reltarget set */
+ Assert(outerrel->reltarget);
/*
* This cost model is mixture of costing done for sorted and
@@ -2813,17 +2870,12 @@ estimate_path_cost_size(PlannerInfo *root,
* strategy will be considered at remote side, thus for
* simplicity, we put all startup related costs in startup_cost
* and all finalization and run cost are added in total_cost.
- *
- * Also, core does not care about costing HAVING expressions and
- * adding that to the costs. So similarly, here too we are not
- * considering remote and local conditions for costing.
*/
- ofpinfo = (PgFdwRelationInfo *) fpinfo->outerrel->fdw_private;
+ ofpinfo = (PgFdwRelationInfo *) outerrel->fdw_private;
- /* Get rows and width from input rel */
+ /* Get rows from input rel */
input_rows = ofpinfo->rows;
- width = ofpinfo->width;
/* Collect statistics about aggregates for estimating costs. */
MemSet(&aggcosts, 0, sizeof(AggClauseCosts));
@@ -2849,39 +2901,87 @@ estimate_path_cost_size(PlannerInfo *root,
input_rows, NULL);
/*
- * Number of rows expected from foreign server will be same as
- * that of number of groups.
+ * Get the retrieved_rows and rows estimates. If there are HAVING
+ * quals, account for their selectivity.
*/
- rows = retrieved_rows = numGroups;
+ if (root->parse->havingQual)
+ {
+ /* Factor in the selectivity of the remotely-checked quals */
+ retrieved_rows =
+ clamp_row_est(numGroups *
+ clauselist_selectivity(root,
+ fpinfo->remote_conds,
+ 0,
+ JOIN_INNER,
+ NULL));
+ /* Factor in the selectivity of the locally-checked quals */
+ rows = clamp_row_est(retrieved_rows * fpinfo->local_conds_sel);
+ }
+ else
+ {
+ rows = retrieved_rows = numGroups;
+ }
+
+ /* Use width estimate made by the core code. */
+ width = foreignrel->reltarget->width;
/*-----
* Startup cost includes:
- * 1. Startup cost for underneath input * relation
+ * 1. Startup cost for the underlying input relation, adjusted for
+ * tlist replacement by apply_scanjoin_target_to_paths()
* 2. Cost of performing aggregation, per cost_agg()
- * 3. Startup cost for PathTarget eval
*-----
*/
startup_cost = ofpinfo->rel_startup_cost;
+ startup_cost += outerrel->reltarget->cost.startup;
startup_cost += aggcosts.transCost.startup;
startup_cost += aggcosts.transCost.per_tuple * input_rows;
+ startup_cost += aggcosts.finalCost.startup;
startup_cost += (cpu_operator_cost * numGroupCols) * input_rows;
- startup_cost += ptarget->cost.startup;
/*-----
* Run time cost includes:
- * 1. Run time cost of underneath input relation
+ * 1. Run time cost of the underlying input relation, adjusted for
+ * tlist replacement by apply_scanjoin_target_to_paths()
* 2. Run time cost of performing aggregation, per cost_agg()
- * 3. PathTarget eval cost for each output row
*-----
*/
run_cost = ofpinfo->rel_total_cost - ofpinfo->rel_startup_cost;
- run_cost += aggcosts.finalCost * numGroups;
+ run_cost += outerrel->reltarget->cost.per_tuple * input_rows;
+ run_cost += aggcosts.finalCost.per_tuple * numGroups;
run_cost += cpu_tuple_cost * numGroups;
- run_cost += ptarget->cost.per_tuple * numGroups;
+
+ /* Account for the eval cost of HAVING quals, if any */
+ if (root->parse->havingQual)
+ {
+ QualCost remote_cost;
+
+ /* Add in the eval cost of the remotely-checked quals */
+ cost_qual_eval(&remote_cost, fpinfo->remote_conds, root);
+ startup_cost += remote_cost.startup;
+ run_cost += remote_cost.per_tuple * numGroups;
+ /* Add in the eval cost of the locally-checked quals */
+ startup_cost += fpinfo->local_conds_cost.startup;
+ run_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+ }
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
}
else
{
- /* Clamp retrieved rows estimates to at most foreignrel->tuples. */
+ Cost cpu_per_tuple;
+
+ /* Use rows/width estimates made by set_baserel_size_estimates. */
+ rows = foreignrel->rows;
+ width = foreignrel->reltarget->width;
+
+ /*
+ * Back into an estimate of the number of retrieved rows. Just in
+ * case this is nuts, clamp to at most foreignrel->tuples.
+ */
+ retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
retrieved_rows = Min(retrieved_rows, foreignrel->tuples);
/*
@@ -2896,6 +2996,10 @@ estimate_path_cost_size(PlannerInfo *root,
startup_cost += foreignrel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + foreignrel->baserestrictcost.per_tuple;
run_cost += cpu_per_tuple * foreignrel->tuples;
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
}
/*
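To make the HAVING-qual row arithmetic in the hunk above concrete, here is a self-contained numeric sketch. The group count and selectivities are invented, and clamp_row_est() is re-implemented on the assumption that it simply rounds and enforces a one-row floor, which is close to the planner's real helper:

    #include <math.h>
    #include <stdio.h>

    /* Simplified stand-in for the planner's clamp_row_est() (assumption:
     * round to an integer, never estimate fewer than one row). */
    static double
    clamp_row_est(double nrows)
    {
        nrows = rint(nrows);
        return (nrows < 1.0) ? 1.0 : nrows;
    }

    int
    main(void)
    {
        double numGroups = 200.0;        /* groups produced by the aggregation */
        double remote_having_sel = 0.25; /* selectivity of remotely-checked HAVING quals */
        double local_conds_sel = 0.5;    /* selectivity of locally-checked quals */

        /* Rows fetched from the remote: groups surviving the pushed-down quals */
        double retrieved_rows = clamp_row_est(numGroups * remote_having_sel);
        /* Rows emitted locally: survivors of the locally-checked quals too */
        double rows = clamp_row_est(retrieved_rows * local_conds_sel);

        printf("retrieved_rows = %.0f, rows = %.0f\n", retrieved_rows, rows);
        /* prints: retrieved_rows = 50, rows = 25 */
        return 0;
    }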
@@ -2909,24 +3013,65 @@ estimate_path_cost_size(PlannerInfo *root,
*/
if (pathkeys != NIL)
{
- startup_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
- run_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
+ if (IS_UPPER_REL(foreignrel))
+ {
+ Assert(foreignrel->reloptkind == RELOPT_UPPER_REL &&
+ fpinfo->stage == UPPERREL_GROUP_AGG);
+ adjust_foreign_grouping_path_cost(root, pathkeys,
+ retrieved_rows, width,
+ fpextra->limit_tuples,
+ &startup_cost, &run_cost);
+ }
+ else
+ {
+ startup_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
+ run_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
+ }
}
total_cost = startup_cost + run_cost;
+
+ /* Adjust the cost estimates if we have LIMIT */
+ if (fpextra && fpextra->has_limit)
+ {
+ adjust_limit_rows_costs(&rows, &startup_cost, &total_cost,
+ fpextra->offset_est, fpextra->count_est);
+ retrieved_rows = rows;
+ }
+ }
+
+ /*
+ * If this includes the final sort step, the given target, which will be
+ * applied to the resulting path, might have different expressions from
+ * the foreignrel's reltarget (see make_sort_input_target()); adjust tlist
+ * eval costs.
+ */
+ if (fpextra && fpextra->has_final_sort &&
+ fpextra->target != foreignrel->reltarget)
+ {
+ QualCost oldcost = foreignrel->reltarget->cost;
+ QualCost newcost = fpextra->target->cost;
+
+ startup_cost += newcost.startup - oldcost.startup;
+ total_cost += newcost.startup - oldcost.startup;
+ total_cost += (newcost.per_tuple - oldcost.per_tuple) * rows;
}
/*
- * Cache the costs for scans without any pathkeys or parameterization
- * before adding the costs for transferring data from the foreign server.
- * These costs are useful for costing the join between this relation and
- * another foreign relation or to calculate the costs of paths with
- * pathkeys for this relation, when the costs can not be obtained from the
- * foreign server. This function will be called at least once for every
- * foreign relation without pathkeys and parameterization.
+ * Cache the retrieved rows and cost estimates for scans, joins, or
+ * groupings without any parameterization, pathkeys, or additional
+ * post-scan/join-processing steps, before adding the costs for
+ * transferring data from the foreign server. These estimates are useful
+ * for costing remote joins involving this relation or costing other
+ * remote operations on this relation such as remote sorts and remote
+ * LIMIT restrictions, when the costs cannot be obtained from the foreign
+ * server. This function will be called at least once for every foreign
+ * relation without any parameterization, pathkeys, or additional
+ * post-scan/join-processing steps.
*/
- if (pathkeys == NIL && param_join_conds == NIL)
+ if (pathkeys == NIL && param_join_conds == NIL && fpextra == NULL)
{
+ fpinfo->retrieved_rows = retrieved_rows;
fpinfo->rel_startup_cost = startup_cost;
fpinfo->rel_total_cost = total_cost;
}
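This block is the write side of the cache whose read side appeared near the top of the function (the `rel_startup_cost >= 0` test) and whose invalidation is the `-1` initialization added to postgresGetForeignRelSize(). A self-contained toy of the protocol, with dummy numbers standing in for the real estimates:

    #include <stdbool.h>
    #include <stdio.h>

    /* Field names mirror PgFdwRelationInfo; negative means "not cached yet". */
    typedef struct
    {
        double retrieved_rows;
        double rel_startup_cost;
        double rel_total_cost;
    } CostCache;

    static void
    estimate(CostCache *c, bool plain_scan)
    {
        double startup, total;

        if (c->rel_startup_cost >= 0 && c->rel_total_cost >= 0)
        {
            startup = c->rel_startup_cost;   /* cache hit: reuse */
            total = c->rel_total_cost;
        }
        else
        {
            startup = 10.0;                  /* cache miss: compute (dummy) */
            total = 120.0;
        }

        /* Only the call without pathkeys/params/fpextra fills the cache. */
        if (plain_scan)
        {
            c->retrieved_rows = 1000.0;
            c->rel_startup_cost = startup;
            c->rel_total_cost = total;
        }
        printf("startup=%.1f total=%.1f\n", startup, total);
    }

    int
    main(void)
    {
        CostCache c = {-1, -1, -1};   /* as in postgresGetForeignRelSize() */

        estimate(&c, true);    /* first, plain call: computes and caches */
        estimate(&c, false);   /* later sorted/LIMIT variants reuse the cache */
        return 0;
    }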
@@ -2942,6 +3087,30 @@ estimate_path_cost_size(PlannerInfo *root,
total_cost += fpinfo->fdw_tuple_cost * retrieved_rows;
total_cost += cpu_tuple_cost * retrieved_rows;
+ /*
+ * If we have LIMIT, we should prefer performing the restriction remotely
+ * rather than locally, as the former avoids extra row fetches from the
+ * remote that the latter might cause. But since the core code doesn't
+ * account for such fetches when estimating the costs of the local
+ * restriction (see create_limit_path()), there would be no difference
+ * between the costs of the local restriction and the costs of the remote
+ * restriction estimated above if we don't use remote estimates (except
+ * for the case where the foreignrel is a grouping relation, the given
+ * pathkeys is not NIL, and the effects of a bounded sort for that rel is
+ * accounted for in costing the remote restriction). Tweak the costs of
+ * the remote restriction to ensure we'll prefer it if LIMIT is a useful
+ * one.
+ */
+ if (!fpinfo->use_remote_estimate &&
+ fpextra && fpextra->has_limit &&
+ fpextra->limit_tuples > 0 &&
+ fpextra->limit_tuples < fpinfo->rows)
+ {
+ Assert(fpinfo->rows > 0);
+ total_cost -= (total_cost - startup_cost) * 0.05 *
+ (fpinfo->rows - fpextra->limit_tuples) / fpinfo->rows;
+ }
+
/* Return results. */
*p_rows = rows;
*p_width = width;
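A quick numeric check of the 5% discount introduced above, as a self-contained program; the cost figures are made up, only the formula comes from the hunk:

    #include <stdio.h>

    int
    main(void)
    {
        /* Illustrative inputs mirroring the variables in the hunk above */
        double startup_cost = 25.0;
        double total_cost = 1025.0;
        double rows = 1000.0;          /* fpinfo->rows */
        double limit_tuples = 10.0;    /* fpextra->limit_tuples */

        /* Shave up to 5% off the run cost, scaled by the fraction of rows
         * the LIMIT cuts away: here 1000.0 * 0.05 * 990/1000 = 49.5. */
        total_cost -= (total_cost - startup_cost) * 0.05 *
            (rows - limit_tuples) / rows;

        printf("total_cost = %.2f\n", total_cost);   /* prints 975.50 */
        return 0;
    }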
@@ -3000,6 +3169,59 @@ get_remote_estimate(const char *sql, PGconn *conn,
PG_END_TRY();
}
+/*
+ * Adjust the cost estimates of a foreign grouping path to include the cost of
+ * generating properly-sorted output.
+ */
+static void
+adjust_foreign_grouping_path_cost(PlannerInfo *root,
+ List *pathkeys,
+ double retrieved_rows,
+ double width,
+ double limit_tuples,
+ Cost *p_startup_cost,
+ Cost *p_run_cost)
+{
+ /*
+ * If the GROUP BY clause isn't sortable, the plan chosen by the remote
+ * side is unlikely to generate properly-sorted output, so it would need
+ * an explicit sort; adjust the given costs with cost_sort(). Likewise,
+ * if the GROUP BY clause is sortable but isn't a superset of the given
+ * pathkeys, adjust the costs with that function. Otherwise, adjust the
+ * costs by applying the same heuristic as for the scan or join case.
+ */
+ if (!grouping_is_sortable(root->parse->groupClause) ||
+ !pathkeys_contained_in(pathkeys, root->group_pathkeys))
+ {
+ Path sort_path; /* dummy for result of cost_sort */
+
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ *p_startup_cost + *p_run_cost,
+ retrieved_rows,
+ width,
+ 0.0,
+ work_mem,
+ limit_tuples);
+
+ *p_startup_cost = sort_path.startup_cost;
+ *p_run_cost = sort_path.total_cost - sort_path.startup_cost;
+ }
+ else
+ {
+ /*
+ * The default extra cost seems too large for foreign-grouping cases;
+ * add 1/4th of that default.
+ */
+ double sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
+ - 1.0) * 0.25;
+
+ *p_startup_cost *= sort_multiplier;
+ *p_run_cost *= sort_multiplier;
+ }
+}
+
/*
* Detect whether we want to process an EquivalenceClass member.
*
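For the else branch of adjust_foreign_grouping_path_cost(): assuming DEFAULT_FDW_SORT_MULTIPLIER has its usual value of 1.2 (defined in postgres_fdw.h, not shown in this patch), the damped multiplier works out to 1.05, i.e. pre-sorted grouping paths pay only a quarter of the 20% premium that plain scans and joins pay:

    #include <stdio.h>

    #define DEFAULT_FDW_SORT_MULTIPLIER 1.2  /* assumed; see postgres_fdw.h */

    int
    main(void)
    {
        double sort_multiplier = 1.0 +
            (DEFAULT_FDW_SORT_MULTIPLIER - 1.0) * 0.25;

        printf("grouping sort multiplier = %.2f\n", sort_multiplier); /* 1.05 */
        return 0;
    }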
@@ -3255,6 +3477,7 @@ close_cursor(PGconn *conn, unsigned int cursor_number)
*/
static PgFdwModifyState *
create_foreign_modify(EState *estate,
+ RangeTblEntry *rte,
ResultRelInfo *resultRelInfo,
CmdType operation,
Plan *subplan,
@@ -3266,7 +3489,6 @@ create_foreign_modify(EState *estate,
PgFdwModifyState *fmstate;
Relation rel = resultRelInfo->ri_RelationDesc;
TupleDesc tupdesc = RelationGetDescr(rel);
- RangeTblEntry *rte;
Oid userid;
ForeignTable *table;
UserMapping *user;
@@ -3283,7 +3505,6 @@ create_foreign_modify(EState *estate,
* Identify which user to do the remote access as. This should match what
* ExecCheckRTEPerms() does.
*/
- rte = rt_fetch(resultRelInfo->ri_RangeTableIndex, estate->es_range_table);
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/* Get info about foreign table. */
@@ -3348,9 +3569,104 @@ create_foreign_modify(EState *estate,
Assert(fmstate->p_nums <= n_params);
+ /* Initialize auxiliary state */
+ fmstate->aux_fmstate = NULL;
+
return fmstate;
}
+/*
+ * execute_foreign_modify
+ * Perform foreign-table modification as required, and fetch RETURNING
+ * result if any. (This is the shared guts of postgresExecForeignInsert,
+ * postgresExecForeignUpdate, and postgresExecForeignDelete.)
+ */
+static TupleTableSlot *
+execute_foreign_modify(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ ItemPointer ctid = NULL;
+ const char **p_values;
+ PGresult *res;
+ int n_rows;
+
+ /* The operation should be INSERT, UPDATE, or DELETE */
+ Assert(operation == CMD_INSERT ||
+ operation == CMD_UPDATE ||
+ operation == CMD_DELETE);
+
+ /* Set up the prepared statement on the remote server, if we didn't yet */
+ if (!fmstate->p_name)
+ prepare_foreign_modify(fmstate);
+
+ /*
+ * For UPDATE/DELETE, get the ctid that was passed up as a resjunk column
+ */
+ if (operation == CMD_UPDATE || operation == CMD_DELETE)
+ {
+ Datum datum;
+ bool isNull;
+
+ datum = ExecGetJunkAttribute(planSlot,
+ fmstate->ctidAttno,
+ &isNull);
+ /* shouldn't ever get a null result... */
+ if (isNull)
+ elog(ERROR, "ctid is NULL");
+ ctid = (ItemPointer) DatumGetPointer(datum);
+ }
+
+ /* Convert parameters needed by prepared statement to text form */
+ p_values = convert_prep_stmt_params(fmstate, ctid, slot);
+
+ /*
+ * Execute the prepared statement.
+ */
+ if (!PQsendQueryPrepared(fmstate->conn,
+ fmstate->p_name,
+ fmstate->p_nums,
+ p_values,
+ NULL,
+ NULL,
+ 0))
+ pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+
+ /*
+ * Get the result, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_get_result(fmstate->conn, fmstate->query);
+ if (PQresultStatus(res) !=
+ (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+
+ /* Check number of rows affected, and fetch RETURNING tuple if any */
+ if (fmstate->has_returning)
+ {
+ n_rows = PQntuples(res);
+ if (n_rows > 0)
+ store_returning_result(fmstate, slot, res);
+ }
+ else
+ n_rows = atoi(PQcmdTuples(res));
+
+ /* And clean up */
+ PQclear(res);
+
+ MemoryContextReset(fmstate->temp_cxt);
+
+ /*
+ * Return NULL if nothing was inserted/updated/deleted on the remote end
+ */
+ return (n_rows > 0) ? slot : NULL;
+}
+
/*
* prepare_foreign_modify
* Establish a prepared statement for execution of INSERT/UPDATE/DELETE
@@ -3481,8 +3797,12 @@ store_returning_result(PgFdwModifyState *fmstate,
fmstate->retrieved_attrs,
NULL,
fmstate->temp_cxt);
- /* tuple will be deleted when it is cleared from the slot */
- ExecStoreTuple(newtup, slot, InvalidBuffer, true);
+
+ /*
+ * The returning slot will not necessarily be suitable to store
+ * heaptuples directly, so allow for conversion.
+ */
+ ExecForceStoreHeapTuple(newtup, slot, true);
}
PG_CATCH();
{
@@ -3602,8 +3922,7 @@ build_remote_returning(Index rtindex, Relation rel, List *returningList)
if (IsA(var, Var) &&
var->varno == rtindex &&
var->varattno <= InvalidAttrNumber &&
- var->varattno != SelfItemPointerAttributeNumber &&
- var->varattno != ObjectIdAttributeNumber)
+ var->varattno != SelfItemPointerAttributeNumber)
continue; /* don't need it */
if (tlist_member((Expr *) var, tlist))
@@ -3755,7 +4074,7 @@ get_returning_data(ForeignScanState *node)
dmstate->retrieved_attrs,
node,
dmstate->temp_cxt);
- ExecStoreTuple(newtup, slot, InvalidBuffer, false);
+ ExecStoreHeapTuple(newtup, slot, false);
}
PG_CATCH();
{
@@ -3834,8 +4153,6 @@ init_returning_filter(PgFdwDirectModifyState *dmstate,
*/
if (attrno == SelfItemPointerAttributeNumber)
dmstate->ctidAttno = i;
- else if (attrno == ObjectIdAttributeNumber)
- dmstate->oidAttno = i;
else
Assert(false);
dmstate->hasSystemCols = true;
@@ -3863,6 +4180,7 @@ apply_returning_filter(PgFdwDirectModifyState *dmstate,
TupleTableSlot *slot,
EState *estate)
{
+ ResultRelInfo *relInfo = estate->es_result_relation_info;
TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel);
TupleTableSlot *resultSlot;
Datum *values;
@@ -3872,11 +4190,9 @@ apply_returning_filter(PgFdwDirectModifyState *dmstate,
int i;
/*
- * Use the trigger tuple slot as a place to store the result tuple.
+ * Use the return tuple slot as a place to store the result tuple.
*/
- resultSlot = estate->es_trig_tuple_slot;
- if (resultSlot->tts_tupleDescriptor != resultTupType)
- ExecSetSlotDescriptor(resultSlot, resultTupType);
+ resultSlot = ExecGetReturningSlot(estate, relInfo);
/*
* Extract all the values of the scan tuple.
@@ -3917,11 +4233,13 @@ apply_returning_filter(PgFdwDirectModifyState *dmstate,
ExecStoreVirtualTuple(resultSlot);
/*
- * If we have any system columns to return, install them.
+ * If we have any system columns to return, materialize a heap tuple in
+ * the slot from column values set above and install system columns in
+ * that tuple.
*/
if (dmstate->hasSystemCols)
{
- HeapTuple resultTup = ExecMaterializeSlot(resultSlot);
+ HeapTuple resultTup = ExecFetchSlotHeapTuple(resultSlot, true, NULL);
/* ctid */
if (dmstate->ctidAttno)
@@ -3932,15 +4250,6 @@ apply_returning_filter(PgFdwDirectModifyState *dmstate,
resultTup->t_self = *ctid;
}
- /* oid */
- if (dmstate->oidAttno)
- {
- Oid oid = InvalidOid;
-
- oid = DatumGetObjectId(old_values[dmstate->oidAttno - 1]);
- HeapTupleSetOid(resultTup, oid);
- }
-
/*
* And remaining columns
*
@@ -4181,20 +4490,51 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
/* In what follows, do not risk leaking any PGresults. */
PG_TRY();
{
+ char fetch_sql[64];
+ int fetch_size;
+ ListCell *lc;
+
res = pgfdw_exec_query(conn, sql.data);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
pgfdw_report_error(ERROR, res, conn, false, sql.data);
PQclear(res);
res = NULL;
+ /*
+ * Determine the fetch size. The default is arbitrary, but shouldn't
+ * be enormous.
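+ * A table-level fetch_size option overrides a server-level one, since
+ * the table options are scanned last.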
+ */
+ fetch_size = 100;
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "fetch_size") == 0)
+ {
+ fetch_size = strtol(defGetString(def), NULL, 10);
+ break;
+ }
+ }
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "fetch_size") == 0)
+ {
+ fetch_size = strtol(defGetString(def), NULL, 10);
+ break;
+ }
+ }
+
+ /* Construct command to fetch rows from remote. */
+ snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
+ fetch_size, cursor_number);
+
/* Retrieve and process rows a batch at a time. */
for (;;)
{
- char fetch_sql[64];
- int fetch_size;
int numrows;
int i;
- ListCell *lc;
/* Allow users to cancel long query */
CHECK_FOR_INTERRUPTS();
@@ -4205,33 +4545,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
* then just adjust rowstoskip and samplerows appropriately.
*/
- /* The fetch size is arbitrary, but shouldn't be enormous. */
- fetch_size = 100;
- foreach(lc, server->options)
- {
- DefElem *def = (DefElem *) lfirst(lc);
-
- if (strcmp(def->defname, "fetch_size") == 0)
- {
- fetch_size = strtol(defGetString(def), NULL, 10);
- break;
- }
- }
- foreach(lc, table->options)
- {
- DefElem *def = (DefElem *) lfirst(lc);
-
- if (strcmp(def->defname, "fetch_size") == 0)
- {
- fetch_size = strtol(defGetString(def), NULL, 10);
- break;
- }
- }
-
/* Fetch some rows */
- snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
- fetch_size, cursor_number);
-
res = pgfdw_exec_query(conn, fetch_sql);
/* On error, report the original query, not the FETCH. */
if (PQresultStatus(res) != PGRES_TUPLES_OK)
@@ -4705,7 +5019,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
bool is_remote_clause = is_foreign_expr(root, joinrel,
rinfo->clause);
- if (IS_OUTER_JOIN(jointype) && !rinfo->is_pushed_down)
+ if (IS_OUTER_JOIN(jointype) &&
+ !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
{
if (!is_remote_clause)
return false;
@@ -4787,23 +5102,23 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
{
case JOIN_INNER:
fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
- list_copy(fpinfo_i->remote_conds));
+ fpinfo_i->remote_conds);
fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
- list_copy(fpinfo_o->remote_conds));
+ fpinfo_o->remote_conds);
break;
case JOIN_LEFT:
fpinfo->joinclauses = list_concat(fpinfo->joinclauses,
- list_copy(fpinfo_i->remote_conds));
+ fpinfo_i->remote_conds);
fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
- list_copy(fpinfo_o->remote_conds));
+ fpinfo_o->remote_conds);
break;
case JOIN_RIGHT:
fpinfo->joinclauses = list_concat(fpinfo->joinclauses,
- list_copy(fpinfo_o->remote_conds));
+ fpinfo_o->remote_conds);
fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
- list_copy(fpinfo_i->remote_conds));
+ fpinfo_i->remote_conds);
break;
case JOIN_FULL:
@@ -4864,10 +5179,11 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
fpinfo->user = NULL;
/*
- * Set cached relation costs to some negative value, so that we can detect
- * when they are set to some sensible costs, during one (usually the
- * first) of the calls to estimate_path_cost_size().
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values,
+ * during one (usually the first) of the calls to estimate_path_cost_size.
*/
+ fpinfo->retrieved_rows = -1;
fpinfo->rel_startup_cost = -1;
fpinfo->rel_total_cost = -1;
@@ -4913,12 +5229,12 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
List *useful_pathkeys = lfirst(lc);
Path *sorted_epq_path;
- estimate_path_cost_size(root, rel, NIL, useful_pathkeys,
+ estimate_path_cost_size(root, rel, NIL, useful_pathkeys, NULL,
&rows, &width, &startup_cost, &total_cost);
/*
- * The EPQ path must be at least as well sorted as the path itself,
- * in case it gets used as input to a mergejoin.
+ * The EPQ path must be at least as well sorted as the path itself, in
+ * case it gets used as input to a mergejoin.
*/
sorted_epq_path = epq_path;
if (sorted_epq_path != NULL &&
@@ -4931,16 +5247,28 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
useful_pathkeys,
-1.0);
- add_path(rel, (Path *)
- create_foreignscan_path(root, rel,
- NULL,
- rows,
- startup_cost,
- total_cost,
- useful_pathkeys,
- NULL,
- sorted_epq_path,
- NIL));
+ if (IS_SIMPLE_REL(rel))
+ add_path(rel, (Path *)
+ create_foreignscan_path(root, rel,
+ NULL,
+ rows,
+ startup_cost,
+ total_cost,
+ useful_pathkeys,
+ rel->lateral_relids,
+ sorted_epq_path,
+ NIL));
+ else
+ add_path(rel, (Path *)
+ create_foreign_join_path(root, rel,
+ NULL,
+ rows,
+ startup_cost,
+ total_cost,
+ useful_pathkeys,
+ rel->lateral_relids,
+ sorted_epq_path,
+ NIL));
}
}
@@ -5075,6 +5403,13 @@ postgresGetForeignJoinPaths(PlannerInfo *root,
if (joinrel->fdw_private)
return;
+ /*
+ * This code does not work for joins with lateral references, since those
+ * must have parameterized paths, which we don't generate yet.
+ */
+ if (!bms_is_empty(joinrel->lateral_relids))
+ return;
+
/*
* Create unfinished PgFdwRelationInfo entry which is used to indicate
* that the join relation is already considered, so that we won't waste
@@ -5145,8 +5480,8 @@ postgresGetForeignJoinPaths(PlannerInfo *root,
extra->sjinfo);
/* Estimate costs for bare join relation */
- estimate_path_cost_size(root, joinrel, NIL, NIL, &rows,
- &width, &startup_cost, &total_cost);
+ estimate_path_cost_size(root, joinrel, NIL, NIL, NULL,
+ &rows, &width, &startup_cost, &total_cost);
/* Now update this information in the joinrel */
joinrel->rows = rows;
joinrel->reltarget->width = width;
@@ -5159,16 +5494,16 @@ postgresGetForeignJoinPaths(PlannerInfo *root,
* Create a new join path and add it to the joinrel which represents a
* join between foreign tables.
*/
- joinpath = create_foreignscan_path(root,
- joinrel,
- NULL, /* default pathtarget */
- rows,
- startup_cost,
- total_cost,
- NIL, /* no pathkeys */
- NULL, /* no required_outer */
- epq_path,
- NIL); /* no fdw_private */
+ joinpath = create_foreign_join_path(root,
+ joinrel,
+ NULL, /* default pathtarget */
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ joinrel->lateral_relids,
+ epq_path,
+ NIL); /* no fdw_private */
/* Add generated path into joinrel by add_path(). */
add_path(joinrel, (Path *) joinpath);
@@ -5192,7 +5527,6 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) grouped_rel->fdw_private;
PathTarget *grouping_target = grouped_rel->reltarget;
PgFdwRelationInfo *ofpinfo;
- List *aggvars;
ListCell *lc;
int i;
List *tlist = NIL;
@@ -5218,6 +5552,15 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
* server. All GROUP BY expressions will be part of the grouping target
* and thus there is no need to search for them separately. Add grouping
* expressions into target list which will be passed to foreign server.
+ *
+ * A tricky fine point is that we must not put any expression into the
+ * target list that is just a foreign param (that is, something that
+ * deparse.c would conclude has to be sent to the foreign server). If we
+ * do, the expression will also appear in the fdw_exprs list of the plan
+ * node, and setrefs.c will get confused and decide that the fdw_exprs
+ * entry is actually a reference to the fdw_scan_tlist entry, resulting in
+ * a broken plan. Somewhat oddly, it's OK if the expression contains such
+ * a node, as long as it's not at top level; then no match is possible.
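+ * (For example, in "SELECT EXISTS(SELECT 1 FROM t), sum(c1) FROM ft1
+ * GROUP BY 1" the grouping expression reaches us as a bare Param for the
+ * initplan's output; see the bug #15781 regression tests.)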
*/
i = 0;
foreach(lc, grouping_target->exprs)
@@ -5238,6 +5581,13 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
if (!is_foreign_expr(root, grouped_rel, expr))
return false;
+ /*
+ * If it would be a foreign param, we can't put it into the tlist,
+ * so we have to fail.
+ */
+ if (is_foreign_param(root, grouped_rel, expr))
+ return false;
+
/*
* Pushable, so add to tlist. We need to create a TLE for this
* expression and apply the sortgroupref to it. We cannot use
@@ -5253,9 +5603,11 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
else
{
/*
- * Non-grouping expression we need to compute. Is it shippable?
+ * Non-grouping expression we need to compute. Can we ship it
+ * as-is to the foreign server?
*/
- if (is_foreign_expr(root, grouped_rel, expr))
+ if (is_foreign_expr(root, grouped_rel, expr) &&
+ !is_foreign_param(root, grouped_rel, expr))
{
/* Yes, so add to tlist as-is; OK to suppress duplicates */
tlist = add_to_flat_tlist(tlist, list_make1(expr));
@@ -5263,12 +5615,16 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
else
{
/* Not pushable as a whole; extract its Vars and aggregates */
+ List *aggvars;
+
aggvars = pull_var_clause((Node *) expr,
PVC_INCLUDE_AGGREGATES);
/*
* If any aggregate expression is not shippable, then we
- * cannot push down aggregation to the foreign server.
+ * cannot push down aggregation to the foreign server. (We
+ * don't have to check is_foreign_param, since that certainly
+ * won't return true for any such expression.)
*/
if (!is_foreign_expr(root, grouped_rel, (Expr *) aggvars))
return false;
@@ -5355,7 +5711,8 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
* If aggregates within local conditions are not safe to push
* down, then we cannot push down the query. Vars are already
* part of GROUP BY clause which are checked above, so no need to
- * access them again here.
+ * access them again here. Again, we need not check
+ * is_foreign_param for a foreign aggregate.
*/
if (IsA(expr, Aggref))
{
@@ -5374,10 +5731,11 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
fpinfo->pushdown_safe = true;
/*
- * Set cached relation costs to some negative value, so that we can detect
- * when they are set to some sensible costs, during one (usually the
- * first) of the calls to estimate_path_cost_size().
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values,
+ * during one (usually the first) of the calls to estimate_path_cost_size.
*/
+ fpinfo->retrieved_rows = -1;
fpinfo->rel_startup_cost = -1;
fpinfo->rel_total_cost = -1;
@@ -5396,8 +5754,6 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
* postgresGetForeignUpperPaths
* Add paths for post-join operations like aggregation, grouping etc. if
* corresponding operations are safe to push down.
- *
- * Right now, we only support aggregate, grouping and having clause pushdown.
*/
static void
postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
@@ -5415,15 +5771,34 @@ postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
return;
/* Ignore stages we don't support; and skip any duplicate calls. */
- if (stage != UPPERREL_GROUP_AGG || output_rel->fdw_private)
+ if ((stage != UPPERREL_GROUP_AGG &&
+ stage != UPPERREL_ORDERED &&
+ stage != UPPERREL_FINAL) ||
+ output_rel->fdw_private)
return;
fpinfo = (PgFdwRelationInfo *) palloc0(sizeof(PgFdwRelationInfo));
fpinfo->pushdown_safe = false;
+ fpinfo->stage = stage;
output_rel->fdw_private = fpinfo;
- add_foreign_grouping_paths(root, input_rel, output_rel,
- (GroupPathExtraData *) extra);
+ switch (stage)
+ {
+ case UPPERREL_GROUP_AGG:
+ add_foreign_grouping_paths(root, input_rel, output_rel,
+ (GroupPathExtraData *) extra);
+ break;
+ case UPPERREL_ORDERED:
+ add_foreign_ordered_paths(root, input_rel, output_rel);
+ break;
+ case UPPERREL_FINAL:
+ add_foreign_final_paths(root, input_rel, output_rel,
+ (FinalPathExtraData *) extra);
+ break;
+ default:
+ elog(ERROR, "unexpected upper relation: %d", (int) stage);
+ break;
+ }
}
/*
@@ -5476,9 +5851,25 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
if (!foreign_grouping_ok(root, grouped_rel, extra->havingQual))
return;
+ /*
+ * Compute the selectivity and cost of the local_conds, so we don't have
+ * to do it over again for each path.  (Currently we create just a single
+ * path here, but in the future we might build more, such as pre-sorted
+ * paths as in postgresGetForeignPaths and postgresGetForeignJoinPaths.)
+ * The best we can do for these conditions is to estimate selectivity on
+ * the basis of local statistics.
+ */
+ fpinfo->local_conds_sel = clauselist_selectivity(root,
+ fpinfo->local_conds,
+ 0,
+ JOIN_INNER,
+ NULL);
+
+ cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
+
/* Estimate the cost of push down */
- estimate_path_cost_size(root, grouped_rel, NIL, NIL, &rows,
- &width, &startup_cost, &total_cost);
+ estimate_path_cost_size(root, grouped_rel, NIL, NIL, NULL,
+ &rows, &width, &startup_cost, &total_cost);
/* Now update this information in the fpinfo */
fpinfo->rows = rows;
@@ -5487,21 +5878,382 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
fpinfo->total_cost = total_cost;
/* Create and add foreign path to the grouping relation. */
- grouppath = create_foreignscan_path(root,
- grouped_rel,
- grouped_rel->reltarget,
- rows,
- startup_cost,
- total_cost,
- NIL, /* no pathkeys */
- NULL, /* no required_outer */
- NULL,
- NIL); /* no fdw_private */
+ grouppath = create_foreign_upper_path(root,
+ grouped_rel,
+ grouped_rel->reltarget,
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ NULL,
+ NIL); /* no fdw_private */
/* Add generated path into grouped_rel by add_path(). */
add_path(grouped_rel, (Path *) grouppath);
}
+/*
+ * add_foreign_ordered_paths
+ * Add foreign paths for performing the final sort remotely.
+ *
+ * The given input_rel contains the source-data Paths; the paths created
+ * here are added to the given ordered_rel.
+ */
+static void
+add_foreign_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *ordered_rel)
+{
+ Query *parse = root->parse;
+ PgFdwRelationInfo *ifpinfo = input_rel->fdw_private;
+ PgFdwRelationInfo *fpinfo = ordered_rel->fdw_private;
+ PgFdwPathExtraData *fpextra;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ List *fdw_private;
+ ForeignPath *ordered_path;
+ ListCell *lc;
+
+ /* Shouldn't get here unless the query has ORDER BY */
+ Assert(parse->sortClause);
+
+ /* We don't support cases where there are any SRFs in the targetlist */
+ if (parse->hasTargetSRFs)
+ return;
+
+ /* Save the input_rel as outerrel in fpinfo */
+ fpinfo->outerrel = input_rel;
+
+ /*
+ * Copy foreign table, foreign server, user mapping, FDW options etc.
+ * details from the input relation's fpinfo.
+ */
+ fpinfo->table = ifpinfo->table;
+ fpinfo->server = ifpinfo->server;
+ fpinfo->user = ifpinfo->user;
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
+
+ /*
+ * If the input_rel is a base or join relation, we would already have
+ * considered pushing down the final sort to the remote server when
+ * creating pre-sorted foreign paths for that relation, because the
+ * query_pathkeys is set to the root->sort_pathkeys in that case (see
+ * standard_qp_callback()).
+ */
+ if (input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL)
+ {
+ Assert(root->query_pathkeys == root->sort_pathkeys);
+
+ /* Safe to push down if the query_pathkeys is safe to push down */
+ fpinfo->pushdown_safe = ifpinfo->qp_is_pushdown_safe;
+
+ return;
+ }
+
+ /* The input_rel should be a grouping relation */
+ Assert(input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_GROUP_AGG);
+
+ /*
+ * We try to create a path below by extending a simple foreign path for
+ * the underlying grouping relation to perform the final sort remotely,
+ * which is stored into the fdw_private list of the resulting path.
+ */
+
+ /* Assess if it is safe to push down the final sort */
+ foreach(lc, root->sort_pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+ Expr *sort_expr;
+
+ /*
+ * is_foreign_expr would detect volatile expressions as well, but
+ * checking ec_has_volatile here saves some cycles.
+ */
+ if (pathkey_ec->ec_has_volatile)
+ return;
+
+ /* Get the sort expression for the pathkey_ec */
+ sort_expr = find_em_expr_for_input_target(root,
+ pathkey_ec,
+ input_rel->reltarget);
+
+ /* If it's not safe to evaluate remotely, we can't push down the sort */
+ if (!is_foreign_expr(root, input_rel, sort_expr))
+ return;
+ }
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ /* Construct PgFdwPathExtraData */
+ fpextra = (PgFdwPathExtraData *) palloc0(sizeof(PgFdwPathExtraData));
+ fpextra->target = root->upper_targets[UPPERREL_ORDERED];
+ fpextra->has_final_sort = true;
+
+ /* Estimate the costs of performing the final sort remotely */
+ estimate_path_cost_size(root, input_rel, NIL, root->sort_pathkeys, fpextra,
+ &rows, &width, &startup_cost, &total_cost);
+
+ /*
+ * Build the fdw_private list that will be used by postgresGetForeignPlan.
+ * Items in the list must match order in enum FdwPathPrivateIndex.
+ */
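+ /* (has_final_sort = true, has_limit = false) */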
+ fdw_private = list_make2(makeInteger(true), makeInteger(false));
+
+ /* Create foreign ordering path */
+ ordered_path = create_foreign_upper_path(root,
+ input_rel,
+ root->upper_targets[UPPERREL_ORDERED],
+ rows,
+ startup_cost,
+ total_cost,
+ root->sort_pathkeys,
+ NULL, /* no extra plan */
+ fdw_private);
+
+ /* and add it to the ordered_rel */
+ add_path(ordered_rel, (Path *) ordered_path);
+}
+
+/*
+ * add_foreign_final_paths
+ * Add foreign paths for performing the final processing remotely.
+ *
+ * The given input_rel contains the source-data Paths; the paths created
+ * here are added to the given final_rel.
+ */
+static void
+add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *final_rel,
+ FinalPathExtraData *extra)
+{
+ Query *parse = root->parse;
+ PgFdwRelationInfo *ifpinfo = (PgFdwRelationInfo *) input_rel->fdw_private;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) final_rel->fdw_private;
+ bool has_final_sort = false;
+ List *pathkeys = NIL;
+ PgFdwPathExtraData *fpextra;
+ bool save_use_remote_estimate = false;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ List *fdw_private;
+ ForeignPath *final_path;
+
+ /*
+ * Currently, we only support this for SELECT commands
+ */
+ if (parse->commandType != CMD_SELECT)
+ return;
+
+ /*
+ * No work if there is no FOR UPDATE/SHARE clause and if there is no need
+ * to add a LIMIT node
+ */
+ if (!parse->rowMarks && !extra->limit_needed)
+ return;
+
+ /* We don't support cases where there are any SRFs in the targetlist */
+ if (parse->hasTargetSRFs)
+ return;
+
+ /* Save the input_rel as outerrel in fpinfo */
+ fpinfo->outerrel = input_rel;
+
+ /*
+ * Copy foreign table, foreign server, user mapping, FDW options etc.
+ * details from the input relation's fpinfo.
+ */
+ fpinfo->table = ifpinfo->table;
+ fpinfo->server = ifpinfo->server;
+ fpinfo->user = ifpinfo->user;
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
+
+ /*
+ * If there is no need to add a LIMIT node, there might be a ForeignPath
+ * in the input_rel's pathlist that implements all behavior of the query.
+ * Note: we would already have accounted for the query's FOR UPDATE/SHARE
+ * (if any) before we get here.
+ */
+ if (!extra->limit_needed)
+ {
+ ListCell *lc;
+
+ Assert(parse->rowMarks);
+
+ /*
+ * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
+ * so the input_rel should be a base, join, or ordered relation; and
+ * if it's an ordered relation, its input relation should be a base or
+ * join relation.
+ */
+ Assert(input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL ||
+ (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_ORDERED &&
+ (ifpinfo->outerrel->reloptkind == RELOPT_BASEREL ||
+ ifpinfo->outerrel->reloptkind == RELOPT_JOINREL)));
+
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ /*
+ * apply_scanjoin_target_to_paths() uses create_projection_path()
+ * to adjust each of its input paths if needed, whereas
+ * create_ordered_paths() uses apply_projection_to_path() to do
+ * that. So the former might have put a ProjectionPath on top of
+ * the ForeignPath; look through ProjectionPath and see if the
+ * path underneath it is ForeignPath.
+ */
+ if (IsA(path, ForeignPath) ||
+ (IsA(path, ProjectionPath) &&
+ IsA(((ProjectionPath *) path)->subpath, ForeignPath)))
+ {
+ /*
+ * Create foreign final path; this gets rid of a
+ * no-longer-needed outer plan (if any), which makes the
+ * EXPLAIN output look cleaner
+ */
+ final_path = create_foreign_upper_path(root,
+ path->parent,
+ path->pathtarget,
+ path->rows,
+ path->startup_cost,
+ path->total_cost,
+ path->pathkeys,
+ NULL, /* no extra plan */
+ NULL); /* no fdw_private */
+
+ /* and add it to the final_rel */
+ add_path(final_rel, (Path *) final_path);
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ return;
+ }
+ }
+
+ /*
+ * If we get here it means no ForeignPaths; since we would already
+ * have considered pushing down all operations for the query to the
+ * remote server, give up on it.
+ */
+ return;
+ }
+
+ Assert(extra->limit_needed);
+
+ /*
+ * If the input_rel is an ordered relation, replace the input_rel with its
+ * input relation
+ */
+ if (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_ORDERED)
+ {
+ input_rel = ifpinfo->outerrel;
+ ifpinfo = (PgFdwRelationInfo *) input_rel->fdw_private;
+ has_final_sort = true;
+ pathkeys = root->sort_pathkeys;
+ }
+
+ /* The input_rel should be a base, join, or grouping relation */
+ Assert(input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL ||
+ (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_GROUP_AGG));
+
+ /*
+ * We try to create a path below by extending a simple foreign path for
+ * the underlying base, join, or grouping relation to perform the final
+ * sort (if has_final_sort) and the LIMIT restriction remotely, which is
+ * stored into the fdw_private list of the resulting path. (We
+ * re-estimate the costs of sorting the underlying relation, if
+ * has_final_sort.)
+ */
+
+ /*
+ * Assess if it is safe to push down the LIMIT and OFFSET to the remote
+ * server
+ */
+
+ /*
+ * If the underlying relation has any local conditions, the LIMIT/OFFSET
+ * cannot be pushed down.
+ */
+ if (ifpinfo->local_conds)
+ return;
+
+ /*
+ * Likewise, the LIMIT/OFFSET cannot be pushed down if their expressions
+ * are not safe to evaluate remotely.
+ */
+ if (!is_foreign_expr(root, input_rel, (Expr *) parse->limitOffset) ||
+ !is_foreign_expr(root, input_rel, (Expr *) parse->limitCount))
+ return;
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ /* Construct PgFdwPathExtraData */
+ fpextra = (PgFdwPathExtraData *) palloc0(sizeof(PgFdwPathExtraData));
+ fpextra->target = root->upper_targets[UPPERREL_FINAL];
+ fpextra->has_final_sort = has_final_sort;
+ fpextra->has_limit = extra->limit_needed;
+ fpextra->limit_tuples = extra->limit_tuples;
+ fpextra->count_est = extra->count_est;
+ fpextra->offset_est = extra->offset_est;
+
+ /*
+ * Estimate the costs of performing the final sort and the LIMIT
+ * restriction remotely.  If has_final_sort is false, there is no need to
+ * execute EXPLAIN even when use_remote_estimate is set, since the costs
+ * can be estimated well enough from the costs we already have for the
+ * underlying relation, in the same way as when use_remote_estimate is
+ * false.  Because executing EXPLAIN is pretty expensive, force
+ * use_remote_estimate to false in that case.
+ */
+ if (!fpextra->has_final_sort)
+ {
+ save_use_remote_estimate = ifpinfo->use_remote_estimate;
+ ifpinfo->use_remote_estimate = false;
+ }
+ estimate_path_cost_size(root, input_rel, NIL, pathkeys, fpextra,
+ &rows, &width, &startup_cost, &total_cost);
+ if (!fpextra->has_final_sort)
+ ifpinfo->use_remote_estimate = save_use_remote_estimate;
+
+ /*
+ * Build the fdw_private list that will be used by postgresGetForeignPlan.
+ * Items in the list must match order in enum FdwPathPrivateIndex.
+ */
+ fdw_private = list_make2(makeInteger(has_final_sort),
+ makeInteger(extra->limit_needed));
+
+ /*
+ * Create foreign final path; this gets rid of a no-longer-needed outer
+ * plan (if any), which makes the EXPLAIN output look cleaner
+ */
+ final_path = create_foreign_upper_path(root,
+ input_rel,
+ root->upper_targets[UPPERREL_FINAL],
+ rows,
+ startup_cost,
+ total_cost,
+ pathkeys,
+ NULL, /* no extra plan */
+ fdw_private);
+
+ /* and add it to the final_rel */
+ add_path(final_rel, (Path *) final_path);
+}
+
/*
* Create a tuple from the specified row of the PGresult.
*
@@ -5524,7 +6276,6 @@ make_tuple_from_result_row(PGresult *res,
Datum *values;
bool *nulls;
ItemPointer ctid = NULL;
- Oid oid = InvalidOid;
ConversionLocation errpos;
ErrorContextCallback errcallback;
MemoryContext oldcontext;
@@ -5607,17 +6358,6 @@ make_tuple_from_result_row(PGresult *res,
ctid = (ItemPointer) DatumGetPointer(datum);
}
}
- else if (i == ObjectIdAttributeNumber)
- {
- /* oid */
- if (valstr != NULL)
- {
- Datum datum;
-
- datum = DirectFunctionCall1(oidin, CStringGetDatum(valstr));
- oid = DatumGetObjectId(datum);
- }
- }
errpos.cur_attno = 0;
j++;
@@ -5661,12 +6401,6 @@ make_tuple_from_result_row(PGresult *res,
HeapTupleHeaderSetXmin(tuple->t_data, InvalidTransactionId);
HeapTupleHeaderSetCmin(tuple->t_data, InvalidTransactionId);
- /*
- * If we have an OID to return, install it.
- */
- if (OidIsValid(oid))
- HeapTupleSetOid(tuple, oid);
-
/* Clean up */
MemoryContextReset(temp_context);
@@ -5695,8 +6429,6 @@ conversion_error_callback(void *arg)
attname = NameStr(attr->attname);
else if (errpos->cur_attno == SelfItemPointerAttributeNumber)
attname = "ctid";
- else if (errpos->cur_attno == ObjectIdAttributeNumber)
- attname = "oid";
relname = RelationGetRelationName(errpos->rel);
}
@@ -5721,7 +6453,7 @@ conversion_error_callback(void *arg)
RangeTblEntry *rte;
Var *var = (Var *) tle->expr;
- rte = rt_fetch(var->varno, estate->es_range_table);
+ rte = exec_rt_fetch(var->varno, estate);
if (var->varattno == 0)
is_wholerow = true;
@@ -5757,7 +6489,8 @@ find_em_expr_for_rel(EquivalenceClass *ec, RelOptInfo *rel)
{
EquivalenceMember *em = lfirst(lc_em);
- if (bms_is_subset(em->em_relids, rel->relids))
+ if (bms_is_subset(em->em_relids, rel->relids) &&
+ !bms_is_empty(em->em_relids))
{
/*
* If there is more than one equivalence member whose Vars are
@@ -5771,3 +6504,65 @@ find_em_expr_for_rel(EquivalenceClass *ec, RelOptInfo *rel)
/* We didn't find any suitable equivalence class expression */
return NULL;
}
+
+/*
+ * Find an equivalence class member expression to be computed as a sort column
+ * in the given target.
+ */
+Expr *
+find_em_expr_for_input_target(PlannerInfo *root,
+ EquivalenceClass *ec,
+ PathTarget *target)
+{
+ ListCell *lc1;
+ int i;
+
+ i = 0;
+ foreach(lc1, target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc1);
+ Index sgref = get_pathtarget_sortgroupref(target, i);
+ ListCell *lc2;
+
+ /* Ignore non-sort expressions */
+ if (sgref == 0 ||
+ get_sortgroupref_clause_noerr(sgref,
+ root->parse->sortClause) == NULL)
+ {
+ i++;
+ continue;
+ }
+
+ /* We ignore binary-compatible relabeling on both ends */
+ while (expr && IsA(expr, RelabelType))
+ expr = ((RelabelType *) expr)->arg;
+
+ /* Locate an EquivalenceClass member matching this expr, if any */
+ foreach(lc2, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
+ Expr *em_expr;
+
+ /* Don't match constants */
+ if (em->em_is_const)
+ continue;
+
+ /* Ignore child members */
+ if (em->em_is_child)
+ continue;
+
+ /* Match if same expression (after stripping relabel) */
+ em_expr = em->em_expr;
+ while (em_expr && IsA(em_expr, RelabelType))
+ em_expr = ((RelabelType *) em_expr)->arg;
+
+ if (equal(em_expr, expr))
+ return em->em_expr;
+ }
+
+ i++;
+ }
+
+ elog(ERROR, "could not find pathkey item to sort");
+ return NULL; /* keep compiler quiet */
+}
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index d37cc88b6ec..6acb7dcf6cd 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -3,7 +3,7 @@
* postgres_fdw.h
* Foreign-data wrapper for remote PostgreSQL servers
*
- * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/postgres_fdw.h
@@ -15,7 +15,7 @@
#include "foreign/foreign.h"
#include "lib/stringinfo.h"
-#include "nodes/relation.h"
+#include "nodes/pathnodes.h"
#include "utils/relcache.h"
#include "libpq-fe.h"
@@ -49,6 +49,9 @@ typedef struct PgFdwRelationInfo
/* Bitmap of attr numbers we need to fetch from the remote server. */
Bitmapset *attrs_used;
+ /* True means that the query_pathkeys is safe to push down */
+ bool qp_is_pushdown_safe;
+
/* Cost and selectivity of local_conds. */
QualCost local_conds_cost;
Selectivity local_conds_sel;
@@ -56,12 +59,18 @@ typedef struct PgFdwRelationInfo
/* Selectivity of join conditions */
Selectivity joinclause_sel;
- /* Estimated size and cost for a scan or join. */
+ /* Estimated size and cost for a scan, join, or grouping/aggregation. */
double rows;
int width;
Cost startup_cost;
Cost total_cost;
- /* Costs excluding costs for transferring data from the foreign server */
+
+ /*
+ * Estimated number of rows fetched from the foreign server, and costs
+ * excluding costs for transferring those rows from the foreign server.
+ * These are only used by estimate_path_cost_size().
+ */
+ double retrieved_rows;
Cost rel_startup_cost;
Cost rel_total_cost;
@@ -92,6 +101,9 @@ typedef struct PgFdwRelationInfo
/* joinclauses contains only JOIN/ON conditions for an outer join */
List *joinclauses; /* List of RestrictInfo */
+ /*
+ * Upper relation information: the stage this relation represents, if it
+ * is an upper relation (UPPERREL_GROUP_AGG, UPPERREL_ORDERED, or
+ * UPPERREL_FINAL)
+ */
+ UpperRelationKind stage;
+
/* Grouping information */
List *grouped_tlist;
@@ -122,62 +134,72 @@ extern unsigned int GetPrepStmtNumber(PGconn *conn);
extern PGresult *pgfdw_get_result(PGconn *conn, const char *query);
extern PGresult *pgfdw_exec_query(PGconn *conn, const char *query);
extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
- bool clear, const char *sql);
+ bool clear, const char *sql);
/* in option.c */
-extern int ExtractConnectionOptions(List *defelems,
- const char **keywords,
- const char **values);
+extern int ExtractConnectionOptions(List *defelems,
+ const char **keywords,
+ const char **values);
extern List *ExtractExtensionList(const char *extensionsString,
- bool warnOnMissing);
+ bool warnOnMissing);
/* in deparse.c */
extern void classifyConditions(PlannerInfo *root,
- RelOptInfo *baserel,
- List *input_conds,
- List **remote_conds,
- List **local_conds);
+ RelOptInfo *baserel,
+ List *input_conds,
+ List **remote_conds,
+ List **local_conds);
extern bool is_foreign_expr(PlannerInfo *root,
- RelOptInfo *baserel,
- Expr *expr);
-extern void deparseInsertSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *targetAttrs, bool doNothing, List *returningList,
- List **retrieved_attrs);
-extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
- List **retrieved_attrs);
+ RelOptInfo *baserel,
+ Expr *expr);
+extern bool is_foreign_param(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr);
+extern void deparseInsertSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs, bool doNothing,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs);
+extern void deparseUpdateSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs);
extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- RelOptInfo *foreignrel,
- List *targetlist,
- List *targetAttrs,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
-extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *targetlist,
+ List *targetAttrs,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
+extern void deparseDeleteSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- RelOptInfo *foreignrel,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
- List **retrieved_attrs);
+ List **retrieved_attrs);
extern void deparseStringLiteral(StringInfo buf, const char *val);
extern Expr *find_em_expr_for_rel(EquivalenceClass *ec, RelOptInfo *rel);
+extern Expr *find_em_expr_for_input_target(PlannerInfo *root,
+ EquivalenceClass *ec,
+ PathTarget *target);
extern List *build_tlist_to_deparse(RelOptInfo *foreignrel);
extern void deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root,
- RelOptInfo *foreignrel, List *tlist,
- List *remote_conds, List *pathkeys, bool is_subquery,
- List **retrieved_attrs, List **params_list);
+ RelOptInfo *foreignrel, List *tlist,
+ List *remote_conds, List *pathkeys,
+ bool has_final_sort, bool has_limit,
+ bool is_subquery,
+ List **retrieved_attrs, List **params_list);
extern const char *get_jointype_name(JoinType jointype);
/* in shippable.c */
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index 7f2ed0499c0..2b55a40db36 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -13,7 +13,7 @@
* functions or functions using nonportable collations. Those considerations
* need not be accounted for here.
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/postgres_fdw/shippable.c
@@ -137,7 +137,7 @@ lookup_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
/*
* Return true if given object is one of PostgreSQL's built-in objects.
*
- * We use FirstBootstrapObjectId as the cutoff, so that we only consider
+ * We use FirstGenbkiObjectId as the cutoff, so that we only consider
* objects with hand-assigned OIDs to be "built in", not for instance any
* function or type defined in the information_schema.
*
@@ -154,7 +154,7 @@ lookup_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
bool
is_builtin(Oid objectId)
{
- return (objectId < FirstBootstrapObjectId);
+ return (objectId < FirstGenbkiObjectId);
}
/*
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index e1df952e7af..630b803e262 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -142,14 +142,6 @@ CREATE FOREIGN TABLE ft6 (
c3 text
) SERVER loopback2 OPTIONS (schema_name 'S 1', table_name 'T 4');
--- A table with oids. CREATE FOREIGN TABLE doesn't support the
--- WITH OIDS option, but ALTER does.
-CREATE FOREIGN TABLE ft_pg_type (
- typname name,
- typlen smallint
-) SERVER loopback OPTIONS (schema_name 'pg_catalog', table_name 'pg_type');
-ALTER TABLE ft_pg_type SET WITH OIDS;
-
-- ===================================================================
-- tests for validator
-- ===================================================================
@@ -172,6 +164,7 @@ ALTER SERVER testserver1 OPTIONS (
keepalives 'value',
keepalives_idle 'value',
keepalives_interval 'value',
+ tcp_user_timeout 'value',
-- requiressl 'value',
sslcompression 'value',
sslmode 'value',
@@ -304,7 +297,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- Op
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
-EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
-- parameterized remote path for foreign table
@@ -357,6 +350,11 @@ EXPLAIN (VERBOSE, COSTS OFF)
SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+-- ORDER BY can be shipped, though
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+
-- but let's put them in an extension ...
ALTER EXTENSION postgres_fdw ADD FUNCTION postgres_fdw_abs(int);
ALTER EXTENSION postgres_fdw ADD OPERATOR === (int, int);
@@ -370,6 +368,11 @@ EXPLAIN (VERBOSE, COSTS OFF)
SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+-- and both ORDER BY and LIMIT can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+
-- ===================================================================
-- JOIN queries
-- ===================================================================
@@ -501,8 +504,8 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
-- join in CTE
EXPLAIN (VERBOSE, COSTS OFF)
-WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
-WITH t (c1_1, c1_3, c2_1) AS (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
-- ctid with whole-row reference
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.ctid, t1, t2, t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
@@ -514,7 +517,7 @@ SELECT t1.c1 FROM ft1 t1 WHERE EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c1)
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
--- CROSS JOIN, not pushed down
+-- CROSS JOIN can be pushed down
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
@@ -567,15 +570,19 @@ SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5
-- multi-way join involving multiple merge joins
-- (this case used to have EPQ-related planning problems)
+CREATE TABLE local_tbl (c1 int NOT NULL, c2 int NOT NULL, c3 text, CONSTRAINT local_tbl_pkey PRIMARY KEY (c1));
+INSERT INTO local_tbl SELECT id, id % 10, to_char(id, 'FM0000') FROM generate_series(1, 1000) id;
+ANALYZE local_tbl;
SET enable_nestloop TO false;
SET enable_hashjoin TO false;
EXPLAIN (VERBOSE, COSTS OFF)
-SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
- AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
-SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
- AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
RESET enable_nestloop;
RESET enable_hashjoin;
+DROP TABLE local_tbl;
-- check join pushdown in situations where multiple userids are involved
CREATE ROLE regress_view_owner SUPERUSER;
@@ -617,6 +624,10 @@ explain (verbose, costs off)
select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+
-- Aggregate is not pushed down as aggregation contains random()
explain (verbose, costs off)
select sum(c1 * (random() <= 1)::int) as sum, avg(c1) from ft1;
@@ -674,6 +685,16 @@ select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having
explain (verbose, costs off)
select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100 order by 1;
+-- Remote aggregate in combination with a local Param (for the output
+-- of an initplan) can be trouble, per bug #15781
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+
-- Testing ORDER BY, DISTINCT, FILTER, Ordered-sets and VARIADIC within aggregates
@@ -815,6 +836,9 @@ create operator class my_op_class for type int using btree family my_op_family a
explain (verbose, costs off)
select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+-- Update local stats on ft2
+ANALYZE ft2;
+
-- Add into extension
alter extension postgres_fdw add operator class my_op_class using btree;
alter extension postgres_fdw add function my_op_cmp(a int, b int);
@@ -884,6 +908,32 @@ select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum fr
select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1;
reset enable_hashagg;
+-- bug #15613: bad plan for foreign table scan with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+
-- Check with placeHolderVars
explain (verbose, costs off)
select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
@@ -1002,9 +1052,6 @@ SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
EXPLAIN (VERBOSE, COSTS OFF)
SELECT ctid, * FROM ft1 t1 LIMIT 1;
SELECT ctid, * FROM ft1 t1 LIMIT 1;
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT oid, * FROM ft_pg_type WHERE typname = 'int4';
-SELECT oid, * FROM ft_pg_type WHERE typname = 'int4';
-- ===================================================================
-- used in PL/pgSQL function
@@ -1099,14 +1146,14 @@ DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
EXPLAIN (verbose, costs off)
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
-INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
EXPLAIN (verbose, costs off)
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down
-UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass;
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass;
EXPLAIN (verbose, costs off)
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down
-DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass;
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass;
-- Test UPDATE/DELETE with RETURNING on a three-table join
INSERT INTO ft2 (c1,c2,c3)
@@ -1262,27 +1309,74 @@ ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative;
-- test WITH CHECK OPTION constraints
-- ===================================================================
+CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql;
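+-- (The remote trigger adds 10 to NEW.a, so the WITH CHECK OPTION below must
+-- be re-checked against the row as actually stored, not as supplied.)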
+
CREATE TABLE base_tbl (a int, b int);
ALTER TABLE base_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
CREATE FOREIGN TABLE foreign_tbl (a int, b int)
- SERVER loopback OPTIONS(table_name 'base_tbl');
+ SERVER loopback OPTIONS (table_name 'base_tbl');
CREATE VIEW rw_view AS SELECT * FROM foreign_tbl
WHERE a < b WITH CHECK OPTION;
\d+ rw_view
-INSERT INTO rw_view VALUES (0, 10); -- ok
-INSERT INTO rw_view VALUES (10, 0); -- should fail
EXPLAIN (VERBOSE, COSTS OFF)
-UPDATE rw_view SET b = 20 WHERE a = 0; -- not pushed down
-UPDATE rw_view SET b = 20 WHERE a = 0; -- ok
+INSERT INTO rw_view VALUES (0, 5);
+INSERT INTO rw_view VALUES (0, 5); -- should fail
EXPLAIN (VERBOSE, COSTS OFF)
-UPDATE rw_view SET b = -20 WHERE a = 0; -- not pushed down
-UPDATE rw_view SET b = -20 WHERE a = 0; -- should fail
+INSERT INTO rw_view VALUES (0, 15);
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+UPDATE rw_view SET b = b + 5; -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+UPDATE rw_view SET b = b + 15; -- ok
SELECT * FROM foreign_tbl;
DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON base_tbl;
DROP TABLE base_tbl;
+-- test WCO for partitions
+
+CREATE TABLE child_tbl (a int, b int);
+ALTER TABLE child_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'child_tbl');
+
+CREATE TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+
+CREATE VIEW rw_view AS SELECT * FROM parent_tbl
+ WHERE a < b WITH CHECK OPTION;
+\d+ rw_view
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+UPDATE rw_view SET b = b + 5; -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON child_tbl;
+DROP TABLE parent_tbl CASCADE;
+
+DROP FUNCTION row_before_insupd_trigfunc;
+
-- ===================================================================
-- test serial columns (ie, sequence-based defaults)
-- ===================================================================
@@ -1298,6 +1392,20 @@ insert into rem1(f2) values('bye remote');
select * from loc1;
select * from rem1;
+-- ===================================================================
+-- test generated columns
+-- ===================================================================
+create table gloc1 (a int, b int);
+alter table gloc1 set (autovacuum_enabled = 'false');
+create foreign table grem1 (
+ a int,
+ b int generated always as (a * 2) stored)
+ server loopback options(table_name 'gloc1');
+insert into grem1 (a) values (1), (2);
+update grem1 set a = 22 where a = 2;
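+-- b is generated as (a * 2), so both tables should now show (1,2) and (22,44)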
+select * from gloc1;
+select * from grem1;
+
-- ===================================================================
-- test local triggers
-- ===================================================================
@@ -1445,6 +1553,11 @@ SELECT * from loc1;
UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
SELECT * from loc1;
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f1 = 10; -- all columns should be transmitted
+UPDATE rem1 set f1 = 10;
+SELECT * from loc1;
+
DELETE FROM rem1;
-- Add a second trigger, to check that the changes are propagated correctly
@@ -1767,6 +1880,38 @@ drop table bar cascade;
drop table loct1;
drop table loct2;
+-- Test pushing down UPDATE/DELETE joins to the remote server
+create table parent (a int, b text);
+create table loct1 (a int, b text);
+create table loct2 (a int, b text);
+create foreign table remt1 (a int, b text)
+ server loopback options (table_name 'loct1');
+create foreign table remt2 (a int, b text)
+ server loopback options (table_name 'loct2');
+alter foreign table remt1 inherit parent;
+
+insert into remt1 values (1, 'foo');
+insert into remt1 values (2, 'bar');
+insert into remt2 values (1, 'foo');
+insert into remt2 values (2, 'bar');
+
+analyze remt1;
+analyze remt2;
+
+explain (verbose, costs off)
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+explain (verbose, costs off)
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+
+-- cleanup
+drop foreign table remt1;
+drop foreign table remt2;
+drop table loct1;
+drop table loct2;
+drop table parent;
+
-- ===================================================================
-- test tuple routing for foreign-table partitions
-- ===================================================================
@@ -1804,6 +1949,31 @@ insert into itrtest values (1, 'bar') on conflict (a) do update set b = excluded
select tableoid::regclass, * FROM itrtest;
+delete from itrtest;
+
+drop index loct1_idx;
+
+-- Test that remote triggers work with insert tuple routing
+create function br_insert_trigfunc() returns trigger as $$
+begin
+ new.b := new.b || ' triggered !';
+ return new;
+end
+$$ language plpgsql;
+create trigger loct1_br_insert_trigger before insert on loct1
+ for each row execute procedure br_insert_trigfunc();
+create trigger loct2_br_insert_trigger before insert on loct2
+ for each row execute procedure br_insert_trigfunc();
+
+-- The new values are concatenated with ' triggered !'
+insert into itrtest values (1, 'foo') returning *;
+insert into itrtest values (2, 'qux') returning *;
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result;
+
+drop trigger loct1_br_insert_trigger on loct1;
+drop trigger loct2_br_insert_trigger on loct2;
+
drop table itrtest;
drop table loct1;
drop table loct2;
@@ -1836,6 +2006,75 @@ select tableoid::regclass, * FROM locp;
-- The executor should not let unexercised FDWs shut down
update utrtest set a = 1 where b = 'foo';
+-- Test that remote triggers work with update tuple routing
+create trigger loct_br_insert_trigger before insert on loct
+ for each row execute procedure br_insert_trigfunc();
+
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+
+-- Check case where the foreign partition is a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+
+-- Check case where the foreign partition isn't a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 2 returning *;
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 2 returning *;
+
+drop trigger loct_br_insert_trigger on loct;
+
+-- We can move rows to a foreign partition that has been updated already,
+-- but can't move rows to a foreign partition that hasn't been updated yet
+
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+
+-- Test the former case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 returning *;
+update utrtest set a = 1 returning *;
+
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+
+-- Change the definition of utrtest so that the foreign partition gets updated
+-- after the local partition
+delete from utrtest;
+alter table utrtest detach partition remp;
+drop foreign table remp;
+alter table loct drop constraint loct_a_check;
+alter table loct add check (a in (3));
+create foreign table remp (a int check (a in (3)), b text) server loopback options (table_name 'loct');
+alter table utrtest attach partition remp for values in (3);
+insert into utrtest values (2, 'qux');
+insert into utrtest values (3, 'xyzzy');
+
+-- Test the latter case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 returning *;
+update utrtest set a = 3 returning *; -- ERROR
+
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *;
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *; -- ERROR
+
drop table utrtest;
drop table loct;
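The two ERROR cases above pin down the limitation stated in the comment: a row may be moved into a foreign partition only if that partition has already been processed as an UPDATE target. Once remp (now accepting a = 3) sorts after the local partition, routing a row into it should fail along these lines (message paraphrased, not verbatim):

    -- update utrtest set a = 3 returning *;
    -- ERROR:  cannot route tuples into foreign table to be updated "remp"
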
@@ -2004,6 +2243,20 @@ drop trigger loc2_trig_row_before_insert on loc2;
delete from rem2;
+-- test COPY FROM with foreign table created in the same transaction
+create table loc3 (f1 int, f2 text);
+begin;
+create foreign table rem3 (f1 int, f2 text)
+ server loopback options(table_name 'loc3');
+copy rem3 from stdin;
+1 foo
+2 bar
+\.
+commit;
+select * from rem3;
+drop foreign table rem3;
+drop table loc3;
+
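The interesting part here is that the foreign table is created inside the still-open transaction that then runs COPY, exercising COPY FROM on a freshly created foreign relation. From psql, the equivalent load could also come from a file rather than inline data (file name hypothetical):

    \copy rem3 from 'rem3_data.txt'
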
-- ===================================================================
-- test IMPORT FOREIGN SCHEMA
-- ===================================================================
@@ -2135,8 +2388,9 @@ ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false');
ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false');
INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i;
INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i;
-CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (0) TO (250)
+CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int)
SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true');
+ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250);
CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500)
SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true');
ANALYZE fprt2;
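Note the rewritten ftprt2_p1: it is now created standalone with its columns in a different physical order (b, c, a) than the parent fprt2 declared earlier in this file, and attached afterwards. ATTACH PARTITION matches columns by name, so this deliberately exercises the attribute-number conversion paths in the partitionwise-join tests below:

    -- sketch: parent and partition differ in physical column order,
    -- yet queries through the parent still see (a, b, c)
    SELECT a, b, c FROM fprt2 LIMIT 1;
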
@@ -2148,26 +2402,31 @@ EXPLAIN (COSTS OFF)
SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
--- left outer join + nullable clasue
-EXPLAIN (COSTS OFF)
+-- left outer join + nullable clause
+EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
--- with whole-row reference
+-- with whole-row reference; partitionwise join does not apply
EXPLAIN (COSTS OFF)
-SELECT t1,t2 FROM fprt1 t1 JOIN fprt2 t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a % 25 =0 ORDER BY 1,2;
-SELECT t1,t2 FROM fprt1 t1 JOIN fprt2 t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a % 25 =0 ORDER BY 1,2;
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
-- join with lateral reference
EXPLAIN (COSTS OFF)
SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
--- with PHVs, partition-wise join selected but no join pushdown
+-- with PHVs, partitionwise join selected but no join pushdown
EXPLAIN (COSTS OFF)
SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
+-- test FOR UPDATE; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+
RESET enable_partitionwise_join;
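The FOR UPDATE case rounds out the coverage: a row mark on a foreign table is implemented by fetching whole rows (ROW_MARK_COPY), and, as with the whole-row test above, that keeps partitionwise join from applying, so the plan should fall back to a single join over Appends of per-partition foreign scans. A quick way to confirm, assuming the fprt1/fprt2 setup above:

    EXPLAIN (costs off)
    SELECT t1.a, t2.b FROM fprt1 t1 JOIN fprt2 t2 ON (t1.a = t2.b)
      FOR UPDATE OF t1;
    -- expect one join above two Appends, not per-partition join pairs
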
diff --git a/contrib/seg/Makefile b/contrib/seg/Makefile
index 41270f84f62..62b658e7243 100644
--- a/contrib/seg/Makefile
+++ b/contrib/seg/Makefile
@@ -8,6 +8,8 @@ DATA = seg--1.1.sql seg--1.1--1.2.sql seg--1.2--1.3.sql \
seg--1.0--1.1.sql seg--unpackaged--1.0.sql
PGFILEDESC = "seg - line segment data type"
+HEADERS = segdata.h
+
REGRESS = seg
EXTRA_CLEAN = y.tab.c y.tab.h
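The new HEADERS line asks the build system to install segdata.h with the server include files (presumably under include/server/extension/seg/), so external C code can compile against seg's on-disk struct instead of keeping a private copy of the header.
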
diff --git a/contrib/seg/expected/seg.out b/contrib/seg/expected/seg.out
index a289dbe5f99..80b0bca1568 100644
--- a/contrib/seg/expected/seg.out
+++ b/contrib/seg/expected/seg.out
@@ -1127,7 +1127,7 @@ FROM test_seg WHERE s @> '11.2..11.3' OR s IS NULL ORDER BY s;
2.1 | 6.95 | 11.8
2.3 | Infinity | Infinity
2.3 | Infinity | Infinity
- 2.4 | 6.85 | 11.3
+ 2.4 | 6.8500004 | 11.3
2.5 | 7 | 11.5
2.5 | 7.15 | 11.8
2.6 | Infinity | Infinity
@@ -1155,7 +1155,7 @@ FROM test_seg WHERE s @> '11.2..11.3' OR s IS NULL ORDER BY s;
4.5 | 59.75 | 115
4.7 | 8.25 | 11.8
4.8 | 8.15 | 11.5
- 4.8 | 8.2 | 11.6
+ 4.8 | 8.200001 | 11.6
4.8 | 8.65 | 12.5
4.8 | Infinity | Infinity
4.9 | 8.45 | 12
@@ -1244,7 +1244,7 @@ FROM test_seg WHERE s @> '11.2..11.3' OR s IS NULL ORDER BY s;
9 | 10.5 | 12
9 | Infinity | Infinity
9.2 | 10.6 | 12
- 9.4 | 10.8 | 12.2
+ 9.4 | 10.799999 | 12.2
9.5 | 10.75 | 12
9.5 | 10.85 | 12.2
9.5 | Infinity | Infinity
diff --git a/contrib/seg/expected/seg_1.out b/contrib/seg/expected/seg_1.out
deleted file mode 100644
index 48abb65bb0c..00000000000
--- a/contrib/seg/expected/seg_1.out
+++ /dev/null
@@ -1,1266 +0,0 @@
---
--- Test seg datatype
---
-CREATE EXTENSION seg;
--- Check whether any of our opclasses fail amvalidate
-SELECT amname, opcname
-FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod
-WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid);
- amname | opcname
---------+---------
-(0 rows)
-
---
--- testing the input and output functions
---
--- Any number
-SELECT '1'::seg AS seg;
- seg
------
- 1
-(1 row)
-
-SELECT '-1'::seg AS seg;
- seg
------
- -1
-(1 row)
-
-SELECT '1.0'::seg AS seg;
- seg
------
- 1.0
-(1 row)
-
-SELECT '-1.0'::seg AS seg;
- seg
-------
- -1.0
-(1 row)
-
-SELECT '1e7'::seg AS seg;
- seg
---------
- 1e+007
-(1 row)
-
-SELECT '-1e7'::seg AS seg;
- seg
----------
- -1e+007
-(1 row)
-
-SELECT '1.0e7'::seg AS seg;
- seg
-----------
- 1.0e+007
-(1 row)
-
-SELECT '-1.0e7'::seg AS seg;
- seg
------------
- -1.0e+007
-(1 row)
-
-SELECT '1e+7'::seg AS seg;
- seg
---------
- 1e+007
-(1 row)
-
-SELECT '-1e+7'::seg AS seg;
- seg
----------
- -1e+007
-(1 row)
-
-SELECT '1.0e+7'::seg AS seg;
- seg
-----------
- 1.0e+007
-(1 row)
-
-SELECT '-1.0e+7'::seg AS seg;
- seg
------------
- -1.0e+007
-(1 row)
-
-SELECT '1e-7'::seg AS seg;
- seg
---------
- 1e-007
-(1 row)
-
-SELECT '-1e-7'::seg AS seg;
- seg
----------
- -1e-007
-(1 row)
-
-SELECT '1.0e-7'::seg AS seg;
- seg
-----------
- 1.0e-007
-(1 row)
-
-SELECT '-1.0e-7'::seg AS seg;
- seg
------------
- -1.0e-007
-(1 row)
-
-SELECT '2e-6'::seg AS seg;
- seg
---------
- 2e-006
-(1 row)
-
-SELECT '2e-5'::seg AS seg;
- seg
---------
- 2e-005
-(1 row)
-
-SELECT '2e-4'::seg AS seg;
- seg
---------
- 0.0002
-(1 row)
-
-SELECT '2e-3'::seg AS seg;
- seg
--------
- 0.002
-(1 row)
-
-SELECT '2e-2'::seg AS seg;
- seg
-------
- 0.02
-(1 row)
-
-SELECT '2e-1'::seg AS seg;
- seg
------
- 0.2
-(1 row)
-
-SELECT '2e-0'::seg AS seg;
- seg
------
- 2
-(1 row)
-
-SELECT '2e+0'::seg AS seg;
- seg
------
- 2
-(1 row)
-
-SELECT '2e+1'::seg AS seg;
- seg
------
- 2e1
-(1 row)
-
-SELECT '2e+2'::seg AS seg;
- seg
------
- 2e2
-(1 row)
-
-SELECT '2e+3'::seg AS seg;
- seg
------
- 2e3
-(1 row)
-
-SELECT '2e+4'::seg AS seg;
- seg
------
- 2e4
-(1 row)
-
-SELECT '2e+5'::seg AS seg;
- seg
---------
- 2e+005
-(1 row)
-
-SELECT '2e+6'::seg AS seg;
- seg
---------
- 2e+006
-(1 row)
-
--- Significant digits preserved
-SELECT '1'::seg AS seg;
- seg
------
- 1
-(1 row)
-
-SELECT '1.0'::seg AS seg;
- seg
------
- 1.0
-(1 row)
-
-SELECT '1.00'::seg AS seg;
- seg
-------
- 1.00
-(1 row)
-
-SELECT '1.000'::seg AS seg;
- seg
--------
- 1.000
-(1 row)
-
-SELECT '1.0000'::seg AS seg;
- seg
---------
- 1.0000
-(1 row)
-
-SELECT '1.00000'::seg AS seg;
- seg
----------
- 1.00000
-(1 row)
-
-SELECT '1.000000'::seg AS seg;
- seg
----------
- 1.00000
-(1 row)
-
-SELECT '0.000000120'::seg AS seg;
- seg
------------
- 1.20e-007
-(1 row)
-
-SELECT '3.400e5'::seg AS seg;
- seg
-------------
- 3.400e+005
-(1 row)
-
--- Digits truncated
-SELECT '12.34567890123456'::seg AS seg;
- seg
----------
- 12.3457
-(1 row)
-
--- Numbers with certainty indicators
-SELECT '~6.5'::seg AS seg;
- seg
-------
- ~6.5
-(1 row)
-
-SELECT '<6.5'::seg AS seg;
- seg
-------
- <6.5
-(1 row)
-
-SELECT '>6.5'::seg AS seg;
- seg
-------
- >6.5
-(1 row)
-
-SELECT '~ 6.5'::seg AS seg;
- seg
-------
- ~6.5
-(1 row)
-
-SELECT '< 6.5'::seg AS seg;
- seg
-------
- <6.5
-(1 row)
-
-SELECT '> 6.5'::seg AS seg;
- seg
-------
- >6.5
-(1 row)
-
--- Open intervals
-SELECT '0..'::seg AS seg;
- seg
-------
- 0 ..
-(1 row)
-
-SELECT '0...'::seg AS seg;
- seg
-------
- 0 ..
-(1 row)
-
-SELECT '0 ..'::seg AS seg;
- seg
-------
- 0 ..
-(1 row)
-
-SELECT '0 ...'::seg AS seg;
- seg
-------
- 0 ..
-(1 row)
-
-SELECT '..0'::seg AS seg;
- seg
-------
- .. 0
-(1 row)
-
-SELECT '...0'::seg AS seg;
- seg
-------
- .. 0
-(1 row)
-
-SELECT '.. 0'::seg AS seg;
- seg
-------
- .. 0
-(1 row)
-
-SELECT '... 0'::seg AS seg;
- seg
-------
- .. 0
-(1 row)
-
--- Finite intervals
-SELECT '0 .. 1'::seg AS seg;
- seg
---------
- 0 .. 1
-(1 row)
-
-SELECT '-1 .. 0'::seg AS seg;
- seg
----------
- -1 .. 0
-(1 row)
-
-SELECT '-1 .. 1'::seg AS seg;
- seg
----------
- -1 .. 1
-(1 row)
-
--- (+/-) intervals
-SELECT '0(+-)1'::seg AS seg;
- seg
----------
- -1 .. 1
-(1 row)
-
-SELECT '0(+-)1.0'::seg AS seg;
- seg
--------------
- -1.0 .. 1.0
-(1 row)
-
-SELECT '1.0(+-)0.005'::seg AS seg;
- seg
-----------------
- 0.995 .. 1.005
-(1 row)
-
-SELECT '101(+-)1'::seg AS seg;
- seg
-------------------
- 1.00e2 .. 1.02e2
-(1 row)
-
--- incorrect number of significant digits in 99.0:
-SELECT '100(+-)1'::seg AS seg;
- seg
-----------------
- 99.0 .. 1.01e2
-(1 row)
-
--- invalid input
-SELECT ''::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT ''::seg AS seg;
- ^
-DETAIL: syntax error at end of input
-SELECT 'ABC'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT 'ABC'::seg AS seg;
- ^
-DETAIL: syntax error at or near "A"
-SELECT '1ABC'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '1ABC'::seg AS seg;
- ^
-DETAIL: syntax error at or near "A"
-SELECT '1.'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '1.'::seg AS seg;
- ^
-DETAIL: syntax error at or near "."
-SELECT '1.....'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '1.....'::seg AS seg;
- ^
-DETAIL: syntax error at or near ".."
-SELECT '.1'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '.1'::seg AS seg;
- ^
-DETAIL: syntax error at or near "."
-SELECT '1..2.'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '1..2.'::seg AS seg;
- ^
-DETAIL: syntax error at or near "."
-SELECT '1 e7'::seg AS seg;
-ERROR: bad seg representation
-LINE 1: SELECT '1 e7'::seg AS seg;
- ^
-DETAIL: syntax error at or near "e"
-SELECT '1e700'::seg AS seg;
-ERROR: "1e700" is out of range for type real
-LINE 1: SELECT '1e700'::seg AS seg;
- ^
---
--- testing the operators
---
--- equality/inequality:
---
-SELECT '24 .. 33.20'::seg = '24 .. 33.20'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '24 .. 33.20'::seg = '24 .. 33.21'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '24 .. 33.20'::seg != '24 .. 33.20'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '24 .. 33.20'::seg != '24 .. 33.21'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- overlap
---
-SELECT '1'::seg && '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg && '2'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 ..'::seg && '0 ..'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '..0'::seg && '0..'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1 .. 0.1'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1 .. 0'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1 .. -0.0001'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 ..'::seg && '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg && '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg && '2'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 2'::seg && '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '2'::seg && '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg && '0 .. 2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- overlap on the left
---
-SELECT '1'::seg &< '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg &< '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg &< '2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg &< '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '0 .. 0.5'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg &< '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '0 .. 2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '1 .. 2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &< '2 .. 3'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- overlap on the right
---
-SELECT '0'::seg &> '1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg &> '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '2'::seg &> '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '2'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 0.5'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 2'::seg &> '0 .. 2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1 .. 2'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '2 .. 3'::seg &> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- left
---
-SELECT '1'::seg << '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg << '1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg << '2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg << '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '2'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 1'::seg << '0 .. 0.5'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '0 .. 2'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '1 .. 2'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg << '2 .. 3'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- right
---
-SELECT '0'::seg >> '1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg >> '1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '2'::seg >> '1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '2'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. 0.5'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 1'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0 .. 2'::seg >> '0 .. 2'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '1 .. 2'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '2 .. 3'::seg >> '0 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- "contained in" (the left value belongs within the interval specified in the right value):
---
-SELECT '0'::seg <@ '0'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg <@ '0 ..'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg <@ '.. 0'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1 .. 1'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- "contains" (the left value contains the interval specified in the right value):
---
-SELECT '0'::seg @> '0'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '0 .. '::seg <@ '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '.. 0'::seg <@ '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '-1 .. 1'::seg <@ '0'::seg AS bool;
- bool
-------
- f
-(1 row)
-
-SELECT '0'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '-1'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
-SELECT '1'::seg <@ '-1 .. 1'::seg AS bool;
- bool
-------
- t
-(1 row)
-
--- Load some example data and build the index
---
-CREATE TABLE test_seg (s seg);
-\copy test_seg from 'data/test_seg.data'
-CREATE INDEX test_seg_ix ON test_seg USING gist (s);
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
- QUERY PLAN
--------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_seg
- Recheck Cond: (s @> '1.1e1 .. 11.3'::seg)
- -> Bitmap Index Scan on test_seg_ix
- Index Cond: (s @> '1.1e1 .. 11.3'::seg)
-(5 rows)
-
-SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
- count
--------
- 143
-(1 row)
-
-SET enable_bitmapscan = false;
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
- QUERY PLAN
------------------------------------------------------
- Aggregate
- -> Index Only Scan using test_seg_ix on test_seg
- Index Cond: (s @> '1.1e1 .. 11.3'::seg)
-(3 rows)
-
-SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
- count
--------
- 143
-(1 row)
-
-RESET enable_bitmapscan;
--- Test sorting
-SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
- s
------------------
- .. 4.0e1
- .. >8.2e1
- .. 9.0e1
- <1.0 .. >13.0
- 1.3 .. 12.0
- 2.0 .. 11.5
- 2.1 .. 11.8
- <2.3 ..
- >2.3 ..
- 2.4 .. 11.3
- 2.5 .. 11.5
- 2.5 .. 11.8
- 2.6 ..
- 2.7 .. 12.0
- <3.0 ..
- 3 .. 5.8e1
- 3.1 .. 11.5
- 3.5 .. 11.5
- 3.5 .. 12.2
- <4.0 .. >1.2e1
- <4.0 ..
- 4 .. 1.2e1
- 4.0 .. 11.7
- 4.0 .. 12.5
- 4.0 .. 13.0
- 4.0 .. 6.0e1
- 4.0 ..
- 4.2 .. 11.5
- 4.2 .. 11.7
- <4.5 .. >1.2e1
- 4.5 .. 11.5
- 4.5 .. <1.2e1
- 4.5 .. >1.2e1
- 4.5 .. 12.5
- 4.5 .. 1.15e2
- 4.7 .. 11.8
- 4.8 .. 11.5
- 4.8 .. 11.6
- 4.8 .. 12.5
- 4.8 ..
- 4.9 .. >1.2e1
- 4.9 ..
- 5 .. 11.5
- 5 .. 1.2e1
- 5 .. 3.0e1
- 5.0 .. 11.4
- 5.0 .. 11.5
- 5.0 .. 11.6
- 5.0 .. 11.7
- 5.0 .. 12.0
- 5.0 .. >12.0
- 5.0 .. >1.2e1
- 5.2 .. 11.5
- 5.2 .. >1.2e1
- 5.25 .. >1.2e1
- 5.3 .. 11.5
- 5.3 .. 1.3e1
- 5.3 .. >9.0e1
- 5.3 ..
- 5.4 ..
- 5.5 .. 11.5
- 5.5 .. 11.7
- 5.5 .. 1.2e1
- 5.5 .. >1.2e1
- 5.5 .. 12.5
- 5.5 .. 13.5
- 5.5 ..
- >5.5 ..
- 5.7 ..
- 5.9 ..
- 6 .. 11.5
- 6 .. >1.2e1
- 6.0 .. 11.5
- 6.0 .. 1.3e1
- >6.0 .. <11.5
- 6.1 .. >1.2e1
- 6.1 ..
- 6.2 .. >11.5
- 6.3 ..
- 6.5 .. 11.5
- 6.5 .. 12.0
- 6.5 .. >12.0
- 6.5 ..
- 6.6 ..
- 6.7 .. 11.5
- 6.7 ..
- 6.75 ..
- 6.8 ..
- 6.9 .. 12.2
- 6.9 .. >9.0e1
- 6.9 ..
- <7.0 .. >11.5
- 7.0 .. 11.5
- 7.0 .. >11.5
- 7.0 ..
- >7.15 ..
- 7.2 .. 13.5
- 7.3 .. >9.0e1
- 7.3 ..
- >7.3 ..
- 7.4 .. 12.1
- 7.4 ..
- 7.5 .. 11.5
- 7.5 .. 12.0
- 7.5 ..
- 7.7 .. 11.5
- 7.7 ..
- 7.75 ..
- 8.0 .. 11.7
- 8.0 .. 12.0
- 8.0 .. >13.0
- 8.2 ..
- 8.3 ..
- 8.5 .. >11.5
- 8.5 .. 12.5
- 8.5 ..
- 8.6 .. >9.9e1
- 8.7 .. 11.3
- 8.7 .. 11.7
- 8.9 .. 11.5
- 9 .. >1.2e1
- 9.0 .. 11.3
- 9.0 .. 11.5
- 9.0 .. 1.2e1
- 9.0 ..
- 9.2 .. 1.2e1
- 9.4 .. 12.2
- <9.5 .. 1.2e1
- <9.5 .. >12.2
- 9.5 ..
- 9.6 .. 11.5
- 9.7 .. 11.5
- 9.7 .. >1.2e1
- 9.8 .. >12.5
- <1.0e1 .. >11.6
- 10.0 .. 11.5
- 10.0 .. 12.5
- 10.0 .. >12.5
- 10.2 .. 11.8
- <10.5 .. 11.5
- 10.5 .. 11.5
- 10.5 .. <13.5
- 10.7 .. 12.3
-(143 rows)
-
--- Test functions
-SELECT seg_lower(s), seg_center(s), seg_upper(s)
-FROM test_seg WHERE s @> '11.2..11.3' OR s IS NULL ORDER BY s;
- seg_lower | seg_center | seg_upper
------------+------------+-----------
- -Infinity | -Infinity | 40
- -Infinity | -Infinity | 82
- -Infinity | -Infinity | 90
- 1 | 7 | 13
- 1.3 | 6.65 | 12
- 2 | 6.75 | 11.5
- 2.1 | 6.95 | 11.8
- 2.3 | Infinity | Infinity
- 2.3 | Infinity | Infinity
- 2.4 | 6.85 | 11.3
- 2.5 | 7 | 11.5
- 2.5 | 7.15 | 11.8
- 2.6 | Infinity | Infinity
- 2.7 | 7.35 | 12
- 3 | Infinity | Infinity
- 3 | 30.5 | 58
- 3.1 | 7.3 | 11.5
- 3.5 | 7.5 | 11.5
- 3.5 | 7.85 | 12.2
- 4 | 8 | 12
- 4 | Infinity | Infinity
- 4 | 8 | 12
- 4 | 7.85 | 11.7
- 4 | 8.25 | 12.5
- 4 | 8.5 | 13
- 4 | 32 | 60
- 4 | Infinity | Infinity
- 4.2 | 7.85 | 11.5
- 4.2 | 7.95 | 11.7
- 4.5 | 8.25 | 12
- 4.5 | 8 | 11.5
- 4.5 | 8.25 | 12
- 4.5 | 8.25 | 12
- 4.5 | 8.5 | 12.5
- 4.5 | 59.75 | 115
- 4.7 | 8.25 | 11.8
- 4.8 | 8.15 | 11.5
- 4.8 | 8.2 | 11.6
- 4.8 | 8.65 | 12.5
- 4.8 | Infinity | Infinity
- 4.9 | 8.45 | 12
- 4.9 | Infinity | Infinity
- 5 | 8.25 | 11.5
- 5 | 8.5 | 12
- 5 | 17.5 | 30
- 5 | 8.2 | 11.4
- 5 | 8.25 | 11.5
- 5 | 8.3 | 11.6
- 5 | 8.35 | 11.7
- 5 | 8.5 | 12
- 5 | 8.5 | 12
- 5 | 8.5 | 12
- 5.2 | 8.35 | 11.5
- 5.2 | 8.6 | 12
- 5.25 | 8.625 | 12
- 5.3 | 8.4 | 11.5
- 5.3 | 9.15 | 13
- 5.3 | 47.65 | 90
- 5.3 | Infinity | Infinity
- 5.4 | Infinity | Infinity
- 5.5 | 8.5 | 11.5
- 5.5 | 8.6 | 11.7
- 5.5 | 8.75 | 12
- 5.5 | 8.75 | 12
- 5.5 | 9 | 12.5
- 5.5 | 9.5 | 13.5
- 5.5 | Infinity | Infinity
- 5.5 | Infinity | Infinity
- 5.7 | Infinity | Infinity
- 5.9 | Infinity | Infinity
- 6 | 8.75 | 11.5
- 6 | 9 | 12
- 6 | 8.75 | 11.5
- 6 | 9.5 | 13
- 6 | 8.75 | 11.5
- 6.1 | 9.05 | 12
- 6.1 | Infinity | Infinity
- 6.2 | 8.85 | 11.5
- 6.3 | Infinity | Infinity
- 6.5 | 9 | 11.5
- 6.5 | 9.25 | 12
- 6.5 | 9.25 | 12
- 6.5 | Infinity | Infinity
- 6.6 | Infinity | Infinity
- 6.7 | 9.1 | 11.5
- 6.7 | Infinity | Infinity
- 6.75 | Infinity | Infinity
- 6.8 | Infinity | Infinity
- 6.9 | 9.55 | 12.2
- 6.9 | 48.45 | 90
- 6.9 | Infinity | Infinity
- 7 | 9.25 | 11.5
- 7 | 9.25 | 11.5
- 7 | 9.25 | 11.5
- 7 | Infinity | Infinity
- 7.15 | Infinity | Infinity
- 7.2 | 10.35 | 13.5
- 7.3 | 48.65 | 90
- 7.3 | Infinity | Infinity
- 7.3 | Infinity | Infinity
- 7.4 | 9.75 | 12.1
- 7.4 | Infinity | Infinity
- 7.5 | 9.5 | 11.5
- 7.5 | 9.75 | 12
- 7.5 | Infinity | Infinity
- 7.7 | 9.6 | 11.5
- 7.7 | Infinity | Infinity
- 7.75 | Infinity | Infinity
- 8 | 9.85 | 11.7
- 8 | 10 | 12
- 8 | 10.5 | 13
- 8.2 | Infinity | Infinity
- 8.3 | Infinity | Infinity
- 8.5 | 10 | 11.5
- 8.5 | 10.5 | 12.5
- 8.5 | Infinity | Infinity
- 8.6 | 53.8 | 99
- 8.7 | 10 | 11.3
- 8.7 | 10.2 | 11.7
- 8.9 | 10.2 | 11.5
- 9 | 10.5 | 12
- 9 | 10.15 | 11.3
- 9 | 10.25 | 11.5
- 9 | 10.5 | 12
- 9 | Infinity | Infinity
- 9.2 | 10.6 | 12
- 9.4 | 10.8 | 12.2
- 9.5 | 10.75 | 12
- 9.5 | 10.85 | 12.2
- 9.5 | Infinity | Infinity
- 9.6 | 10.55 | 11.5
- 9.7 | 10.6 | 11.5
- 9.7 | 10.85 | 12
- 9.8 | 11.15 | 12.5
- 10 | 10.8 | 11.6
- 10 | 10.75 | 11.5
- 10 | 11.25 | 12.5
- 10 | 11.25 | 12.5
- 10.2 | 11 | 11.8
- 10.5 | 11 | 11.5
- 10.5 | 11 | 11.5
- 10.5 | 12 | 13.5
- 10.7 | 11.5 | 12.3
- | |
-(144 rows)
-
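seg_1.out was the alternate expected file tolerating platform-dependent float formatting (note the three-digit exponents such as 1e+007, the old Windows style). With float4/float8 output now emitting the shortest string that round-trips exactly, a single canonical file suffices; that is also why seg.out above gains values like 6.8500004, the exact shortest form of the float4 that seg_center computes. A quick check, assuming the seg extension is installed:

    SELECT seg_center('2.4 .. 11.3'::seg);
    -- 6.8500004
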
diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c
index c641ec3565e..8edd3df643c 100644
--- a/contrib/sepgsql/database.c
+++ b/contrib/sepgsql/database.c
@@ -4,16 +4,16 @@
*
* Routines corresponding to database objects
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/dependency.h"
#include "catalog/pg_database.h"
#include "catalog/indexing.h"
@@ -21,7 +21,7 @@
#include "commands/seclabel.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
-#include "utils/tqual.h"
+#include "utils/snapmgr.h"
#include "sepgsql.h"
/*
@@ -63,7 +63,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
* check db_database:{getattr} permission
*/
initStringInfo(&audit_name);
- appendStringInfo(&audit_name, "%s", quote_identifier(dtemplate));
+ appendStringInfoString(&audit_name, quote_identifier(dtemplate));
sepgsql_avc_check_perms_label(tcontext,
SEPG_CLASS_DB_DATABASE,
SEPG_DB_DATABASE__GETATTR,
@@ -77,10 +77,10 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
	 * XXX - an upcoming version of libselinux will support taking the object
	 * name to handle special treatment of the default security label.
*/
- rel = heap_open(DatabaseRelationId, AccessShareLock);
+ rel = table_open(DatabaseRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_database_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(databaseId));
@@ -101,8 +101,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
* check db_database:{create} permission
*/
resetStringInfo(&audit_name);
- appendStringInfo(&audit_name, "%s",
- quote_identifier(NameStr(datForm->datname)));
+ appendStringInfoString(&audit_name,
+ quote_identifier(NameStr(datForm->datname)));
sepgsql_avc_check_perms_label(ncontext,
SEPG_CLASS_DB_DATABASE,
SEPG_DB_DATABASE__CREATE,
@@ -110,7 +110,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
true);
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
/*
* Assign the default security label on the new database
diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c
index 9bdbd7b60f5..2892346f800 100644
--- a/contrib/sepgsql/dml.c
+++ b/contrib/sepgsql/dml.c
@@ -4,7 +4,7 @@
*
* Routines to handle DML permission checks
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -161,12 +161,10 @@ check_relation_privileges(Oid relOid,
*/
if (sepgsql_getenforce() > 0)
{
- Oid relnamespace = get_rel_namespace(relOid);
-
- if (IsSystemNamespace(relnamespace) &&
- (required & (SEPG_DB_TABLE__UPDATE |
+ if ((required & (SEPG_DB_TABLE__UPDATE |
SEPG_DB_TABLE__INSERT |
- SEPG_DB_TABLE__DELETE)) != 0)
+ SEPG_DB_TABLE__DELETE)) != 0 &&
+ IsCatalogRelationOid(relOid))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("SELinux: hardwired security policy violation")));
diff --git a/contrib/sepgsql/expected/alter.out b/contrib/sepgsql/expected/alter.out
index 0948139f934..836acea33bc 100644
--- a/contrib/sepgsql/expected/alter.out
+++ b/contrib/sepgsql/expected/alter.out
@@ -179,9 +179,9 @@ LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_reg
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column a of table regtest_table"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table regtest_table_3"
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
LINE 1: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" f...
^
@@ -196,9 +196,9 @@ LINE 1: ..."regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(p...
QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column a of table regtest_table"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table regtest_table_3"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
ALTER TABLE regtest_table ADD CONSTRAINT test_ck CHECK (b like '%abc%') NOT VALID; -- not supported
ALTER TABLE regtest_table VALIDATE CONSTRAINT test_ck; -- not supported
@@ -212,12 +212,6 @@ ALTER TABLE regtest_table ENABLE TRIGGER regtest_test_trig; -- not supported
CREATE RULE regtest_test_rule AS ON INSERT TO regtest_table_3 DO ALSO NOTHING;
ALTER TABLE regtest_table_3 DISABLE RULE regtest_test_rule; -- not supported
ALTER TABLE regtest_table_3 ENABLE RULE regtest_test_rule; -- not supported
-ALTER TABLE regtest_table SET WITH OIDS;
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
-ALTER TABLE regtest_table SET WITHOUT OIDS;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid"
ALTER TABLE regtest_table SET (fillfactor = 75);
LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
ALTER TABLE regtest_table RESET (fillfactor);
@@ -265,15 +259,8 @@ LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_re
LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_1_tens.p"
ALTER TABLE regtest_ptable ADD CONSTRAINT test_ck CHECK (p like '%abc%') NOT VALID; -- not supported by sepgsql
ALTER TABLE regtest_ptable DROP CONSTRAINT test_ck; -- not supported by sepgsql
-ALTER TABLE regtest_ptable SET WITH OIDS;
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_ptable.oid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table_part.oid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_1_tens.oid"
-ALTER TABLE regtest_ptable SET WITHOUT OIDS;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table_part.oid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_1_tens.oid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_ptable.oid"
ALTER TABLE regtest_ptable SET TABLESPACE pg_default;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_ptable"
-- partitioned table child
ALTER TABLE regtest_table_part ALTER p SET DEFAULT 'abcd'; -- not supported by sepgsql
ALTER TABLE regtest_table_part ALTER p SET DEFAULT 'XYZ'; -- not supported by sepgsql
diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out
index 1c0409a7a65..729111351a0 100644
--- a/contrib/sepgsql/expected/ddl.out
+++ b/contrib/sepgsql/expected/ddl.out
@@ -61,9 +61,9 @@ LINE 1: ALTER TABLE regtest_table ADD COLUMN z int;
^
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z"
-CREATE TABLE regtest_table_2 (a int) WITH OIDS;
+CREATE TABLE regtest_table_2 (a int);
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
-LINE 1: CREATE TABLE regtest_table_2 (a int) WITH OIDS;
+LINE 1: CREATE TABLE regtest_table_2 (a int);
^
LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
@@ -73,7 +73,6 @@ LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_reg
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax"
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin"
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid"
LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a"
CREATE TABLE regtest_ptable (a int) PARTITION BY RANGE (a);
@@ -413,8 +412,6 @@ LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsq
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view"
ALTER TABLE regtest_table DROP COLUMN y;
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y"
-ALTER TABLE regtest_table_2 SET WITHOUT OIDS;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
ALTER TABLE regtest_ptable DROP COLUMN q CASCADE;
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_ones.q"
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_tens.q"
@@ -424,10 +421,10 @@ LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regte
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable.q"
DROP TABLE regtest_table;
LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq"
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid"
LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax"
diff --git a/contrib/sepgsql/expected/misc.out b/contrib/sepgsql/expected/misc.out
index 32b3bb4f585..b2c01e03ded 100644
--- a/contrib/sepgsql/expected/misc.out
+++ b/contrib/sepgsql/expected/misc.out
@@ -17,8 +17,8 @@ SET client_min_messages = log;
-- regular function and operators
SELECT * FROM t1 WHERE x > 50 AND y like '%64%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column y of table t1"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
x | y
@@ -33,14 +33,14 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT * FROM t1p WHERE o > 50 AND p like '%64%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
o | p
@@ -54,8 +54,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT * FROM t1p_ones WHERE o > 50 AND p like '%64%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
o | p
@@ -64,8 +64,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT * FROM t1p_tens WHERE o > 50 AND p like '%64%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
o | p
@@ -80,12 +80,12 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
-- aggregate function
SELECT MIN(x), AVG(x) FROM t1;
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
min | avg
-----+---------------------
1 | 50.5000000000000000
@@ -93,16 +93,16 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT MIN(o), AVG(o) FROM t1p;
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
min | avg
-----+---------------------
0 | 49.5000000000000000
@@ -110,12 +110,12 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT MIN(o), AVG(o) FROM t1p_ones;
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
min | avg
-----+--------------------
0 | 4.5000000000000000
@@ -123,12 +123,12 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT MIN(o), AVG(o) FROM t1p_tens;
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
-LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
min | avg
-----+---------------------
10 | 54.5000000000000000
@@ -137,8 +137,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
-- window function
SELECT row_number() OVER (order by x), * FROM t1 WHERE y like '%86%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column y of table t1"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()"
@@ -160,14 +160,14 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT row_number() OVER (order by o), * FROM t1p WHERE p like '%86%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones"
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
@@ -190,8 +190,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT row_number() OVER (order by o), * FROM t1p_ones WHERE p like '%86%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()"
@@ -202,8 +202,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re
SELECT row_number() OVER (order by o), * FROM t1p_tens WHERE p like '%86%';
LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o"
-LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)"
LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()"
diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c
index 4249ed552c8..ebfa441b47a 100644
--- a/contrib/sepgsql/hooks.c
+++ b/contrib/sepgsql/hooks.c
@@ -4,7 +4,7 @@
*
* Entrypoints of the hooks in PostgreSQL, and dispatches the callbacks.
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c
index dba0986e02a..63a2dd5cc1b 100644
--- a/contrib/sepgsql/label.c
+++ b/contrib/sepgsql/label.c
@@ -4,7 +4,7 @@
*
* Routines to support SELinux labels (security context)
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -12,9 +12,9 @@
#include <selinux/label.h>
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/genam.h"
+#include "access/table.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
@@ -35,7 +35,6 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
-#include "utils/tqual.h"
#include "sepgsql.h"
@@ -208,23 +207,16 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
SubTransactionId parentSubid, void *arg)
{
ListCell *cell;
- ListCell *prev;
- ListCell *next;
if (event == SUBXACT_EVENT_ABORT_SUB)
{
- prev = NULL;
- for (cell = list_head(client_label_pending); cell; cell = next)
+ foreach(cell, client_label_pending)
{
pending_label *plabel = lfirst(cell);
- next = lnext(cell);
-
if (plabel->subid == mySubid)
client_label_pending
- = list_delete_cell(client_label_pending, cell, prev);
- else
- prev = cell;
+ = foreach_delete_current(client_label_pending, cell);
}
}
}
@@ -661,7 +653,7 @@ sepgsql_mcstrans_out(PG_FUNCTION_ARGS)
}
/*
- * quote_object_names
+ * quote_object_name
*
* It tries to quote the supplied identifiers
*/
@@ -677,7 +669,7 @@ quote_object_name(const char *src1, const char *src2,
if (src1)
{
temp = quote_identifier(src1);
- appendStringInfo(&result, "%s", temp);
+ appendStringInfoString(&result, temp);
if (src1 != temp)
pfree((void *) temp);
}
@@ -727,7 +719,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
* Open the target catalog. We don't want to allow writable accesses by
* other session during initial labeling.
*/
- rel = heap_open(catalogId, AccessShareLock);
+ rel = table_open(catalogId, AccessShareLock);
sscan = systable_beginscan(rel, InvalidOid, false,
NULL, 0, NULL);
@@ -758,7 +750,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
NULL, NULL, NULL);
object.classId = DatabaseRelationId;
- object.objectId = HeapTupleGetOid(tuple);
+ object.objectId = datForm->oid;
object.objectSubId = 0;
break;
@@ -772,7 +764,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
NULL, NULL);
object.classId = NamespaceRelationId;
- object.objectId = HeapTupleGetOid(tuple);
+ object.objectId = nspForm->oid;
object.objectSubId = 0;
break;
@@ -797,7 +789,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
pfree(namespace_name);
object.classId = RelationRelationId;
- object.objectId = HeapTupleGetOid(tuple);
+ object.objectId = relForm->oid;
object.objectSubId = 0;
break;
@@ -838,7 +830,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
pfree(namespace_name);
object.classId = ProcedureRelationId;
- object.objectId = HeapTupleGetOid(tuple);
+ object.objectId = proForm->oid;
object.objectSubId = 0;
break;
@@ -881,7 +873,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
}
systable_endscan(sscan);
- heap_close(rel, NoLock);
+ table_close(rel, NoLock);
}
/*
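The sepgsql_subxact_callback() rewrite above tracks PostgreSQL's List API overhaul: the old delete-while-iterating idiom needed three cursor variables and list_delete_cell(list, cell, prev), while the new foreach_delete_current() macro cooperates with foreach's internal state so the loop survives the deletion. A minimal sketch of the new idiom, assuming the pg_list.h that ships the macro; the wrapper function name is illustrative, the list and struct names mirror label.c:

    /*
     * Sketch of deleting entries inside a foreach loop via the new
     * foreach_delete_current() macro, as in label.c's subtransaction-
     * abort path.  Backend-only code, not standalone; the function
     * name is hypothetical.
     */
    static void
    discard_pending_labels(SubTransactionId mySubid)
    {
        ListCell   *cell;

        foreach(cell, client_label_pending)
        {
            pending_label *plabel = (pending_label *) lfirst(cell);

            if (plabel->subid == mySubid)
                client_label_pending =
                    foreach_delete_current(client_label_pending, cell);
        }
    }

The same conversion shows up again in uavc.c's sepgsql_avc_reclaim() further down.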
diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher
index 45139f37504..fa449339db2 100755
--- a/contrib/sepgsql/launcher
+++ b/contrib/sepgsql/launcher
@@ -2,7 +2,7 @@
#
# A wrapper script to launch psql command in regression test
#
-# Copyright (c) 2010-2018, PostgreSQL Global Development Group
+# Copyright (c) 2010-2019, PostgreSQL Global Development Group
#
# -------------------------------------------------------------------------
diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c
index c6a817d7c58..aa12dbe2367 100644
--- a/contrib/sepgsql/proc.c
+++ b/contrib/sepgsql/proc.c
@@ -4,16 +4,16 @@
*
* Routines corresponding to procedure objects
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_namespace.h"
@@ -24,8 +24,8 @@
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
+#include "utils/snapmgr.h"
#include "utils/syscache.h"
-#include "utils/tqual.h"
#include "sepgsql.h"
@@ -56,10 +56,10 @@ sepgsql_proc_post_create(Oid functionId)
* Fetch namespace of the new procedure. Because pg_proc entry is not
* visible right now, we need to scan the catalog using SnapshotSelf.
*/
- rel = heap_open(ProcedureRelationId, AccessShareLock);
+ rel = table_open(ProcedureRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_proc_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(functionId));
@@ -141,7 +141,7 @@ sepgsql_proc_post_create(Oid functionId)
* Cleanup
*/
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
pfree(audit_name.data);
pfree(tcontext);
@@ -250,10 +250,10 @@ sepgsql_proc_setattr(Oid functionId)
/*
* Fetch newer catalog
*/
- rel = heap_open(ProcedureRelationId, AccessShareLock);
+ rel = table_open(ProcedureRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_proc_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(functionId));
@@ -305,7 +305,7 @@ sepgsql_proc_setattr(Oid functionId)
ReleaseSysCache(oldtup);
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
}
/*
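Two mechanical conversions repeat through proc.c, relation.c, and schema.c: heap_open()/heap_close() on catalogs become table_open()/table_close() from the new access/table.h, and scans keyed on the hidden OID system column (ObjectIdAttributeNumber) now key on an ordinary column such as Anum_pg_proc_oid, since catalog OIDs are now regular columns and WITH OIDS is gone (hence the test changes further down in contrib/sepgsql/sql/). A condensed sketch of the resulting scan shape, assembled from the hunks above for illustration only, error handling omitted:

    /*
     * Post-conversion catalog scan, condensed from
     * sepgsql_proc_post_create().  The function's OID is read from the
     * tuple's Form_pg_proc struct instead of HeapTupleGetOid().
     */
    Relation    rel = table_open(ProcedureRelationId, AccessShareLock);
    ScanKeyData skey;
    SysScanDesc sscan;
    HeapTuple   tuple;

    ScanKeyInit(&skey,
                Anum_pg_proc_oid,       /* was ObjectIdAttributeNumber */
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(functionId));

    sscan = systable_beginscan(rel, ProcedureOidIndexId, true,
                               SnapshotSelf, 1, &skey);
    tuple = systable_getnext(sscan);
    /* ((Form_pg_proc) GETSTRUCT(tuple))->oid replaces HeapTupleGetOid() */
    systable_endscan(sscan);
    table_close(rel, AccessShareLock);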
diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c
index f0c22715aa5..061527559c1 100644
--- a/contrib/sepgsql/relation.c
+++ b/contrib/sepgsql/relation.c
@@ -4,16 +4,16 @@
*
* Routines corresponding to relation/attribute objects
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/dependency.h"
#include "catalog/pg_attribute.h"
@@ -26,8 +26,8 @@
#include "utils/catcache.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
+#include "utils/snapmgr.h"
#include "utils/syscache.h"
-#include "utils/tqual.h"
#include "sepgsql.h"
@@ -67,7 +67,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
* Compute a default security label of the new column underlying the
* specified relation, and check permission to create it.
*/
- rel = heap_open(AttributeRelationId, AccessShareLock);
+ rel = table_open(AttributeRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_attribute_attrelid,
@@ -120,7 +120,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, ncontext);
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
pfree(tcontext);
pfree(ncontext);
@@ -259,10 +259,10 @@ sepgsql_relation_post_create(Oid relOid)
* Fetch catalog record of the new relation. Because pg_class entry is not
* visible right now, we need to scan the catalog using SnapshotSelf.
*/
- rel = heap_open(RelationRelationId, AccessShareLock);
+ rel = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_class_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(relOid));
@@ -358,7 +358,7 @@ sepgsql_relation_post_create(Oid relOid)
HeapTuple atup;
Form_pg_attribute attForm;
- arel = heap_open(AttributeRelationId, AccessShareLock);
+ arel = table_open(AttributeRelationId, AccessShareLock);
ScanKeyInit(&akey,
Anum_pg_attribute_attrelid,
@@ -400,13 +400,13 @@ sepgsql_relation_post_create(Oid relOid)
pfree(ccontext);
}
systable_endscan(ascan);
- heap_close(arel, AccessShareLock);
+ table_close(arel, AccessShareLock);
}
pfree(rcontext);
out:
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
}
/*
@@ -611,10 +611,10 @@ sepgsql_relation_setattr(Oid relOid)
/*
* Fetch newer catalog
*/
- rel = heap_open(RelationRelationId, AccessShareLock);
+ rel = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_class_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(relOid));
@@ -667,7 +667,7 @@ sepgsql_relation_setattr(Oid relOid)
ReleaseSysCache(oldtup);
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
}
/*
@@ -723,7 +723,7 @@ sepgsql_relation_setattr_extra(Relation catalog,
static void
sepgsql_index_modify(Oid indexOid)
{
- Relation catalog = heap_open(IndexRelationId, AccessShareLock);
+ Relation catalog = table_open(IndexRelationId, AccessShareLock);
/* check db_table:{setattr} permission of the table being indexed */
sepgsql_relation_setattr_extra(catalog,
@@ -731,5 +731,5 @@ sepgsql_index_modify(Oid indexOid)
indexOid,
Anum_pg_index_indrelid,
Anum_pg_index_indexrelid);
- heap_close(catalog, AccessShareLock);
+ table_close(catalog, AccessShareLock);
}
diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c
index bc15a36a459..4c4a90f9781 100644
--- a/contrib/sepgsql/schema.c
+++ b/contrib/sepgsql/schema.c
@@ -4,16 +4,16 @@
*
* Routines corresponding to schema objects
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
-#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
+#include "access/table.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_database.h"
@@ -24,7 +24,7 @@
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
-#include "utils/tqual.h"
+#include "utils/snapmgr.h"
#include "sepgsql.h"
@@ -56,10 +56,10 @@ sepgsql_schema_post_create(Oid namespaceId)
* handle special treatment on default security label; such as special
* label on "pg_temp" schema.
*/
- rel = heap_open(NamespaceRelationId, AccessShareLock);
+ rel = table_open(NamespaceRelationId, AccessShareLock);
ScanKeyInit(&skey,
- ObjectIdAttributeNumber,
+ Anum_pg_namespace_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(namespaceId));
@@ -93,7 +93,7 @@ sepgsql_schema_post_create(Oid namespaceId)
audit_name.data,
true);
systable_endscan(sscan);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
/*
* Assign the default security label on a new procedure
diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c
index 47def00a460..192aabea0b3 100644
--- a/contrib/sepgsql/selinux.c
+++ b/contrib/sepgsql/selinux.c
@@ -5,7 +5,7 @@
* Interactions between userspace and selinux in kernelspace,
* using libselinux api.
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -657,10 +657,8 @@ sepgsql_getenforce(void)
/*
* sepgsql_audit_log
*
- * It generates a security audit record. In the default, it writes out
- * audit records into standard PG's logfile. It also allows to set up
- * external audit log receiver, such as auditd in Linux, using the
- * sepgsql_audit_hook.
+ * It generates a security audit record. It writes out audit records
+ * into standard PG's logfile.
*
* SELinux can control what should be audited and should not using
* "auditdeny" and "auditallow" rules in the security policy. In the
@@ -702,7 +700,7 @@ sepgsql_audit_log(bool denied,
appendStringInfo(&buf, " %s", av_name);
}
}
- appendStringInfo(&buf, " }");
+ appendStringInfoString(&buf, " }");
/*
* Call external audit module, if loaded
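The appendStringInfo(&buf, " }") to appendStringInfoString(&buf, " }") change in sepgsql_audit_log() (and the matching one in quote_object_name() above) is a routine micro-cleanup: when the "format" carries no % escapes, the plain-string variant appends directly and skips the vsnprintf() pass. As a rule of thumb:

    appendStringInfo(&buf, " %s", av_name);   /* real formatting: keep   */
    appendStringInfoString(&buf, " }");       /* literal text: cheaper   */
    appendStringInfoChar(&buf, '}');          /* single character: cheapest */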
diff --git a/contrib/sepgsql/sepgsql-regtest.te b/contrib/sepgsql/sepgsql-regtest.te
index e5d65243e6b..5d9af1a0ddb 100644
--- a/contrib/sepgsql/sepgsql-regtest.te
+++ b/contrib/sepgsql/sepgsql-regtest.te
@@ -31,6 +31,9 @@ userdom_base_user_template(sepgsql_regtest_superuser)
userdom_manage_home_role(sepgsql_regtest_superuser_r, sepgsql_regtest_superuser_t)
userdom_exec_user_home_content_files(sepgsql_regtest_superuser_t)
userdom_write_user_tmp_sockets(sepgsql_regtest_superuser_t)
+
+auth_read_passwd(sepgsql_regtest_superuser_t)
+
optional_policy(`
postgresql_stream_connect(sepgsql_regtest_superuser_t)
postgresql_unconfined(sepgsql_regtest_superuser_t)
@@ -60,6 +63,9 @@ userdom_base_user_template(sepgsql_regtest_dba)
userdom_manage_home_role(sepgsql_regtest_dba_r, sepgsql_regtest_dba_t)
userdom_exec_user_home_content_files(sepgsql_regtest_dba_t)
userdom_write_user_tmp_sockets(sepgsql_regtest_user_t)
+
+auth_read_passwd(sepgsql_regtest_dba_t)
+
optional_policy(`
postgresql_admin(sepgsql_regtest_dba_t, sepgsql_regtest_dba_r)
postgresql_stream_connect(sepgsql_regtest_dba_t)
@@ -98,6 +104,9 @@ userdom_base_user_template(sepgsql_regtest_user)
userdom_manage_home_role(sepgsql_regtest_user_r, sepgsql_regtest_user_t)
userdom_exec_user_home_content_files(sepgsql_regtest_user_t)
userdom_write_user_tmp_sockets(sepgsql_regtest_user_t)
+
+auth_read_passwd(sepgsql_regtest_user_t)
+
optional_policy(`
postgresql_role(sepgsql_regtest_user_r, sepgsql_regtest_user_t)
postgresql_stream_connect(sepgsql_regtest_user_t)
@@ -126,6 +135,8 @@ userdom_manage_home_role(sepgsql_regtest_pool_r, sepgsql_regtest_pool_t)
userdom_exec_user_home_content_files(sepgsql_regtest_pool_t)
userdom_write_user_tmp_sockets(sepgsql_regtest_pool_t)
+auth_read_passwd(sepgsql_regtest_pool_t)
+
type sepgsql_regtest_foo_t;
type sepgsql_regtest_var_t;
type sepgsql_regtest_foo_table_t;
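Each regression-test domain gains auth_read_passwd(), the refpolicy interface that permits reading the passwd database. A plausible trigger, stated here as an assumption rather than from the patch itself, is that the client tooling resolves the default role name through getpwuid(), which reads /etc/passwd; roughly:

    /*
     * Hypothetical illustration of why a confined client domain needs
     * auth_read_passwd(): resolving the calling user's name touches
     * the passwd database.
     */
    #include <pwd.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct passwd *pw = getpwuid(geteuid());  /* reads /etc/passwd */

        if (pw)
            printf("default role would be: %s\n", pw->pw_name);
        return 0;
    }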
diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h
index 99adfc522a3..4787934650a 100644
--- a/contrib/sepgsql/sepgsql.h
+++ b/contrib/sepgsql/sepgsql.h
@@ -4,7 +4,7 @@
*
* Definitions corresponding to SE-PostgreSQL
*
- * Copyright (c) 2010-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -226,43 +226,43 @@ extern int sepgsql_set_mode(int new_mode);
extern bool sepgsql_getenforce(void);
extern void sepgsql_audit_log(bool denied,
- const char *scontext,
- const char *tcontext,
- uint16 tclass,
- uint32 audited,
- const char *audit_name);
+ const char *scontext,
+ const char *tcontext,
+ uint16 tclass,
+ uint32 audited,
+ const char *audit_name);
extern void sepgsql_compute_avd(const char *scontext,
- const char *tcontext,
- uint16 tclass,
- struct av_decision *avd);
+ const char *tcontext,
+ uint16 tclass,
+ struct av_decision *avd);
extern char *sepgsql_compute_create(const char *scontext,
- const char *tcontext,
- uint16 tclass,
- const char *objname);
+ const char *tcontext,
+ uint16 tclass,
+ const char *objname);
extern bool sepgsql_check_perms(const char *scontext,
- const char *tcontext,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort_on_violation);
+ const char *tcontext,
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort_on_violation);
/*
* uavc.c
*/
#define SEPGSQL_AVC_NOAUDIT ((void *)(-1))
extern bool sepgsql_avc_check_perms_label(const char *tcontext,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort_on_violation);
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort_on_violation);
extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort_on_violation);
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort_on_violation);
extern char *sepgsql_avc_trusted_proc(Oid functionId);
extern void sepgsql_avc_init(void);
@@ -271,10 +271,10 @@ extern void sepgsql_avc_init(void);
*/
extern char *sepgsql_get_client_label(void);
extern void sepgsql_init_client_label(void);
-extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
+extern char *sepgsql_get_label(Oid classId, Oid objectId, int32 subId);
extern void sepgsql_object_relabel(const ObjectAddress *object,
- const char *seclabel);
+ const char *seclabel);
/*
* dml.c
@@ -285,7 +285,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation);
* database.c
*/
extern void sepgsql_database_post_create(Oid databaseId,
- const char *dtemplate);
+ const char *dtemplate);
extern void sepgsql_database_drop(Oid databaseId);
extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
extern void sepgsql_database_setattr(Oid databaseId);
@@ -308,7 +308,7 @@ extern void sepgsql_schema_rename(Oid namespaceId);
extern void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum);
extern void sepgsql_attribute_drop(Oid relOid, AttrNumber attnum);
extern void sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
- const char *seclabel);
+ const char *seclabel);
extern void sepgsql_attribute_setattr(Oid relOid, AttrNumber attnum);
extern void sepgsql_relation_post_create(Oid relOid);
extern void sepgsql_relation_drop(Oid relOid);
diff --git a/contrib/sepgsql/sql/alter.sql b/contrib/sepgsql/sql/alter.sql
index 14000eaaeeb..f1144492329 100644
--- a/contrib/sepgsql/sql/alter.sql
+++ b/contrib/sepgsql/sql/alter.sql
@@ -134,8 +134,6 @@ CREATE RULE regtest_test_rule AS ON INSERT TO regtest_table_3 DO ALSO NOTHING;
ALTER TABLE regtest_table_3 DISABLE RULE regtest_test_rule; -- not supported
ALTER TABLE regtest_table_3 ENABLE RULE regtest_test_rule; -- not supported
-ALTER TABLE regtest_table SET WITH OIDS;
-ALTER TABLE regtest_table SET WITHOUT OIDS;
ALTER TABLE regtest_table SET (fillfactor = 75);
ALTER TABLE regtest_table RESET (fillfactor);
ALTER TABLE regtest_table_2 NO INHERIT regtest_table; -- not supported
@@ -157,8 +155,6 @@ ALTER TABLE regtest_ptable ALTER p SET STORAGE PLAIN;
ALTER TABLE regtest_ptable ADD CONSTRAINT test_ck CHECK (p like '%abc%') NOT VALID; -- not supported by sepgsql
ALTER TABLE regtest_ptable DROP CONSTRAINT test_ck; -- not supported by sepgsql
-ALTER TABLE regtest_ptable SET WITH OIDS;
-ALTER TABLE regtest_ptable SET WITHOUT OIDS;
ALTER TABLE regtest_ptable SET TABLESPACE pg_default;
-- partitioned table child
diff --git a/contrib/sepgsql/sql/ddl.sql b/contrib/sepgsql/sql/ddl.sql
index ae431f6cd2a..3deadb62526 100644
--- a/contrib/sepgsql/sql/ddl.sql
+++ b/contrib/sepgsql/sql/ddl.sql
@@ -30,7 +30,7 @@ CREATE TABLE regtest_table (x serial primary key, y text);
ALTER TABLE regtest_table ADD COLUMN z int;
-CREATE TABLE regtest_table_2 (a int) WITH OIDS;
+CREATE TABLE regtest_table_2 (a int);
CREATE TABLE regtest_ptable (a int) PARTITION BY RANGE (a);
CREATE TABLE regtest_ptable_ones PARTITION OF regtest_ptable FOR VALUES FROM ('0') TO ('10');
@@ -112,7 +112,6 @@ DROP SEQUENCE regtest_seq;
DROP VIEW regtest_view;
ALTER TABLE regtest_table DROP COLUMN y;
-ALTER TABLE regtest_table_2 SET WITHOUT OIDS;
ALTER TABLE regtest_ptable DROP COLUMN q CASCADE;
diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c
index ea276ee0ccd..60fcf996539 100644
--- a/contrib/sepgsql/uavc.c
+++ b/contrib/sepgsql/uavc.c
@@ -6,17 +6,17 @@
* access control decisions recently used, and reduce number of kernel
* invocations to avoid unnecessary performance hit.
*
- * Copyright (c) 2011-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2011-2019, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
-#include "access/hash.h"
#include "catalog/pg_proc.h"
#include "commands/seclabel.h"
#include "storage/ipc.h"
#include "utils/guc.h"
+#include "utils/hashutils.h"
#include "utils/memutils.h"
#include "sepgsql.h"
@@ -93,24 +93,20 @@ static void
sepgsql_avc_reclaim(void)
{
ListCell *cell;
- ListCell *next;
- ListCell *prev;
int index;
while (avc_num_caches >= avc_threshold - AVC_NUM_RECLAIM)
{
index = avc_lru_hint;
- prev = NULL;
- for (cell = list_head(avc_slots[index]); cell; cell = next)
+ foreach(cell, avc_slots[index])
{
avc_cache *cache = lfirst(cell);
- next = lnext(cell);
if (!cache->hot_cache)
{
avc_slots[index]
- = list_delete_cell(avc_slots[index], cell, prev);
+ = foreach_delete_current(avc_slots[index], cell);
pfree(cache->scontext);
pfree(cache->tcontext);
@@ -123,7 +119,6 @@ sepgsql_avc_reclaim(void)
else
{
cache->hot_cache = false;
- prev = cell;
}
}
avc_lru_hint = (avc_lru_hint + 1) % AVC_NUM_SLOTS;
diff --git a/contrib/spi/Makefile b/contrib/spi/Makefile
index 42aa3740c42..6bc2318e0ac 100644
--- a/contrib/spi/Makefile
+++ b/contrib/spi/Makefile
@@ -1,14 +1,13 @@
# contrib/spi/Makefile
-MODULES = autoinc insert_username moddatetime refint timetravel
+MODULES = autoinc insert_username moddatetime refint
-EXTENSION = autoinc insert_username moddatetime refint timetravel
+EXTENSION = autoinc insert_username moddatetime refint
DATA = autoinc--1.0.sql autoinc--unpackaged--1.0.sql \
insert_username--1.0.sql insert_username--unpackaged--1.0.sql \
moddatetime--1.0.sql moddatetime--unpackaged--1.0.sql \
- refint--1.0.sql refint--unpackaged--1.0.sql \
- timetravel--1.0.sql timetravel--unpackaged--1.0.sql
+ refint--1.0.sql refint--unpackaged--1.0.sql
PGFILEDESC = "spi - examples of using SPI and triggers"
DOCS = $(addsuffix .example, $(MODULES))
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
index b065ffa400d..adf0490f853 100644
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -306,7 +306,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
/* internal error */
elog(ERROR, "check_foreign_key: too short %d (< 5) list of arguments", nargs);
- nrefs = pg_atoi(args[0], sizeof(int), 0);
+ nrefs = pg_strtoint32(args[0]);
if (nrefs < 1)
/* internal error */
elog(ERROR, "check_foreign_key: %d (< 1) number of references specified", nrefs);
@@ -473,9 +473,12 @@ check_foreign_key(PG_FUNCTION_ARGS)
nv = SPI_getvalue(newtuple, tupdesc, fn);
type = SPI_gettype(tupdesc, fn);
- if ((strcmp(type, "text") && strcmp(type, "varchar") &&
- strcmp(type, "char") && strcmp(type, "bpchar") &&
- strcmp(type, "date") && strcmp(type, "timestamp")) == 0)
+ if (strcmp(type, "text") == 0 ||
+ strcmp(type, "varchar") == 0 ||
+ strcmp(type, "char") == 0 ||
+ strcmp(type, "bpchar") == 0 ||
+ strcmp(type, "date") == 0 ||
+ strcmp(type, "timestamp") == 0)
is_char_type = 1;
#ifdef DEBUG_QUERY
elog(DEBUG4, "check_foreign_key Debug value %s type %s %d",
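Two fixes land in refint.c. pg_atoi(args[0], sizeof(int), 0) becomes pg_strtoint32(args[0]), tracking the consolidated integer-parsing API. The type test is also rewritten: the old ((strcmp(a) && strcmp(b) && ...) == 0) form compares the whole &&-chain with zero, which does work, since the chain is 0 exactly when some strcmp() hits a match, but it reads like an operator-precedence bug; the new form states the membership test directly. A standalone demonstration that the two forms agree:

    #include <stdio.h>
    #include <string.h>

    /* Old form: the &&-chain is 0 iff at least one strcmp() matched. */
    static int is_char_type_old(const char *type)
    {
        return (strcmp(type, "text") && strcmp(type, "varchar") &&
                strcmp(type, "char") && strcmp(type, "bpchar") &&
                strcmp(type, "date") && strcmp(type, "timestamp")) == 0;
    }

    /* New form: the same membership test, spelled out. */
    static int is_char_type_new(const char *type)
    {
        return strcmp(type, "text") == 0 || strcmp(type, "varchar") == 0 ||
               strcmp(type, "char") == 0 || strcmp(type, "bpchar") == 0 ||
               strcmp(type, "date") == 0 || strcmp(type, "timestamp") == 0;
    }

    int main(void)
    {
        const char *samples[] = {"date", "text", "int4", "numeric"};

        for (int i = 0; i < 4; i++)
            printf("%-8s old=%d new=%d\n", samples[i],
                   is_char_type_old(samples[i]),
                   is_char_type_new(samples[i]));
        return 0;   /* char-ish types print 1 1, the others 0 0 */
    }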
diff --git a/contrib/spi/refint.example b/contrib/spi/refint.example
index d0ff7441642..299166d5041 100644
--- a/contrib/spi/refint.example
+++ b/contrib/spi/refint.example
@@ -5,7 +5,7 @@ CREATE TABLE A (
);
CREATE UNIQUE INDEX AI ON A (ID);
---Columns REFB of table B and REFC of C are foreign keys referenting ID of A:
+--Columns REFB of table B and REFC of C are foreign keys referencing ID of A:
CREATE TABLE B (
REFB int4
diff --git a/contrib/spi/timetravel--1.0.sql b/contrib/spi/timetravel--1.0.sql
deleted file mode 100644
index c34ca09965b..00000000000
--- a/contrib/spi/timetravel--1.0.sql
+++ /dev/null
@@ -1,19 +0,0 @@
-/* contrib/spi/timetravel--1.0.sql */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION timetravel" to load this file. \quit
-
-CREATE FUNCTION timetravel()
-RETURNS trigger
-AS 'MODULE_PATHNAME'
-LANGUAGE C;
-
-CREATE FUNCTION set_timetravel(name, int4)
-RETURNS int4
-AS 'MODULE_PATHNAME'
-LANGUAGE C RETURNS NULL ON NULL INPUT;
-
-CREATE FUNCTION get_timetravel(name)
-RETURNS int4
-AS 'MODULE_PATHNAME'
-LANGUAGE C RETURNS NULL ON NULL INPUT;
diff --git a/contrib/spi/timetravel--unpackaged--1.0.sql b/contrib/spi/timetravel--unpackaged--1.0.sql
deleted file mode 100644
index 121bceba9b2..00000000000
--- a/contrib/spi/timetravel--unpackaged--1.0.sql
+++ /dev/null
@@ -1,8 +0,0 @@
-/* contrib/spi/timetravel--unpackaged--1.0.sql */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION timetravel FROM unpackaged" to load this file. \quit
-
-ALTER EXTENSION timetravel ADD function timetravel();
-ALTER EXTENSION timetravel ADD function set_timetravel(name,integer);
-ALTER EXTENSION timetravel ADD function get_timetravel(name);
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
deleted file mode 100644
index 00f661e6b69..00000000000
--- a/contrib/spi/timetravel.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * contrib/spi/timetravel.c
- *
- *
- * timetravel.c -- function to get time travel feature
- * using general triggers.
- *
- * Modified by BÖJTHE Zoltán, Hungary, mailto:urdesobt@axelero.hu
- */
-#include "postgres.h"
-
-#include <ctype.h>
-
-#include "access/htup_details.h"
-#include "catalog/pg_type.h"
-#include "commands/trigger.h"
-#include "executor/spi.h"
-#include "miscadmin.h"
-#include "utils/builtins.h"
-#include "utils/nabstime.h"
-#include "utils/rel.h"
-
-PG_MODULE_MAGIC;
-
-/* AbsoluteTime currabstime(void); */
-
-typedef struct
-{
- char *ident;
- SPIPlanPtr splan;
-} EPlan;
-
-static EPlan *Plans = NULL; /* for UPDATE/DELETE */
-static int nPlans = 0;
-
-typedef struct _TTOffList
-{
- struct _TTOffList *next;
- char name[FLEXIBLE_ARRAY_MEMBER];
-} TTOffList;
-
-static TTOffList *TTOff = NULL;
-
-static int findTTStatus(char *name);
-static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
-
-/*
- * timetravel () --
- * 1. IF an update affects tuple with stop_date eq INFINITY
- * then form (and return) new tuple with start_date eq current date
- * and stop_date eq INFINITY [ and update_user eq current user ]
- * and all other column values as in new tuple, and insert tuple
- * with old data and stop_date eq current date
- * ELSE - skip updating of tuple.
- * 2. IF a delete affects tuple with stop_date eq INFINITY
- * then insert the same tuple with stop_date eq current date
- * [ and delete_user eq current user ]
- * ELSE - skip deletion of tuple.
- * 3. On INSERT, if start_date is NULL then current date will be
- * inserted, if stop_date is NULL then INFINITY will be inserted.
- * [ and insert_user eq current user, update_user and delete_user
- * eq NULL ]
- *
- * In CREATE TRIGGER you are to specify start_date and stop_date column
- * names:
- * EXECUTE PROCEDURE
- * timetravel ('date_on', 'date_off' [,'insert_user', 'update_user', 'delete_user' ] ).
- */
-
-#define MaxAttrNum 5
-#define MinAttrNum 2
-
-#define a_time_on 0
-#define a_time_off 1
-#define a_ins_user 2
-#define a_upd_user 3
-#define a_del_user 4
-
-PG_FUNCTION_INFO_V1(timetravel);
-
-Datum /* have to return HeapTuple to Executor */
-timetravel(PG_FUNCTION_ARGS)
-{
- TriggerData *trigdata = (TriggerData *) fcinfo->context;
- Trigger *trigger; /* to get trigger name */
- int argc;
- char **args; /* arguments */
- int attnum[MaxAttrNum]; /* fnumbers of start/stop columns */
- Datum oldtimeon,
- oldtimeoff;
- Datum newtimeon,
- newtimeoff,
- newuser,
- nulltext;
- Datum *cvals; /* column values */
- char *cnulls; /* column nulls */
- char *relname; /* triggered relation name */
- Relation rel; /* triggered relation */
- HeapTuple trigtuple;
- HeapTuple newtuple = NULL;
- HeapTuple rettuple;
- TupleDesc tupdesc; /* tuple description */
- int natts; /* # of attributes */
- EPlan *plan; /* prepared plan */
- char ident[2 * NAMEDATALEN];
- bool isnull; /* to know is some column NULL or not */
- bool isinsert = false;
- int ret;
- int i;
-
- /*
- * Some checks first...
- */
-
- /* Called by trigger manager ? */
- if (!CALLED_AS_TRIGGER(fcinfo))
- elog(ERROR, "timetravel: not fired by trigger manager");
-
- /* Should be called for ROW trigger */
- if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
- elog(ERROR, "timetravel: must be fired for row");
-
- /* Should be called BEFORE */
- if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event))
- elog(ERROR, "timetravel: must be fired before event");
-
- /* INSERT ? */
- if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
- isinsert = true;
-
- if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
- newtuple = trigdata->tg_newtuple;
-
- trigtuple = trigdata->tg_trigtuple;
-
- rel = trigdata->tg_relation;
- relname = SPI_getrelname(rel);
-
- /* check if TT is OFF for this relation */
- if (0 == findTTStatus(relname))
- {
- /* OFF - nothing to do */
- pfree(relname);
- return PointerGetDatum((newtuple != NULL) ? newtuple : trigtuple);
- }
-
- trigger = trigdata->tg_trigger;
-
- argc = trigger->tgnargs;
- if (argc != MinAttrNum && argc != MaxAttrNum)
- elog(ERROR, "timetravel (%s): invalid (!= %d or %d) number of arguments %d",
- relname, MinAttrNum, MaxAttrNum, trigger->tgnargs);
-
- args = trigger->tgargs;
- tupdesc = rel->rd_att;
- natts = tupdesc->natts;
-
- for (i = 0; i < MinAttrNum; i++)
- {
- attnum[i] = SPI_fnumber(tupdesc, args[i]);
- if (attnum[i] <= 0)
- elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]);
- if (SPI_gettypeid(tupdesc, attnum[i]) != ABSTIMEOID)
- elog(ERROR, "timetravel (%s): attribute %s must be of abstime type",
- relname, args[i]);
- }
- for (; i < argc; i++)
- {
- attnum[i] = SPI_fnumber(tupdesc, args[i]);
- if (attnum[i] <= 0)
- elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]);
- if (SPI_gettypeid(tupdesc, attnum[i]) != TEXTOID)
- elog(ERROR, "timetravel (%s): attribute %s must be of text type",
- relname, args[i]);
- }
-
- /* create fields containing name */
- newuser = CStringGetTextDatum(GetUserNameFromId(GetUserId(), false));
-
- nulltext = (Datum) NULL;
-
- if (isinsert)
- { /* INSERT */
- int chnattrs = 0;
- int chattrs[MaxAttrNum];
- Datum newvals[MaxAttrNum];
- bool newnulls[MaxAttrNum];
-
- oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull);
- if (isnull)
- {
- newvals[chnattrs] = GetCurrentAbsoluteTime();
- newnulls[chnattrs] = false;
- chattrs[chnattrs] = attnum[a_time_on];
- chnattrs++;
- }
-
- oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull);
- if (isnull)
- {
- if ((chnattrs == 0 && DatumGetInt32(oldtimeon) >= NOEND_ABSTIME) ||
- (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) >= NOEND_ABSTIME))
- elog(ERROR, "timetravel (%s): %s is infinity", relname, args[a_time_on]);
- newvals[chnattrs] = NOEND_ABSTIME;
- newnulls[chnattrs] = false;
- chattrs[chnattrs] = attnum[a_time_off];
- chnattrs++;
- }
- else
- {
- if ((chnattrs == 0 && DatumGetInt32(oldtimeon) > DatumGetInt32(oldtimeoff)) ||
- (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) > DatumGetInt32(oldtimeoff)))
- elog(ERROR, "timetravel (%s): %s gt %s", relname, args[a_time_on], args[a_time_off]);
- }
-
- pfree(relname);
- if (chnattrs <= 0)
- return PointerGetDatum(trigtuple);
-
- if (argc == MaxAttrNum)
- {
- /* clear update_user value */
- newvals[chnattrs] = nulltext;
- newnulls[chnattrs] = true;
- chattrs[chnattrs] = attnum[a_upd_user];
- chnattrs++;
- /* clear delete_user value */
- newvals[chnattrs] = nulltext;
- newnulls[chnattrs] = true;
- chattrs[chnattrs] = attnum[a_del_user];
- chnattrs++;
- /* set insert_user value */
- newvals[chnattrs] = newuser;
- newnulls[chnattrs] = false;
- chattrs[chnattrs] = attnum[a_ins_user];
- chnattrs++;
- }
- rettuple = heap_modify_tuple_by_cols(trigtuple, tupdesc,
- chnattrs, chattrs,
- newvals, newnulls);
- return PointerGetDatum(rettuple);
- /* end of INSERT */
- }
-
- /* UPDATE/DELETE: */
- oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull);
- if (isnull)
- elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]);
-
- oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull);
- if (isnull)
- elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]);
-
- /*
- * If DELETE/UPDATE of tuple with stop_date neq INFINITY then say upper
- * Executor to skip operation for this tuple
- */
- if (newtuple != NULL)
- { /* UPDATE */
- newtimeon = SPI_getbinval(newtuple, tupdesc, attnum[a_time_on], &isnull);
- if (isnull)
- elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]);
-
- newtimeoff = SPI_getbinval(newtuple, tupdesc, attnum[a_time_off], &isnull);
- if (isnull)
- elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]);
-
- if (oldtimeon != newtimeon || oldtimeoff != newtimeoff)
- elog(ERROR, "timetravel (%s): you cannot change %s and/or %s columns (use set_timetravel)",
- relname, args[a_time_on], args[a_time_off]);
- }
- if (oldtimeoff != NOEND_ABSTIME)
- { /* current record is a deleted/updated record */
- pfree(relname);
- return PointerGetDatum(NULL);
- }
-
- newtimeoff = GetCurrentAbsoluteTime();
-
- /* Connect to SPI manager */
- if ((ret = SPI_connect()) < 0)
- elog(ERROR, "timetravel (%s): SPI_connect returned %d", relname, ret);
-
- /* Fetch tuple values and nulls */
- cvals = (Datum *) palloc(natts * sizeof(Datum));
- cnulls = (char *) palloc(natts * sizeof(char));
- for (i = 0; i < natts; i++)
- {
- cvals[i] = SPI_getbinval(trigtuple, tupdesc, i + 1, &isnull);
- cnulls[i] = (isnull) ? 'n' : ' ';
- }
-
- /* change date column(s) */
- cvals[attnum[a_time_off] - 1] = newtimeoff; /* stop_date eq current date */
- cnulls[attnum[a_time_off] - 1] = ' ';
-
- if (!newtuple)
- { /* DELETE */
- if (argc == MaxAttrNum)
- {
- cvals[attnum[a_del_user] - 1] = newuser; /* set delete user */
- cnulls[attnum[a_del_user] - 1] = ' ';
- }
- }
-
- /*
- * Construct ident string as TriggerName $ TriggeredRelationId and try to
- * find prepared execution plan.
- */
- snprintf(ident, sizeof(ident), "%s$%u", trigger->tgname, rel->rd_id);
- plan = find_plan(ident, &Plans, &nPlans);
-
- /* if there is no plan ... */
- if (plan->splan == NULL)
- {
- SPIPlanPtr pplan;
- Oid *ctypes;
- char sql[8192];
- char separ = ' ';
-
- /* allocate ctypes for preparation */
- ctypes = (Oid *) palloc(natts * sizeof(Oid));
-
- /*
- * Construct query: INSERT INTO _relation_ VALUES ($1, ...)
- */
- snprintf(sql, sizeof(sql), "INSERT INTO %s VALUES (", relname);
- for (i = 1; i <= natts; i++)
- {
- ctypes[i - 1] = SPI_gettypeid(tupdesc, i);
- if (!(TupleDescAttr(tupdesc, i - 1)->attisdropped)) /* skip dropped columns */
- {
- snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "%c$%d", separ, i);
- separ = ',';
- }
- }
- snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), ")");
-
- elog(DEBUG4, "timetravel (%s) update: sql: %s", relname, sql);
-
- /* Prepare plan for query */
- pplan = SPI_prepare(sql, natts, ctypes);
- if (pplan == NULL)
- elog(ERROR, "timetravel (%s): SPI_prepare returned %s", relname, SPI_result_code_string(SPI_result));
-
- /*
- * Remember that SPI_prepare places plan in current memory context -
- * so, we have to save plan in Top memory context for later use.
- */
- if (SPI_keepplan(pplan))
- elog(ERROR, "timetravel (%s): SPI_keepplan failed", relname);
-
- plan->splan = pplan;
- }
-
- /*
- * Ok, execute prepared plan.
- */
- ret = SPI_execp(plan->splan, cvals, cnulls, 0);
-
- if (ret < 0)
- elog(ERROR, "timetravel (%s): SPI_execp returned %d", relname, ret);
-
- /* Tuple to return to upper Executor ... */
- if (newtuple)
- { /* UPDATE */
- int chnattrs = 0;
- int chattrs[MaxAttrNum];
- Datum newvals[MaxAttrNum];
- char newnulls[MaxAttrNum];
-
- newvals[chnattrs] = newtimeoff;
- newnulls[chnattrs] = ' ';
- chattrs[chnattrs] = attnum[a_time_on];
- chnattrs++;
-
- newvals[chnattrs] = NOEND_ABSTIME;
- newnulls[chnattrs] = ' ';
- chattrs[chnattrs] = attnum[a_time_off];
- chnattrs++;
-
- if (argc == MaxAttrNum)
- {
- /* set update_user value */
- newvals[chnattrs] = newuser;
- newnulls[chnattrs] = ' ';
- chattrs[chnattrs] = attnum[a_upd_user];
- chnattrs++;
- /* clear delete_user value */
- newvals[chnattrs] = nulltext;
- newnulls[chnattrs] = 'n';
- chattrs[chnattrs] = attnum[a_del_user];
- chnattrs++;
- /* set insert_user value */
- newvals[chnattrs] = nulltext;
- newnulls[chnattrs] = 'n';
- chattrs[chnattrs] = attnum[a_ins_user];
- chnattrs++;
- }
-
- /*
- * Use SPI_modifytuple() here because we are inside SPI environment
- * but rettuple must be allocated in caller's context.
- */
- rettuple = SPI_modifytuple(rel, newtuple, chnattrs, chattrs, newvals, newnulls);
- }
- else
- /* DELETE case */
- rettuple = trigtuple;
-
- SPI_finish(); /* don't forget say Bye to SPI mgr */
-
- pfree(relname);
- return PointerGetDatum(rettuple);
-}
-
-/*
- * set_timetravel (relname, on) --
- * turn timetravel for specified relation ON/OFF
- */
-PG_FUNCTION_INFO_V1(set_timetravel);
-
-Datum
-set_timetravel(PG_FUNCTION_ARGS)
-{
- Name relname = PG_GETARG_NAME(0);
- int32 on = PG_GETARG_INT32(1);
- char *rname;
- char *d;
- char *s;
- int32 ret;
- TTOffList *prev,
- *pp;
-
- prev = NULL;
- for (pp = TTOff; pp; prev = pp, pp = pp->next)
- {
- if (namestrcmp(relname, pp->name) == 0)
- break;
- }
- if (pp)
- {
- /* OFF currently */
- if (on != 0)
- {
- /* turn ON */
- if (prev)
- prev->next = pp->next;
- else
- TTOff = pp->next;
- free(pp);
- }
- ret = 0;
- }
- else
- {
- /* ON currently */
- if (on == 0)
- {
- /* turn OFF */
- s = rname = DatumGetCString(DirectFunctionCall1(nameout, NameGetDatum(relname)));
- if (s)
- {
- pp = malloc(offsetof(TTOffList, name) + strlen(rname) + 1);
- if (pp)
- {
- pp->next = NULL;
- d = pp->name;
- while (*s)
- *d++ = tolower((unsigned char) *s++);
- *d = '\0';
- if (prev)
- prev->next = pp;
- else
- TTOff = pp;
- }
- pfree(rname);
- }
- }
- ret = 1;
- }
- PG_RETURN_INT32(ret);
-}
-
-/*
- * get_timetravel (relname) --
- * get timetravel status for specified relation (ON/OFF)
- */
-PG_FUNCTION_INFO_V1(get_timetravel);
-
-Datum
-get_timetravel(PG_FUNCTION_ARGS)
-{
- Name relname = PG_GETARG_NAME(0);
- TTOffList *pp;
-
- for (pp = TTOff; pp; pp = pp->next)
- {
- if (namestrcmp(relname, pp->name) == 0)
- PG_RETURN_INT32(0);
- }
- PG_RETURN_INT32(1);
-}
-
-static int
-findTTStatus(char *name)
-{
- TTOffList *pp;
-
- for (pp = TTOff; pp; pp = pp->next)
- if (pg_strcasecmp(name, pp->name) == 0)
- return 0;
- return 1;
-}
-
-/*
-AbsoluteTime
-currabstime()
-{
- return GetCurrentAbsoluteTime();
-}
-*/
-
-static EPlan *
-find_plan(char *ident, EPlan **eplan, int *nplans)
-{
- EPlan *newp;
- int i;
-
- if (*nplans > 0)
- {
- for (i = 0; i < *nplans; i++)
- {
- if (strcmp((*eplan)[i].ident, ident) == 0)
- break;
- }
- if (i != *nplans)
- return (*eplan + i);
- *eplan = (EPlan *) realloc(*eplan, (i + 1) * sizeof(EPlan));
- newp = *eplan + i;
- }
- else
- {
- newp = *eplan = (EPlan *) malloc(sizeof(EPlan));
- (*nplans) = i = 0;
- }
-
- newp->ident = strdup(ident);
- newp->splan = NULL;
- (*nplans)++;
-
- return newp;
-}
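timetravel is removed outright rather than ported: the module is built on the abstime type (ABSTIMEOID columns, NOEND_ABSTIME as the open end, GetCurrentAbsoluteTime(), utils/nabstime.h), and abstime itself is gone from this release. A port, if anyone wanted one, would presumably swap in timestamptz; a hypothetical fragment, not part of this patch:

    /*
     * Hypothetical abstime -> timestamptz replacements for a ported
     * timetravel trigger; none of this is in the actual patch, and the
     * fragment assumes the surrounding variables from the old code.
     */
    #include "utils/timestamp.h"    /* GetCurrentTimestamp, DT_NOEND */

    if (SPI_gettypeid(tupdesc, attnum[i]) != TIMESTAMPTZOID)
        elog(ERROR, "timetravel (%s): attribute %s must be of timestamptz type",
             relname, args[i]);

    newtimeoff = TimestampTzGetDatum(GetCurrentTimestamp());
    /* open-ended rows would use the special "infinity" value */
    stop_open = TimestampTzGetDatum(DT_NOEND);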
diff --git a/contrib/spi/timetravel.control b/contrib/spi/timetravel.control
deleted file mode 100644
index 9b4bb6ba046..00000000000
--- a/contrib/spi/timetravel.control
+++ /dev/null
@@ -1,5 +0,0 @@
-# timetravel extension
-comment = 'functions for implementing time travel'
-default_version = '1.0'
-module_pathname = '$libdir/timetravel'
-relocatable = true
diff --git a/contrib/spi/timetravel.example b/contrib/spi/timetravel.example
deleted file mode 100644
index 35a7f654085..00000000000
--- a/contrib/spi/timetravel.example
+++ /dev/null
@@ -1,81 +0,0 @@
-drop table tttest;
-
-create table tttest (
- price_id int4,
- price_val int4,
- price_on abstime,
- price_off abstime
-);
-
-create unique index tttest_idx on tttest (price_id,price_off);
-alter table tttest add column q1 text;
-alter table tttest add column q2 int;
-alter table tttest drop column q1;
-
-create trigger timetravel
- before insert or delete or update on tttest
- for each row
- execute procedure
- timetravel (price_on, price_off);
-
-insert into tttest values (1, 1, null, null);
-insert into tttest(price_id, price_val) values (2, 2);
-insert into tttest(price_id, price_val,price_off) values (3, 3, 'infinity');
-
-insert into tttest(price_id, price_val,price_off) values (4, 4,
- abstime('now'::timestamp - '100 days'::interval));
-insert into tttest(price_id, price_val,price_on) values (3, 3, 'infinity'); -- duplicate key
-
-select * from tttest;
-delete from tttest where price_id = 2;
-select * from tttest;
--- what do we see ?
-
--- get current prices
-select * from tttest where price_off = 'infinity';
-
--- change price for price_id == 3
-update tttest set price_val = 30 where price_id = 3;
-select * from tttest;
-
--- now we want to change price_id from 3 to 5 in ALL tuples
--- but this gets us not what we need
-update tttest set price_id = 5 where price_id = 3;
-select * from tttest;
-
--- restore data as before last update:
-select set_timetravel('tttest', 0); -- turn TT OFF!
-
-select get_timetravel('tttest'); -- check status
-
-delete from tttest where price_id = 5;
-update tttest set price_off = 'infinity' where price_val = 30;
-select * from tttest;
-
--- and try change price_id now!
-update tttest set price_id = 5 where price_id = 3;
-select * from tttest;
--- isn't it what we need ?
-
-select set_timetravel('tttest', 1); -- turn TT ON!
-
-select get_timetravel('tttest'); -- check status
-
--- we want to correct some date
-update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and
- price_off <> 'infinity';
--- but this doesn't work
-
--- try in this way
-select set_timetravel('tttest', 0); -- turn TT OFF!
-
-select get_timetravel('tttest'); -- check status
-
-update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and
- price_off <> 'infinity';
-select * from tttest;
--- isn't it what we need ?
-
--- get price for price_id == 5 as it was '10-Jan-1990'
-select * from tttest where price_id = 5 and
- price_on <= '10-Jan-1990' and price_off > '10-Jan-1990';
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 59f90dc9479..e31be80828e 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -10,7 +10,7 @@
* And contributors:
* Nabil Sayegh
*
- * Copyright (c) 2002-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2019, PostgreSQL Global Development Group
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
@@ -48,41 +48,41 @@ PG_MODULE_MAGIC;
static HTAB *load_categories_hash(char *cats_sql, MemoryContext per_query_ctx);
static Tuplestorestate *get_crosstab_tuplestore(char *sql,
- HTAB *crosstab_hash,
- TupleDesc tupdesc,
- MemoryContext per_query_ctx,
- bool randomAccess);
+ HTAB *crosstab_hash,
+ TupleDesc tupdesc,
+ MemoryContext per_query_ctx,
+ bool randomAccess);
static void validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial);
static bool compatCrosstabTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
static void compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
static void get_normal_pair(float8 *x1, float8 *x2);
static Tuplestorestate *connectby(char *relname,
- char *key_fld,
- char *parent_key_fld,
- char *orderby_fld,
- char *branch_delim,
- char *start_with,
- int max_depth,
- bool show_branch,
- bool show_serial,
- MemoryContext per_query_ctx,
- bool randomAccess,
- AttInMetadata *attinmeta);
+ char *key_fld,
+ char *parent_key_fld,
+ char *orderby_fld,
+ char *branch_delim,
+ char *start_with,
+ int max_depth,
+ bool show_branch,
+ bool show_serial,
+ MemoryContext per_query_ctx,
+ bool randomAccess,
+ AttInMetadata *attinmeta);
static void build_tuplestore_recursively(char *key_fld,
- char *parent_key_fld,
- char *relname,
- char *orderby_fld,
- char *branch_delim,
- char *start_with,
- char *branch,
- int level,
- int *serial,
- int max_depth,
- bool show_branch,
- bool show_serial,
- MemoryContext per_query_ctx,
- AttInMetadata *attinmeta,
- Tuplestorestate *tupstore);
+ char *parent_key_fld,
+ char *relname,
+ char *orderby_fld,
+ char *branch_delim,
+ char *start_with,
+ char *branch,
+ int level,
+ int *serial,
+ int max_depth,
+ bool show_branch,
+ bool show_serial,
+ MemoryContext per_query_ctx,
+ AttInMetadata *attinmeta,
+ Tuplestorestate *tupstore);
typedef struct
{
@@ -867,11 +867,8 @@ get_crosstab_tuplestore(char *sql,
"tuple has %d columns but crosstab " \
"returns %d.", tupdesc->natts, result_ncols)));
- /* allocate space */
- values = (char **) palloc(result_ncols * sizeof(char *));
-
- /* and make sure it's clear */
- memset(values, '\0', result_ncols * sizeof(char *));
+ /* allocate space and make sure it's clear */
+ values = (char **) palloc0(result_ncols * sizeof(char *));
for (i = 0; i < proc; i++)
{
diff --git a/contrib/tablefunc/tablefunc.h b/contrib/tablefunc/tablefunc.h
index 7d0773f82fc..f1e75463ff7 100644
--- a/contrib/tablefunc/tablefunc.h
+++ b/contrib/tablefunc/tablefunc.h
@@ -10,7 +10,7 @@
* And contributors:
* Nabil Sayegh
*
- * Copyright (c) 2002-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2019, PostgreSQL Global Development Group
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c
index 43bdd92749d..5355a64c5e6 100644
--- a/contrib/tcn/tcn.c
+++ b/contrib/tcn/tcn.c
@@ -3,7 +3,7 @@
* tcn.c
* triggered change notification support for PostgreSQL
*
- * Portions Copyright (c) 2011-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2011-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -136,9 +136,9 @@ triggered_change_notification(PG_FUNCTION_ARGS)
elog(ERROR, "cache lookup failed for index %u", indexoid);
index = (Form_pg_index) GETSTRUCT(indexTuple);
/* we're only interested if it is the primary key and valid */
- if (index->indisprimary && IndexIsValid(index))
+ if (index->indisprimary && index->indisvalid)
{
- int indnkeyatts = index->indnkeyatts;
+ int indnkeyatts = index->indnkeyatts;
if (indnkeyatts > 0)
{
diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile
index 1d601d8144c..4afb1d963e5 100644
--- a/contrib/test_decoding/Makefile
+++ b/contrib/test_decoding/Makefile
@@ -3,9 +3,18 @@
MODULES = test_decoding
PGFILEDESC = "test_decoding - example of a logical decoding output plugin"
-# Note: because we don't tell the Makefile there are any regression tests,
-# we have to clean those result files explicitly
-EXTRA_CLEAN = $(pg_regress_clean_files)
+REGRESS = ddl xact rewrite toast permissions decoding_in_xact \
+ decoding_into_rel binary prepared replorigin time messages \
+ spill slot truncate
+ISOLATION = mxact delayed_startup ondisk_startup concurrent_ddl_dml \
+ oldest_xmin snapshot_transfer
+
+REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf
+ISOLATION_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf
+
+# Disabled because these tests require "wal_level=logical", which
+# typical installcheck users do not have (e.g. buildfarm clients).
+NO_INSTALLCHECK = 1
ifdef USE_PGXS
PG_CONFIG = pg_config
@@ -18,51 +27,8 @@ include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
-# Disabled because these tests require "wal_level=logical", which
-# typical installcheck users do not have (e.g. buildfarm clients).
-installcheck:;
-
# But it can nonetheless be very helpful to run tests on a preexisting
# installation; allow doing so, but only if requested explicitly.
-installcheck-force: regresscheck-install-force isolationcheck-install-force
-
-check: regresscheck isolationcheck
-
-submake-regress:
- $(MAKE) -C $(top_builddir)/src/test/regress all
-
-submake-isolation:
- $(MAKE) -C $(top_builddir)/src/test/isolation all
-
-submake-test_decoding:
- $(MAKE) -C $(top_builddir)/contrib/test_decoding
-
-REGRESSCHECKS=ddl xact rewrite toast permissions decoding_in_xact \
- decoding_into_rel binary prepared replorigin time messages \
- spill slot truncate
-
-regresscheck: | submake-regress submake-test_decoding temp-install
- $(pg_regress_check) \
- --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
- $(REGRESSCHECKS)
-
-regresscheck-install-force: | submake-regress submake-test_decoding temp-install
- $(pg_regress_installcheck) \
- $(REGRESSCHECKS)
-
-ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml
-
-isolationcheck: | submake-isolation submake-test_decoding temp-install
- $(pg_isolation_regress_check) \
- --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
- $(ISOLATIONCHECKS)
-
-isolationcheck-install-force: all | submake-isolation submake-test_decoding temp-install
- $(pg_isolation_regress_installcheck) \
- $(ISOLATIONCHECKS)
-
-.PHONY: submake-test_decoding submake-regress check \
- regresscheck regresscheck-install-force \
- isolationcheck isolationcheck-install-force
-
-temp-install: EXTRA_INSTALL=contrib/test_decoding
+installcheck-force:
+ $(pg_regress_installcheck) $(REGRESS)
+ $(pg_isolation_regress_installcheck) $(ISOLATION)
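
For anyone running these suites by hand against an existing cluster, a quick
sanity check along these lines (a hedged sketch; the authoritative settings
live in contrib/test_decoding/logical.conf) confirms the server meets the
"wal_level=logical" requirement noted above:

    -- sketch only: verify the prerequisites before using installcheck-force
    SHOW wal_level;               -- must report 'logical' for logical slots
    SHOW max_replication_slots;   -- the slot tests below create up to 4 slots
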
diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out
index 79c359d6e3d..2c999fd3eb7 100644
--- a/contrib/test_decoding/expected/ddl.out
+++ b/contrib/test_decoding/expected/ddl.out
@@ -17,8 +17,8 @@ HINT: Replication slot names may only contain lower case letters, numbers, and
SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
ERROR: could not parse value "frakbar" for parameter "include-xids"
CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
-SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistant-option', 'frakbar');
-ERROR: option "nonexistant-option" = "frakbar" is unknown
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistent-option', 'frakbar');
+ERROR: option "nonexistent-option" = "frakbar" is unknown
CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
ERROR: could not parse value "frakbar" for parameter "include-xids"
@@ -192,52 +192,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
COMMIT
(33 rows)
--- MERGE support
-BEGIN;
-MERGE INTO replication_example t
- USING (SELECT i as id, i as data, i as num FROM generate_series(-20, 5) i) s
- ON t.id = s.id
- WHEN MATCHED AND t.id < 0 THEN
- UPDATE SET somenum = somenum + 1
- WHEN MATCHED AND t.id >= 0 THEN
- DELETE
- WHEN NOT MATCHED THEN
- INSERT VALUES (s.*);
-COMMIT;
-/* display results */
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
- data
---------------------------------------------------------------------------------------------------------------------------------------------------
- BEGIN
- table public.replication_example: INSERT: id[integer]:-20 somedata[integer]:-20 somenum[integer]:-20 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: INSERT: id[integer]:-19 somedata[integer]:-19 somenum[integer]:-19 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: INSERT: id[integer]:-18 somedata[integer]:-18 somenum[integer]:-18 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: INSERT: id[integer]:-17 somedata[integer]:-17 somenum[integer]:-17 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: INSERT: id[integer]:-16 somedata[integer]:-16 somenum[integer]:-16 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-15 somedata[integer]:-15 somenum[integer]:-14 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-14 somedata[integer]:-14 somenum[integer]:-13 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-13 somedata[integer]:-13 somenum[integer]:-12 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-12 somedata[integer]:-12 somenum[integer]:-11 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-11 somedata[integer]:-11 somenum[integer]:-10 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-10 somedata[integer]:-10 somenum[integer]:-9 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-9 somedata[integer]:-9 somenum[integer]:-8 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-8 somedata[integer]:-8 somenum[integer]:-7 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-7 somedata[integer]:-7 somenum[integer]:-6 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-6 somedata[integer]:-6 somenum[integer]:-5 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-5 somedata[integer]:-5 somenum[integer]:-4 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-4 somedata[integer]:-4 somenum[integer]:-3 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-3 somedata[integer]:-3 somenum[integer]:-2 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-2 somedata[integer]:-2 somenum[integer]:-1 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: UPDATE: id[integer]:-1 somedata[integer]:-1 somenum[integer]:0 zaphod1[integer]:null zaphod2[integer]:null
- table public.replication_example: DELETE: id[integer]:0
- table public.replication_example: DELETE: id[integer]:1
- table public.replication_example: DELETE: id[integer]:2
- table public.replication_example: DELETE: id[integer]:3
- table public.replication_example: DELETE: id[integer]:4
- table public.replication_example: DELETE: id[integer]:5
- COMMIT
-(28 rows)
-
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
ALTER TABLE tr_unique RENAME TO tr_pkey;
@@ -455,6 +409,24 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
COMMIT
(6 rows)
+-- check that DDL in aborted subtransactions handled correctly
+CREATE TABLE tr_sub_ddl(data int);
+BEGIN;
+SAVEPOINT a;
+ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE text;
+INSERT INTO tr_sub_ddl VALUES ('blah-blah');
+ROLLBACK TO SAVEPOINT a;
+ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE bigint;
+INSERT INTO tr_sub_ddl VALUES(43);
+COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+--------------------------------------------------
+ BEGIN
+ table public.tr_sub_ddl: INSERT: data[bigint]:43
+ COMMIT
+(3 rows)
+
/*
* Check whether treating a table as a catalog table works somewhat
*/
diff --git a/contrib/test_decoding/expected/oldest_xmin.out b/contrib/test_decoding/expected/oldest_xmin.out
new file mode 100644
index 00000000000..d1b4f17e3aa
--- /dev/null
+++ b/contrib/test_decoding/expected/oldest_xmin.out
@@ -0,0 +1,30 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s0_begin s0_getxid s1_begin s1_insert s0_alter s0_commit s0_checkpoint s0_get_changes s0_get_changes s1_commit s0_vacuum s0_get_changes
+step s0_begin: BEGIN;
+step s0_getxid: SELECT txid_current() IS NULL;
+?column?
+
+f
+step s1_begin: BEGIN;
+step s1_insert: INSERT INTO harvest VALUES ((1, 2, 3));
+step s0_alter: ALTER TYPE basket DROP ATTRIBUTE mangos;
+step s0_commit: COMMIT;
+step s0_checkpoint: CHECKPOINT;
+step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+data
+
+step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+data
+
+step s1_commit: COMMIT;
+step s0_vacuum: VACUUM pg_attribute;
+step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+data
+
+BEGIN
+table public.harvest: INSERT: fruits[basket]:'(1,2,3)'
+COMMIT
+?column?
+
+stop
diff --git a/contrib/test_decoding/expected/replorigin.out b/contrib/test_decoding/expected/replorigin.out
index 8ea4ddda977..3b249f4856f 100644
--- a/contrib/test_decoding/expected/replorigin.out
+++ b/contrib/test_decoding/expected/replorigin.out
@@ -2,38 +2,38 @@
SET synchronous_commit = on;
CREATE TABLE origin_tbl(id serial primary key, data text);
CREATE TABLE target_tbl(id serial primary key, data text);
-SELECT pg_replication_origin_create('test_decoding: regression_slot');
+SELECT pg_replication_origin_create('regress_test_decoding: regression_slot');
pg_replication_origin_create
------------------------------
1
(1 row)
-- ensure duplicate creations fail
-SELECT pg_replication_origin_create('test_decoding: regression_slot');
+SELECT pg_replication_origin_create('regress_test_decoding: regression_slot');
ERROR: duplicate key value violates unique constraint "pg_replication_origin_roname_index"
-DETAIL: Key (roname)=(test_decoding: regression_slot) already exists.
+DETAIL: Key (roname)=(regress_test_decoding: regression_slot) already exists.
--ensure deletions work (once)
-SELECT pg_replication_origin_create('test_decoding: temp');
+SELECT pg_replication_origin_create('regress_test_decoding: temp');
pg_replication_origin_create
------------------------------
2
(1 row)
-SELECT pg_replication_origin_drop('test_decoding: temp');
+SELECT pg_replication_origin_drop('regress_test_decoding: temp');
pg_replication_origin_drop
----------------------------
(1 row)
-SELECT pg_replication_origin_drop('test_decoding: temp');
-ERROR: replication origin "test_decoding: temp" does not exist
+SELECT pg_replication_origin_drop('regress_test_decoding: temp');
+ERROR: replication origin "regress_test_decoding: temp" does not exist
-- various failure checks for undefined slots
-select pg_replication_origin_advance('test_decoding: temp', '0/1');
-ERROR: replication origin "test_decoding: temp" does not exist
-select pg_replication_origin_session_setup('test_decoding: temp');
-ERROR: replication origin "test_decoding: temp" does not exist
-select pg_replication_origin_progress('test_decoding: temp', true);
-ERROR: replication origin "test_decoding: temp" does not exist
+select pg_replication_origin_advance('regress_test_decoding: temp', '0/1');
+ERROR: replication origin "regress_test_decoding: temp" does not exist
+select pg_replication_origin_session_setup('regress_test_decoding: temp');
+ERROR: replication origin "regress_test_decoding: temp" does not exist
+select pg_replication_origin_progress('regress_test_decoding: temp', true);
+ERROR: replication origin "regress_test_decoding: temp" does not exist
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
?column?
----------
@@ -57,14 +57,14 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
INSERT INTO origin_tbl(data) VALUES ('will be replicated, but not decoded again');
-- mark session as replaying
-SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+SELECT pg_replication_origin_session_setup('regress_test_decoding: regression_slot');
pg_replication_origin_session_setup
-------------------------------------
(1 row)
-- ensure we prevent duplicate setup
-SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+SELECT pg_replication_origin_session_setup('regress_test_decoding: regression_slot');
ERROR: cannot setup replication origin when one is already setup
SELECT '' FROM pg_logical_emit_message(false, 'test', 'this message will not be decoded');
?column?
@@ -103,19 +103,19 @@ SELECT pg_replication_origin_session_reset();
(1 row)
SELECT local_id, external_id, remote_lsn, local_lsn <> '0/0' FROM pg_replication_origin_status;
- local_id | external_id | remote_lsn | ?column?
-----------+--------------------------------+------------+----------
- 1 | test_decoding: regression_slot | 0/AABBCCDD | t
+ local_id | external_id | remote_lsn | ?column?
+----------+----------------------------------------+------------+----------
+ 1 | regress_test_decoding: regression_slot | 0/AABBCCDD | t
(1 row)
-- check replication progress identified by name is correct
-SELECT pg_replication_origin_progress('test_decoding: regression_slot', false);
+SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', false);
pg_replication_origin_progress
--------------------------------
0/AABBCCDD
(1 row)
-SELECT pg_replication_origin_progress('test_decoding: regression_slot', true);
+SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', true);
pg_replication_origin_progress
--------------------------------
0/AABBCCDD
@@ -146,7 +146,7 @@ SELECT pg_drop_replication_slot('regression_slot');
(1 row)
-SELECT pg_replication_origin_drop('test_decoding: regression_slot');
+SELECT pg_replication_origin_drop('regress_test_decoding: regression_slot');
pg_replication_origin_drop
----------------------------
diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out
index 4dcd4895438..b30999c436b 100644
--- a/contrib/test_decoding/expected/rewrite.out
+++ b/contrib/test_decoding/expected/rewrite.out
@@ -1,6 +1,61 @@
-- predictability
SET synchronous_commit = on;
DROP TABLE IF EXISTS replication_example;
+-- Ensure there are tables with toast datums. To do so, we dynamically
+-- create a function returning a large text blob. We want tables of
+-- different kinds: a mapped catalog table, an unmapped catalog table,
+-- a shared catalog table, and a user table.
+CREATE FUNCTION exec(text) returns void language plpgsql volatile
+ AS $f$
+ BEGIN
+ EXECUTE $1;
+ END;
+$f$;
+CREATE ROLE regress_justforcomments NOLOGIN;
+SELECT exec(
+ format($outer$CREATE FUNCTION iamalongfunction() RETURNS TEXT IMMUTABLE LANGUAGE SQL AS $f$SELECT text %L$f$$outer$,
+ (SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i))));
+ exec
+------
+
+(1 row)
+
+SELECT exec(
+ format($outer$COMMENT ON FUNCTION iamalongfunction() IS %L$outer$,
+ iamalongfunction()));
+ exec
+------
+
+(1 row)
+
+SELECT exec(
+ format($outer$COMMENT ON ROLE REGRESS_JUSTFORCOMMENTS IS %L$outer$,
+ iamalongfunction()));
+ exec
+------
+
+(1 row)
+
+CREATE TABLE iamalargetable AS SELECT iamalongfunction() longfunctionoutput;
+-- verify toast usage
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_proc'::regclass)) > 0;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_description'::regclass)) > 0;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_shdescription'::regclass)) > 0;
+ ?column?
+----------
+ t
+(1 row)
+
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
?column?
----------
@@ -48,6 +103,10 @@ COMMIT;
-- repeated rewrites in different transactions
VACUUM FULL pg_class;
VACUUM FULL pg_class;
+-- reindexing of important relations / indexes
+REINDEX TABLE pg_class;
+REINDEX INDEX pg_class_oid_index;
+REINDEX INDEX pg_class_tblspc_relfilenode_index;
INSERT INTO replication_example(somedata, testcolumn1) VALUES (5, 3);
BEGIN;
INSERT INTO replication_example(somedata, testcolumn1) VALUES (6, 4);
@@ -76,6 +135,23 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
COMMIT
(15 rows)
+-- Trigger repeated rewrites of a system catalog with a toast table;
+-- this previously was buggy: 20180914021046.oi7dm4ra3ot2g2kt@alap3.anarazel.de
+VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (8, 6, 1);
+VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (9, 7, 1);
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:9 somedata[integer]:8 text[character varying]:null testcolumn1[integer]:6 testcolumn2[integer]:null testcolumn3[integer]:1
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:10 somedata[integer]:9 text[character varying]:null testcolumn1[integer]:7 testcolumn2[integer]:null testcolumn3[integer]:1
+ COMMIT
+(6 rows)
+
SELECT pg_drop_replication_slot('regression_slot');
pg_drop_replication_slot
--------------------------
@@ -83,3 +159,6 @@ SELECT pg_drop_replication_slot('regression_slot');
(1 row)
DROP TABLE IF EXISTS replication_example;
+DROP FUNCTION iamalongfunction();
+DROP FUNCTION exec(text);
+DROP ROLE regress_justforcomments;
diff --git a/contrib/test_decoding/expected/slot.out b/contrib/test_decoding/expected/slot.out
index 21e9d56f73b..1000171530f 100644
--- a/contrib/test_decoding/expected/slot.out
+++ b/contrib/test_decoding/expected/slot.out
@@ -30,6 +30,8 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_t2', 'tes
init
(1 row)
+SELECT pg_create_logical_replication_slot('foo', 'nonexistent');
+ERROR: could not access file "nonexistent": No such file or directory
-- here we want to start a new session and wait till old one is gone
select pg_backend_pid() as oldpid \gset
\c -
@@ -131,3 +133,254 @@ SELECT pg_drop_replication_slot('regression_slot1');
ERROR: replication slot "regression_slot1" does not exist
SELECT pg_drop_replication_slot('regression_slot2');
ERROR: replication slot "regression_slot2" does not exist
+-- slot advance with physical slot, error with non-reserved slot
+SELECT slot_name FROM pg_create_physical_replication_slot('regression_slot3');
+ slot_name
+------------------
+ regression_slot3
+(1 row)
+
+SELECT pg_replication_slot_advance('regression_slot3', '0/0'); -- invalid LSN
+ERROR: invalid target WAL LSN
+SELECT pg_replication_slot_advance('regression_slot3', '0/1'); -- error
+ERROR: cannot advance replication slot that has not previously reserved WAL
+SELECT pg_drop_replication_slot('regression_slot3');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+--
+-- Test copy functions for logical replication slots
+--
+-- Create and copy logical slots
+SELECT 'init' FROM pg_create_logical_replication_slot('orig_slot1', 'test_decoding', false);
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_no_change');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_change_plugin', false, 'pgoutput');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_change_plugin_temp', true, 'pgoutput');
+ ?column?
+----------
+ copy
+(1 row)
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.plugin, o.temporary, c.slot_name, c.plugin, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn AND o.confirmed_flush_lsn = c.confirmed_flush_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+ slot_name | plugin | temporary | slot_name | plugin | temporary
+------------+---------------+-----------+---------------------------------+---------------+-----------
+ orig_slot1 | test_decoding | f | copied_slot1_change_plugin | pgoutput | f
+ orig_slot1 | test_decoding | f | copied_slot1_change_plugin_temp | pgoutput | t
+ orig_slot1 | test_decoding | f | copied_slot1_no_change | test_decoding | f
+(3 rows)
+
+-- Now we have a maximum of 4 replication slots. Check that slots are properly
+-- released even when an error is raised while creating the target slot.
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
+ERROR: all replication slots are in use
+HINT: Free one or increase max_replication_slots.
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('orig_slot1');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('copied_slot1_no_change');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('copied_slot1_change_plugin');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+-- Test based on the temporary logical slot
+SELECT 'init' FROM pg_create_logical_replication_slot('orig_slot2', 'test_decoding', true);
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_no_change');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_change_plugin', true, 'pgoutput');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_change_plugin_temp', false, 'pgoutput');
+ ?column?
+----------
+ copy
+(1 row)
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.plugin, o.temporary, c.slot_name, c.plugin, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn AND o.confirmed_flush_lsn = c.confirmed_flush_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+ slot_name | plugin | temporary | slot_name | plugin | temporary
+------------+---------------+-----------+---------------------------------+---------------+-----------
+ orig_slot2 | test_decoding | t | copied_slot2_change_plugin | pgoutput | t
+ orig_slot2 | test_decoding | t | copied_slot2_change_plugin_temp | pgoutput | f
+ orig_slot2 | test_decoding | t | copied_slot2_no_change | test_decoding | t
+(3 rows)
+
+-- Cannot copy a logical slot to a physical slot
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'failed'); -- error
+ERROR: cannot copy logical replication slot "orig_slot2" as a physical replication slot
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('copied_slot2_change_plugin_temp');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+--
+-- Test copy functions for physical replication slots
+--
+-- Create and copy physical slots
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot1', true);
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot2', false);
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot1', 'copied_slot1_no_change');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot1', 'copied_slot1_temp', true);
+ ?column?
+----------
+ copy
+(1 row)
+
+-- Check the status of all copied slots. Since not all slots reserve WAL, we check only the other fields.
+SELECT slot_name, slot_type, temporary FROM pg_replication_slots;
+ slot_name | slot_type | temporary
+------------------------+-----------+-----------
+ orig_slot1 | physical | f
+ orig_slot2 | physical | f
+ copied_slot1_no_change | physical | f
+ copied_slot1_temp | physical | t
+(4 rows)
+
+-- Cannot copy a physical slot to a logical slot
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
+ERROR: cannot copy physical replication slot "orig_slot1" as a logical replication slot
+-- Cannot copy a physical slot that doesn't reserve WAL
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'failed'); -- error
+ERROR: cannot copy a replication slot that doesn't reserve WAL
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('orig_slot1');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('orig_slot2');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('copied_slot1_no_change');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+-- Test based on the temporary physical slot
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot2', true, true);
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'copied_slot2_no_change');
+ ?column?
+----------
+ copy
+(1 row)
+
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'copied_slot2_notemp', false);
+ ?column?
+----------
+ copy
+(1 row)
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.temporary, c.slot_name, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+ slot_name | temporary | slot_name | temporary
+------------+-----------+------------------------+-----------
+ orig_slot2 | t | copied_slot2_no_change | t
+ orig_slot2 | t | copied_slot2_notemp | f
+(2 rows)
+
+SELECT pg_drop_replication_slot('orig_slot2');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('copied_slot2_no_change');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT pg_drop_replication_slot('copied_slot2_notemp');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
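
The advance error exercised above comes from a physical slot created without
WAL reservation. As a hedged counterpart (not part of the test itself),
advancing succeeds once the slot reserves WAL at creation time:

    -- sketch only: 'true' here is immediately_reserve
    SELECT slot_name FROM pg_create_physical_replication_slot('sketch_slot', true);
    SELECT pg_replication_slot_advance('sketch_slot', pg_current_wal_lsn());
    SELECT pg_drop_replication_slot('sketch_slot');
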
diff --git a/contrib/test_decoding/expected/snapshot_transfer.out b/contrib/test_decoding/expected/snapshot_transfer.out
new file mode 100644
index 00000000000..87bed03f766
--- /dev/null
+++ b/contrib/test_decoding/expected/snapshot_transfer.out
@@ -0,0 +1,49 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s0_begin s0_begin_sub0 s0_log_assignment s0_sub_get_base_snap s1_produce_new_snap s0_insert s0_end_sub0 s0_commit s0_get_changes
+step s0_begin: BEGIN;
+step s0_begin_sub0: SAVEPOINT s0;
+step s0_log_assignment: SELECT txid_current() IS NULL;
+?column?
+
+f
+step s0_sub_get_base_snap: INSERT INTO dummy VALUES (0);
+step s1_produce_new_snap: ALTER TABLE harvest ADD COLUMN mangos int;
+step s0_insert: INSERT INTO harvest VALUES (1, 2, 3);
+step s0_end_sub0: RELEASE SAVEPOINT s0;
+step s0_commit: COMMIT;
+step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+data
+
+BEGIN
+table public.dummy: INSERT: i[integer]:0
+table public.harvest: INSERT: apples[integer]:1 pears[integer]:2 mangos[integer]:3
+COMMIT
+?column?
+
+stop
+
+starting permutation: s0_begin s0_begin_sub0 s0_log_assignment s0_begin_sub1 s0_sub_get_base_snap s1_produce_new_snap s0_insert s0_end_sub1 s0_end_sub0 s0_commit s0_get_changes
+step s0_begin: BEGIN;
+step s0_begin_sub0: SAVEPOINT s0;
+step s0_log_assignment: SELECT txid_current() IS NULL;
+?column?
+
+f
+step s0_begin_sub1: SAVEPOINT s1;
+step s0_sub_get_base_snap: INSERT INTO dummy VALUES (0);
+step s1_produce_new_snap: ALTER TABLE harvest ADD COLUMN mangos int;
+step s0_insert: INSERT INTO harvest VALUES (1, 2, 3);
+step s0_end_sub1: RELEASE SAVEPOINT s1;
+step s0_end_sub0: RELEASE SAVEPOINT s0;
+step s0_commit: COMMIT;
+step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+data
+
+BEGIN
+table public.dummy: INSERT: i[integer]:0
+table public.harvest: INSERT: apples[integer]:1 pears[integer]:2 mangos[integer]:3
+COMMIT
+?column?
+
+stop
diff --git a/contrib/test_decoding/expected/truncate.out b/contrib/test_decoding/expected/truncate.out
index be851782066..1cf2ae835c8 100644
--- a/contrib/test_decoding/expected/truncate.out
+++ b/contrib/test_decoding/expected/truncate.out
@@ -1,3 +1,5 @@
+-- predictability
+SET synchronous_commit = on;
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
?column?
----------
diff --git a/contrib/test_decoding/specs/concurrent_ddl_dml.spec b/contrib/test_decoding/specs/concurrent_ddl_dml.spec
index e7cea37d307..d16515e6f48 100644
--- a/contrib/test_decoding/specs/concurrent_ddl_dml.spec
+++ b/contrib/test_decoding/specs/concurrent_ddl_dml.spec
@@ -19,7 +19,6 @@ setup { SET synchronous_commit=on; }
step "s1_init" { SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); }
step "s1_begin" { BEGIN; }
step "s1_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (1, 1); }
-step "s1_insert_tbl1_3col" { INSERT INTO tbl1 (val1, val2, val3) VALUES (1, 1, 1); }
step "s1_insert_tbl2" { INSERT INTO tbl2 (val1, val2) VALUES (1, 1); }
step "s1_insert_tbl2_3col" { INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); }
step "s1_commit" { COMMIT; }
@@ -29,15 +28,8 @@ setup { SET synchronous_commit=on; }
step "s2_alter_tbl1_float" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; }
step "s2_alter_tbl1_char" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; }
-step "s2_alter_tbl1_text" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE text; }
step "s2_alter_tbl1_boolean" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; }
-step "s2_alter_tbl1_add_int" { ALTER TABLE tbl1 ADD COLUMN val3 INTEGER; }
-step "s2_alter_tbl1_add_float" { ALTER TABLE tbl1 ADD COLUMN val3 FLOAT; }
-step "s2_alter_tbl1_add_char" { ALTER TABLE tbl1 ADD COLUMN val3 character varying; }
-step "s2_alter_tbl1_add_boolean" { ALTER TABLE tbl1 ADD COLUMN val3 BOOLEAN; }
-step "s2_alter_tbl1_add_text" { ALTER TABLE tbl1 ADD COLUMN val3 TEXT; }
-
step "s2_alter_tbl2_float" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float; }
step "s2_alter_tbl2_char" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying; }
step "s2_alter_tbl2_text" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text; }
@@ -46,7 +38,6 @@ step "s2_alter_tbl2_boolean" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean;
step "s2_alter_tbl2_add_int" { ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; }
step "s2_alter_tbl2_add_float" { ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; }
step "s2_alter_tbl2_add_char" { ALTER TABLE tbl2 ADD COLUMN val3 character varying; }
-step "s2_alter_tbl2_add_boolean" { ALTER TABLE tbl2 ADD COLUMN val3 BOOLEAN; }
step "s2_alter_tbl2_add_text" { ALTER TABLE tbl2 ADD COLUMN val3 TEXT; }
step "s2_alter_tbl2_drop_3rd_col" { ALTER TABLE tbl2 DROP COLUMN val3; }
step "s2_alter_tbl2_3rd_char" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character varying; }
diff --git a/contrib/test_decoding/specs/oldest_xmin.spec b/contrib/test_decoding/specs/oldest_xmin.spec
new file mode 100644
index 00000000000..6cb13e85cec
--- /dev/null
+++ b/contrib/test_decoding/specs/oldest_xmin.spec
@@ -0,0 +1,42 @@
+# Test advancement of the slot's oldest xmin
+
+setup
+{
+ SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); -- must be first write in xact
+ DROP TYPE IF EXISTS basket;
+ CREATE TYPE basket AS (apples integer, pears integer, mangos integer);
+ DROP TABLE IF EXISTS harvest;
+ CREATE TABLE harvest(fruits basket);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS harvest;
+ DROP TYPE IF EXISTS basket;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+session "s0"
+setup { SET synchronous_commit=on; }
+step "s0_begin" { BEGIN; }
+step "s0_getxid" { SELECT txid_current() IS NULL; }
+step "s0_alter" { ALTER TYPE basket DROP ATTRIBUTE mangos; }
+step "s0_commit" { COMMIT; }
+step "s0_checkpoint" { CHECKPOINT; }
+step "s0_vacuum" { VACUUM pg_attribute; }
+step "s0_get_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); }
+
+session "s1"
+setup { SET synchronous_commit=on; }
+step "s1_begin" { BEGIN; }
+step "s1_insert" { INSERT INTO harvest VALUES ((1, 2, 3)); }
+step "s1_commit" { COMMIT; }
+
+# Checkpoint with a following get_changes forces xmin advancement. We do
+# get_changes twice because if one more xl_running_xacts record slipped in
+# before our CHECKPOINT, xmin would be advanced only up to that record, thus
+# not reaching the value needed for vacuuming the corresponding pg_attribute
+# entry. ALTER of a composite type is a rare form of DDL which allows T1 to
+# see the tuple which will be removed (xmax set) before T1 commits. That is,
+# the interlocking doesn't forbid modifying the catalog after someone has
+# read it (and hasn't committed yet).
+permutation "s0_begin" "s0_getxid" "s1_begin" "s1_insert" "s0_alter" "s0_commit" "s0_checkpoint" "s0_get_changes" "s0_get_changes" "s1_commit" "s0_vacuum" "s0_get_changes"
diff --git a/contrib/test_decoding/specs/snapshot_transfer.spec b/contrib/test_decoding/specs/snapshot_transfer.spec
new file mode 100644
index 00000000000..ae81e8f102d
--- /dev/null
+++ b/contrib/test_decoding/specs/snapshot_transfer.spec
@@ -0,0 +1,43 @@
+# Test snapshot transfer from subxact to top-level and receipt of later snaps.
+
+setup
+{
+ SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); -- must be first write in xact
+ DROP TABLE IF EXISTS dummy;
+ CREATE TABLE dummy(i int);
+ DROP TABLE IF EXISTS harvest;
+ CREATE TABLE harvest(apples int, pears int);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS harvest;
+ DROP TABLE IF EXISTS dummy;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+session "s0"
+setup { SET synchronous_commit=on; }
+step "s0_begin" { BEGIN; }
+step "s0_begin_sub0" { SAVEPOINT s0; }
+step "s0_log_assignment" { SELECT txid_current() IS NULL; }
+step "s0_begin_sub1" { SAVEPOINT s1; }
+step "s0_sub_get_base_snap" { INSERT INTO dummy VALUES (0); }
+step "s0_insert" { INSERT INTO harvest VALUES (1, 2, 3); }
+step "s0_end_sub0" { RELEASE SAVEPOINT s0; }
+step "s0_end_sub1" { RELEASE SAVEPOINT s1; }
+step "s0_commit" { COMMIT; }
+step "s0_get_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); }
+
+session "s1"
+setup { SET synchronous_commit=on; }
+step "s1_produce_new_snap" { ALTER TABLE harvest ADD COLUMN mangos int; }
+
+# start top-level without base snap, get base snap in subxact, then create new
+# snap and make sure it is queued.
+permutation "s0_begin" "s0_begin_sub0" "s0_log_assignment" "s0_sub_get_base_snap" "s1_produce_new_snap" "s0_insert" "s0_end_sub0" "s0_commit" "s0_get_changes"
+
+# In the previous test, we first associated the subxact with the top-level
+# xact and only then got the base snap; now nest one more subxact to get the
+# snap first and only then (at commit) associate it with the top-level xact.
+permutation "s0_begin" "s0_begin_sub0" "s0_log_assignment" "s0_begin_sub1" "s0_sub_get_base_snap" "s1_produce_new_snap" "s0_insert" "s0_end_sub1" "s0_end_sub0" "s0_commit" "s0_get_changes"
diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql
index 0e608b252fa..856495c9526 100644
--- a/contrib/test_decoding/sql/ddl.sql
+++ b/contrib/test_decoding/sql/ddl.sql
@@ -9,7 +9,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_deco
-- fail twice because of invalid parameter values
SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
-SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistant-option', 'frakbar');
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistent-option', 'frakbar');
SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
-- succeed once
@@ -93,22 +93,6 @@ COMMIT;
/* display results */
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
--- MERGE support
-BEGIN;
-MERGE INTO replication_example t
- USING (SELECT i as id, i as data, i as num FROM generate_series(-20, 5) i) s
- ON t.id = s.id
- WHEN MATCHED AND t.id < 0 THEN
- UPDATE SET somenum = somenum + 1
- WHEN MATCHED AND t.id >= 0 THEN
- DELETE
- WHEN NOT MATCHED THEN
- INSERT VALUES (s.*);
-COMMIT;
-
-/* display results */
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
-
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
ALTER TABLE tr_unique RENAME TO tr_pkey;
@@ -250,6 +234,19 @@ INSERT INTO tr_sub(path) VALUES ('5-top-1-#1');
COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+
+-- check that DDL in aborted subtransactions handled correctly
+CREATE TABLE tr_sub_ddl(data int);
+BEGIN;
+SAVEPOINT a;
+ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE text;
+INSERT INTO tr_sub_ddl VALUES ('blah-blah');
+ROLLBACK TO SAVEPOINT a;
+ALTER TABLE tr_sub_ddl ALTER COLUMN data TYPE bigint;
+INSERT INTO tr_sub_ddl VALUES(43);
+COMMIT;
+
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
diff --git a/contrib/test_decoding/sql/replorigin.sql b/contrib/test_decoding/sql/replorigin.sql
index 451cd4bc3b2..8979b306160 100644
--- a/contrib/test_decoding/sql/replorigin.sql
+++ b/contrib/test_decoding/sql/replorigin.sql
@@ -4,19 +4,19 @@ SET synchronous_commit = on;
CREATE TABLE origin_tbl(id serial primary key, data text);
CREATE TABLE target_tbl(id serial primary key, data text);
-SELECT pg_replication_origin_create('test_decoding: regression_slot');
+SELECT pg_replication_origin_create('regress_test_decoding: regression_slot');
-- ensure duplicate creations fail
-SELECT pg_replication_origin_create('test_decoding: regression_slot');
+SELECT pg_replication_origin_create('regress_test_decoding: regression_slot');
--ensure deletions work (once)
-SELECT pg_replication_origin_create('test_decoding: temp');
-SELECT pg_replication_origin_drop('test_decoding: temp');
-SELECT pg_replication_origin_drop('test_decoding: temp');
+SELECT pg_replication_origin_create('regress_test_decoding: temp');
+SELECT pg_replication_origin_drop('regress_test_decoding: temp');
+SELECT pg_replication_origin_drop('regress_test_decoding: temp');
-- various failure checks for undefined slots
-select pg_replication_origin_advance('test_decoding: temp', '0/1');
-select pg_replication_origin_session_setup('test_decoding: temp');
-select pg_replication_origin_progress('test_decoding: temp', true);
+select pg_replication_origin_advance('regress_test_decoding: temp', '0/1');
+select pg_replication_origin_session_setup('regress_test_decoding: temp');
+select pg_replication_origin_progress('regress_test_decoding: temp', true);
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
@@ -31,10 +31,10 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
INSERT INTO origin_tbl(data) VALUES ('will be replicated, but not decoded again');
-- mark session as replaying
-SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+SELECT pg_replication_origin_session_setup('regress_test_decoding: regression_slot');
-- ensure we prevent duplicate setup
-SELECT pg_replication_origin_session_setup('test_decoding: regression_slot');
+SELECT pg_replication_origin_session_setup('regress_test_decoding: regression_slot');
SELECT '' FROM pg_logical_emit_message(false, 'test', 'this message will not be decoded');
@@ -54,8 +54,8 @@ SELECT pg_replication_origin_session_reset();
SELECT local_id, external_id, remote_lsn, local_lsn <> '0/0' FROM pg_replication_origin_status;
-- check replication progress identified by name is correct
-SELECT pg_replication_origin_progress('test_decoding: regression_slot', false);
-SELECT pg_replication_origin_progress('test_decoding: regression_slot', true);
+SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', false);
+SELECT pg_replication_origin_progress('regress_test_decoding: regression_slot', true);
-- ensure reset requires previously setup state
SELECT pg_replication_origin_session_reset();
@@ -68,4 +68,4 @@ INSERT INTO origin_tbl(data) VALUES ('will be replicated');
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'only-local', '1');
SELECT pg_drop_replication_slot('regression_slot');
-SELECT pg_replication_origin_drop('test_decoding: regression_slot');
+SELECT pg_replication_origin_drop('regress_test_decoding: regression_slot');
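
The renamed origins follow the regress_ naming convention; the lifecycle they
exercise is unchanged. A hedged sketch of the replay pattern, using only
functions already shown in this test:

    -- sketch only: tag a session's writes with an origin so that a decoding
    -- session passing 'only-local', '1' skips them
    SELECT pg_replication_origin_create('regress_test_decoding: sketch');
    SELECT pg_replication_origin_session_setup('regress_test_decoding: sketch');
    INSERT INTO origin_tbl(data) VALUES ('replayed, filtered by only-local');
    SELECT pg_replication_origin_session_reset();
    SELECT pg_replication_origin_drop('regress_test_decoding: sketch');
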
diff --git a/contrib/test_decoding/sql/rewrite.sql b/contrib/test_decoding/sql/rewrite.sql
index 8a7329423de..62dead3a9b1 100644
--- a/contrib/test_decoding/sql/rewrite.sql
+++ b/contrib/test_decoding/sql/rewrite.sql
@@ -3,6 +3,35 @@ SET synchronous_commit = on;
DROP TABLE IF EXISTS replication_example;
+-- Ensure there are tables with toast datums. To do so, we dynamically
+-- create a function returning a large text blob. We want tables of
+-- different kinds: a mapped catalog table, an unmapped catalog table,
+-- a shared catalog table, and a user table.
+CREATE FUNCTION exec(text) returns void language plpgsql volatile
+ AS $f$
+ BEGIN
+ EXECUTE $1;
+ END;
+$f$;
+CREATE ROLE regress_justforcomments NOLOGIN;
+
+SELECT exec(
+ format($outer$CREATE FUNCTION iamalongfunction() RETURNS TEXT IMMUTABLE LANGUAGE SQL AS $f$SELECT text %L$f$$outer$,
+ (SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i))));
+SELECT exec(
+ format($outer$COMMENT ON FUNCTION iamalongfunction() IS %L$outer$,
+ iamalongfunction()));
+SELECT exec(
+ format($outer$COMMENT ON ROLE REGRESS_JUSTFORCOMMENTS IS %L$outer$,
+ iamalongfunction()));
+CREATE TABLE iamalargetable AS SELECT iamalongfunction() longfunctionoutput;
+
+-- verify toast usage
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_proc'::regclass)) > 0;
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_description'::regclass)) > 0;
+SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_shdescription'::regclass)) > 0;
+
+
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));
INSERT INTO replication_example(somedata) VALUES (1);
@@ -45,6 +74,11 @@ COMMIT;
VACUUM FULL pg_class;
VACUUM FULL pg_class;
+-- reindexing of important relations / indexes
+REINDEX TABLE pg_class;
+REINDEX INDEX pg_class_oid_index;
+REINDEX INDEX pg_class_tblspc_relfilenode_index;
+
INSERT INTO replication_example(somedata, testcolumn1) VALUES (5, 3);
BEGIN;
@@ -57,6 +91,17 @@ COMMIT;
CHECKPOINT;
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
-SELECT pg_drop_replication_slot('regression_slot');
+-- Trigger repeated rewrites of a system catalog with a toast table;
+-- this previously was buggy: 20180914021046.oi7dm4ra3ot2g2kt@alap3.anarazel.de
+VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (8, 6, 1);
+VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (9, 7, 1);
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+
+SELECT pg_drop_replication_slot('regression_slot');
DROP TABLE IF EXISTS replication_example;
+DROP FUNCTION iamalongfunction();
+DROP FUNCTION exec(text);
+DROP ROLE regress_justforcomments;
diff --git a/contrib/test_decoding/sql/slot.sql b/contrib/test_decoding/sql/slot.sql
index 706340c1d8d..6d83fb26782 100644
--- a/contrib/test_decoding/sql/slot.sql
+++ b/contrib/test_decoding/sql/slot.sql
@@ -9,6 +9,8 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_p', 'test
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_t2', 'test_decoding', true);
+SELECT pg_create_logical_replication_slot('foo', 'nonexistent');
+
-- here we want to start a new session and wait till old one is gone
select pg_backend_pid() as oldpid \gset
\c -
@@ -68,3 +70,103 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot1', 'test_
-- both should error as they should be dropped on error
SELECT pg_drop_replication_slot('regression_slot1');
SELECT pg_drop_replication_slot('regression_slot2');
+
+-- slot advance with physical slot, error with non-reserved slot
+SELECT slot_name FROM pg_create_physical_replication_slot('regression_slot3');
+SELECT pg_replication_slot_advance('regression_slot3', '0/0'); -- invalid LSN
+SELECT pg_replication_slot_advance('regression_slot3', '0/1'); -- error
+SELECT pg_drop_replication_slot('regression_slot3');
+
+--
+-- Test copy functions for logical replication slots
+--
+
+-- Create and copy logical slots
+SELECT 'init' FROM pg_create_logical_replication_slot('orig_slot1', 'test_decoding', false);
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_no_change');
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_change_plugin', false, 'pgoutput');
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'copied_slot1_change_plugin_temp', true, 'pgoutput');
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.plugin, o.temporary, c.slot_name, c.plugin, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn AND o.confirmed_flush_lsn = c.confirmed_flush_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+
+-- Now we have a maximum of 4 replication slots. Check that slots are properly
+-- released even when an error is raised while creating the target slot.
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
+
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('orig_slot1');
+SELECT pg_drop_replication_slot('copied_slot1_no_change');
+SELECT pg_drop_replication_slot('copied_slot1_change_plugin');
+
+-- Test based on the temporary logical slot
+SELECT 'init' FROM pg_create_logical_replication_slot('orig_slot2', 'test_decoding', true);
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_no_change');
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_change_plugin', true, 'pgoutput');
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot2', 'copied_slot2_change_plugin_temp', false, 'pgoutput');
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.plugin, o.temporary, c.slot_name, c.plugin, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn AND o.confirmed_flush_lsn = c.confirmed_flush_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+
+-- Cannot copy a logical slot to a physical slot
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'failed'); -- error
+
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('copied_slot2_change_plugin_temp');
+
+--
+-- Test copy functions for physical replication slots
+--
+
+-- Create and copy physical slots
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot1', true);
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot2', false);
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot1', 'copied_slot1_no_change');
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot1', 'copied_slot1_temp', true);
+
+-- Check the status of all copied slots. Since not all slots reserve WAL, we check only the other fields.
+SELECT slot_name, slot_type, temporary FROM pg_replication_slots;
+
+-- Cannot copy a physical slot to a logical slot
+SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
+
+-- Cannot copy a physical slot that doesn't reserve WAL
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'failed'); -- error
+
+-- temporary slots were dropped automatically
+SELECT pg_drop_replication_slot('orig_slot1');
+SELECT pg_drop_replication_slot('orig_slot2');
+SELECT pg_drop_replication_slot('copied_slot1_no_change');
+
+-- Test again, this time with a temporary physical slot as the source
+SELECT 'init' FROM pg_create_physical_replication_slot('orig_slot2', true, true);
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'copied_slot2_no_change');
+SELECT 'copy' FROM pg_copy_physical_replication_slot('orig_slot2', 'copied_slot2_notemp', false);
+
+-- Check the status of all copied slots
+SELECT
+ o.slot_name, o.temporary, c.slot_name, c.temporary
+FROM
+ (SELECT * FROM pg_replication_slots WHERE slot_name LIKE 'orig%') as o
+ LEFT JOIN pg_replication_slots as c ON o.restart_lsn = c.restart_lsn
+WHERE
+ o.slot_name != c.slot_name
+ORDER BY o.slot_name, c.slot_name;
+
+SELECT pg_drop_replication_slot('orig_slot2');
+SELECT pg_drop_replication_slot('copied_slot2_no_change');
+SELECT pg_drop_replication_slot('copied_slot2_notemp');
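As a minimal usage sketch of the two copy functions this file tests (not part
of the patch; the slot names are invented and the argument meanings are
inferred from the calls above: source slot, target slot, optional temporary
flag, optional replacement output plugin):

    SELECT 'init' FROM pg_create_logical_replication_slot('demo_src', 'test_decoding', false);
    SELECT 'copy' FROM pg_copy_logical_replication_slot('demo_src', 'demo_dst', true, 'pgoutput');
    -- the copy starts at the source's restart_lsn and confirmed_flush_lsn
    SELECT slot_name, plugin, temporary FROM pg_replication_slots WHERE slot_name LIKE 'demo%';
    SELECT pg_drop_replication_slot('demo_src');  -- demo_dst is temporary and goes away by itself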
diff --git a/contrib/test_decoding/sql/truncate.sql b/contrib/test_decoding/sql/truncate.sql
index 88f113fd5b1..5aecdf0881f 100644
--- a/contrib/test_decoding/sql/truncate.sql
+++ b/contrib/test_decoding/sql/truncate.sql
@@ -1,3 +1,6 @@
+-- predictability
+SET synchronous_commit = on;
+
SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
CREATE TABLE tab1 (id serial unique, data int);
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index e192d5b4ad4..6c33c4bdedb 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -3,7 +3,7 @@
* test_decoding.c
* example logical decoding output plugin
*
- * Copyright (c) 2012-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2012-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_decoding/test_decoding.c
@@ -24,7 +24,7 @@
PG_MODULE_MAGIC;
-/* These must be available to pg_dlsym() */
+/* These must be available to dlsym() */
extern void _PG_init(void);
extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
@@ -39,29 +39,29 @@ typedef struct
} TestDecodingData;
static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
- bool is_init);
+ bool is_init);
static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn);
+ ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
- TestDecodingData *data,
- ReorderBufferTXN *txn,
- bool last_write);
+ TestDecodingData *data,
+ ReorderBufferTXN *txn,
+ bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+ ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, Relation rel,
- ReorderBufferChange *change);
+ ReorderBufferTXN *txn, Relation rel,
+ ReorderBufferChange *change);
static void pg_decode_truncate(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn,
int nrelations, Relation relations[],
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
- RepOriginId origin_id);
+ RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, XLogRecPtr message_lsn,
- bool transactional, const char *prefix,
- Size sz, const char *message);
+ ReorderBufferTXN *txn, XLogRecPtr message_lsn,
+ bool transactional, const char *prefix,
+ Size sz, const char *message);
void
_PG_init(void)
@@ -319,13 +319,6 @@ static void
tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
{
int natt;
- Oid oid;
-
- /* print oid of tuple, it's not included in the TupleDesc */
- if ((oid = HeapTupleHeaderGetOid(tuple->t_data)) != InvalidOid)
- {
- appendStringInfo(s, " oid[oid]:%u", oid);
- }
/* print all columns individually */
for (natt = 0; natt < tupdesc->natts; natt++)
@@ -525,9 +518,9 @@ pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
|| change->data.truncate.cascade)
{
if (change->data.truncate.restart_seqs)
- appendStringInfo(ctx->out, " restart_seqs");
+ appendStringInfoString(ctx->out, " restart_seqs");
if (change->data.truncate.cascade)
- appendStringInfo(ctx->out, " cascade");
+ appendStringInfoString(ctx->out, " cascade");
}
else
appendStringInfoString(ctx->out, " (no-flags)");
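For orientation, a hedged sketch of what the plugin emits once the
oid-printing branch above is gone (slot and table names invented; the
expected line is the plugin's usual per-column format, now without any
oid[oid]: prefix):

    SELECT 'init' FROM pg_create_logical_replication_slot('demo_slot', 'test_decoding');
    CREATE TABLE demo (id int PRIMARY KEY, v text);
    INSERT INTO demo VALUES (1, 'x');
    SELECT data FROM pg_logical_slot_get_changes('demo_slot', NULL, NULL);
    -- roughly: table public.demo: INSERT: id[integer]:1 v[text]:'x'
    SELECT pg_drop_replication_slot('demo_slot');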
diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c
index 83f841f0c2e..e6dbf6bc0dc 100644
--- a/contrib/tsm_system_rows/tsm_system_rows.c
+++ b/contrib/tsm_system_rows/tsm_system_rows.c
@@ -17,7 +17,7 @@
* won't visit blocks added after the first scan, but that is fine since
* such blocks shouldn't contain any visible tuples anyway.
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
@@ -32,8 +32,7 @@
#include "access/tsmapi.h"
#include "catalog/pg_type.h"
#include "miscadmin.h"
-#include "optimizer/clauses.h"
-#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
#include "utils/sampling.h"
PG_MODULE_MAGIC;
@@ -46,7 +45,6 @@ typedef struct
{
uint32 seed; /* random seed */
int64 ntuples; /* number of tuples to return */
- int64 donetuples; /* number of tuples already returned */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber doneblocks; /* number of already-scanned blocks */
BlockNumber lb; /* last block visited */
@@ -57,21 +55,20 @@ typedef struct
} SystemRowsSamplerData;
static void system_rows_samplescangetsamplesize(PlannerInfo *root,
- RelOptInfo *baserel,
- List *paramexprs,
- BlockNumber *pages,
- double *tuples);
+ RelOptInfo *baserel,
+ List *paramexprs,
+ BlockNumber *pages,
+ double *tuples);
static void system_rows_initsamplescan(SampleScanState *node,
- int eflags);
+ int eflags);
static void system_rows_beginsamplescan(SampleScanState *node,
- Datum *params,
- int nparams,
- uint32 seed);
-static BlockNumber system_rows_nextsampleblock(SampleScanState *node);
+ Datum *params,
+ int nparams,
+ uint32 seed);
+static BlockNumber system_rows_nextsampleblock(SampleScanState *node, BlockNumber nblocks);
static OffsetNumber system_rows_nextsampletuple(SampleScanState *node,
- BlockNumber blockno,
- OffsetNumber maxoffset);
-static bool SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan);
+ BlockNumber blockno,
+ OffsetNumber maxoffset);
static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
@@ -187,7 +184,6 @@ system_rows_beginsamplescan(SampleScanState *node,
sampler->seed = seed;
sampler->ntuples = ntuples;
- sampler->donetuples = 0;
sampler->lt = InvalidOffsetNumber;
sampler->doneblocks = 0;
/* lb will be initialized during first NextSampleBlock call */
@@ -206,10 +202,9 @@ system_rows_beginsamplescan(SampleScanState *node,
* Uses linear probing algorithm for picking next block.
*/
static BlockNumber
-system_rows_nextsampleblock(SampleScanState *node)
+system_rows_nextsampleblock(SampleScanState *node, BlockNumber nblocks)
{
SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state;
- HeapScanDesc scan = node->ss.ss_currentScanDesc;
/* First call within scan? */
if (sampler->doneblocks == 0)
@@ -221,14 +216,14 @@ system_rows_nextsampleblock(SampleScanState *node)
SamplerRandomState randstate;
/* If relation is empty, there's nothing to scan */
- if (scan->rs_nblocks == 0)
+ if (nblocks == 0)
return InvalidBlockNumber;
/* We only need an RNG during this setup step */
sampler_random_init_state(sampler->seed, randstate);
/* Compute nblocks/firstblock/step only once per query */
- sampler->nblocks = scan->rs_nblocks;
+ sampler->nblocks = nblocks;
/* Choose random starting block within the relation */
/* (Actually this is the predecessor of the first block visited) */
@@ -245,7 +240,7 @@ system_rows_nextsampleblock(SampleScanState *node)
/* If we've read all blocks or returned all needed tuples, we're done */
if (++sampler->doneblocks > sampler->nblocks ||
- sampler->donetuples >= sampler->ntuples)
+ node->donetuples >= sampler->ntuples)
return InvalidBlockNumber;
/*
@@ -258,7 +253,7 @@ system_rows_nextsampleblock(SampleScanState *node)
{
/* Advance lb, using uint64 arithmetic to forestall overflow */
sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks;
- } while (sampler->lb >= scan->rs_nblocks);
+ } while (sampler->lb >= nblocks);
return sampler->lb;
}
@@ -278,76 +273,27 @@ system_rows_nextsampletuple(SampleScanState *node,
OffsetNumber maxoffset)
{
SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state;
- HeapScanDesc scan = node->ss.ss_currentScanDesc;
OffsetNumber tupoffset = sampler->lt;
/* Quit if we've returned all needed tuples */
- if (sampler->donetuples >= sampler->ntuples)
+ if (node->donetuples >= sampler->ntuples)
return InvalidOffsetNumber;
- /*
- * Because we should only count visible tuples as being returned, we need
- * to search for a visible tuple rather than just let the core code do it.
- */
-
- /* We rely on the data accumulated in pagemode access */
- Assert(scan->rs_pageatatime);
- for (;;)
- {
- /* Advance to next possible offset on page */
- if (tupoffset == InvalidOffsetNumber)
- tupoffset = FirstOffsetNumber;
- else
- tupoffset++;
-
- /* Done? */
- if (tupoffset > maxoffset)
- {
- tupoffset = InvalidOffsetNumber;
- break;
- }
+ /* Advance to next possible offset on page */
+ if (tupoffset == InvalidOffsetNumber)
+ tupoffset = FirstOffsetNumber;
+ else
+ tupoffset++;
- /* Found a candidate? */
- if (SampleOffsetVisible(tupoffset, scan))
- {
- sampler->donetuples++;
- break;
- }
- }
+ /* Done? */
+ if (tupoffset > maxoffset)
+ tupoffset = InvalidOffsetNumber;
sampler->lt = tupoffset;
return tupoffset;
}
-/*
- * Check if tuple offset is visible
- *
- * In pageatatime mode, heapgetpage() already did visibility checks,
- * so just look at the info it left in rs_vistuples[].
- */
-static bool
-SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan)
-{
- int start = 0,
- end = scan->rs_ntuples - 1;
-
- while (start <= end)
- {
- int mid = (start + end) / 2;
- OffsetNumber curoffset = scan->rs_vistuples[mid];
-
- if (tupoffset == curoffset)
- return true;
- else if (tupoffset < curoffset)
- end = mid - 1;
- else
- start = mid + 1;
- }
-
- return false;
-}
-
/*
* Compute greatest common divisor of two uint32's.
*/
diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c
index f0c220aa4ac..ed486c4696b 100644
--- a/contrib/tsm_system_time/tsm_system_time.c
+++ b/contrib/tsm_system_time/tsm_system_time.c
@@ -13,7 +13,7 @@
* However, we do what we can to reduce surprising behavior by selecting
* the sampling pattern just once per query, much as in tsm_system_rows.
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
@@ -24,17 +24,13 @@
#include "postgres.h"
-#ifdef _MSC_VER
-#include <float.h>			/* for _isnan */
-#endif
#include <math.h>
#include "access/relscan.h"
#include "access/tsmapi.h"
#include "catalog/pg_type.h"
#include "miscadmin.h"
-#include "optimizer/clauses.h"
-#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
#include "utils/sampling.h"
#include "utils/spccache.h"
@@ -59,20 +55,20 @@ typedef struct
} SystemTimeSamplerData;
static void system_time_samplescangetsamplesize(PlannerInfo *root,
- RelOptInfo *baserel,
- List *paramexprs,
- BlockNumber *pages,
- double *tuples);
+ RelOptInfo *baserel,
+ List *paramexprs,
+ BlockNumber *pages,
+ double *tuples);
static void system_time_initsamplescan(SampleScanState *node,
- int eflags);
+ int eflags);
static void system_time_beginsamplescan(SampleScanState *node,
- Datum *params,
- int nparams,
- uint32 seed);
-static BlockNumber system_time_nextsampleblock(SampleScanState *node);
+ Datum *params,
+ int nparams,
+ uint32 seed);
+static BlockNumber system_time_nextsampleblock(SampleScanState *node, BlockNumber nblocks);
static OffsetNumber system_time_nextsampletuple(SampleScanState *node,
- BlockNumber blockno,
- OffsetNumber maxoffset);
+ BlockNumber blockno,
+ OffsetNumber maxoffset);
static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
@@ -216,10 +212,9 @@ system_time_beginsamplescan(SampleScanState *node,
* Uses linear probing algorithm for picking next block.
*/
static BlockNumber
-system_time_nextsampleblock(SampleScanState *node)
+system_time_nextsampleblock(SampleScanState *node, BlockNumber nblocks)
{
SystemTimeSamplerData *sampler = (SystemTimeSamplerData *) node->tsm_state;
- HeapScanDesc scan = node->ss.ss_currentScanDesc;
instr_time cur_time;
/* First call within scan? */
@@ -232,14 +227,14 @@ system_time_nextsampleblock(SampleScanState *node)
SamplerRandomState randstate;
/* If relation is empty, there's nothing to scan */
- if (scan->rs_nblocks == 0)
+ if (nblocks == 0)
return InvalidBlockNumber;
/* We only need an RNG during this setup step */
sampler_random_init_state(sampler->seed, randstate);
/* Compute nblocks/firstblock/step only once per query */
- sampler->nblocks = scan->rs_nblocks;
+ sampler->nblocks = nblocks;
/* Choose random starting block within the relation */
/* (Actually this is the predecessor of the first block visited) */
@@ -275,7 +270,7 @@ system_time_nextsampleblock(SampleScanState *node)
{
/* Advance lb, using uint64 arithmetic to forestall overflow */
sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks;
- } while (sampler->lb >= scan->rs_nblocks);
+ } while (sampler->lb >= nblocks);
return sampler->lb;
}
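Both sampling methods keep their SQL-level behavior; the change above only
moves block-count and returned-tuple bookkeeping into the core executor
(nblocks is now passed to NextSampleBlock, and donetuples lives in
SampleScanState). A hedged usage sketch, assuming both extensions are
installed:

    CREATE EXTENSION tsm_system_rows;
    CREATE EXTENSION tsm_system_time;
    -- at most 100 rows, visiting blocks in the linear-probe order chosen above
    SELECT count(*) FROM pg_class TABLESAMPLE SYSTEM_ROWS(100);
    -- read as many blocks as can be scanned in about 5 milliseconds
    SELECT count(*) FROM pg_class TABLESAMPLE SYSTEM_TIME(5);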
diff --git a/contrib/unaccent/expected/unaccent.out b/contrib/unaccent/expected/unaccent.out
index b93105e9c7c..c1bd7cd897d 100644
--- a/contrib/unaccent/expected/unaccent.out
+++ b/contrib/unaccent/expected/unaccent.out
@@ -6,23 +6,35 @@ SELECT getdatabaseencoding();
UTF8
(1 row)
-SET client_encoding TO 'KOI8';
+SET client_encoding TO 'UTF8';
SELECT unaccent('foobar');
unaccent
----------
foobar
(1 row)
-SELECT unaccent('£ÌËÁ');
+SELECT unaccent('ёлка');
unaccent
----------
- ÅÌËÁ
+ елка
(1 row)
-SELECT unaccent('³öéë');
+SELECT unaccent('ЁЖИК');
unaccent
----------
- åöéë
+ ЕЖИК
+(1 row)
+
+SELECT unaccent('˃˖˗˜');
+ unaccent
+----------
+ >+-~
+(1 row)
+
+SELECT unaccent('À'); -- Remove combining diacritical 0x0300
+ unaccent
+----------
+ A
(1 row)
SELECT unaccent('unaccent', 'foobar');
@@ -31,16 +43,28 @@ SELECT unaccent('unaccent', 'foobar');
foobar
(1 row)
-SELECT unaccent('unaccent', '£ÌËÁ');
+SELECT unaccent('unaccent', 'ёлка');
+ unaccent
+----------
+ елка
+(1 row)
+
+SELECT unaccent('unaccent', 'ЁЖИК');
unaccent
----------
- ÅÌËÁ
+ ЕЖИК
(1 row)

-SELECT unaccent('unaccent', '³öéë');
+SELECT unaccent('unaccent', '˃˖˗˜');
unaccent
----------
- åöéë
+ >+-~
+(1 row)
+
+SELECT unaccent('unaccent', 'À');
+ unaccent
+----------
+ A
(1 row)
SELECT ts_lexize('unaccent', 'foobar');
@@ -49,15 +73,27 @@ SELECT ts_lexize('unaccent', 'foobar');
(1 row)
-SELECT ts_lexize('unaccent', '£ÌËÁ');
+SELECT ts_lexize('unaccent', 'ёлка');
+ ts_lexize
+-----------
+ {елка}
+(1 row)
+
+SELECT ts_lexize('unaccent', 'ЁЖИК');
+ ts_lexize
+-----------
+ {ЕЖИК}
+(1 row)
+
+SELECT ts_lexize('unaccent', '˃˖˗˜');
ts_lexize
-----------
- {ÅÌËÁ}
+ {>+-~}
(1 row)
-SELECT ts_lexize('unaccent', '³öéë');
+SELECT ts_lexize('unaccent', 'À');
ts_lexize
-----------
- {åöéë}
+ {A}
(1 row)
diff --git a/contrib/unaccent/generate_unaccent_rules.py b/contrib/unaccent/generate_unaccent_rules.py
index 4b1b011861f..7a0a96e04f7 100644
--- a/contrib/unaccent/generate_unaccent_rules.py
+++ b/contrib/unaccent/generate_unaccent_rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script builds unaccent.rules on standard output when given the
@@ -20,17 +20,69 @@
# option is enabled, the XML file of this transliterator [2] -- given as a
# command line argument -- will be parsed and used.
#
+# Ideally you should use the latest release for each data set. For
+# Latin-ASCII.xml, the latest data sets released can be browsed directly
+# via [3]. Note that this script is compatible with at least release 29.
+#
# [1] http://unicode.org/Public/8.0.0/ucd/UnicodeData.txt
-# [2] http://unicode.org/cldr/trac/export/12304/tags/release-28/common/transforms/Latin-ASCII.xml
+# [2] http://unicode.org/cldr/trac/export/14746/tags/release-34/common/transforms/Latin-ASCII.xml
+# [3] https://unicode.org/cldr/trac/browser/tags
+# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
+# The approach is to be Python3 compatible with Python2 "backports".
+from __future__ import print_function
+from __future__ import unicode_literals
+# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
-import re
import argparse
+import codecs
+import re
import sys
import xml.etree.ElementTree as ET
+# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
+if sys.version_info[0] <= 2:
+ # Encode stdout as UTF-8, so we can just print to it
+ sys.stdout = codecs.getwriter('utf8')(sys.stdout)
+
+ # Map Python 2's chr to unichr
+ chr = unichr
+
+ # Python 2 and 3 compatible bytes call
+ def bytes(source, encoding='ascii', errors='strict'):
+ return source.encode(encoding=encoding, errors=errors)
+else:
+# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
+ sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
+
+# The ranges of Unicode characters that we consider to be "plain letters".
+# For now we are being conservative by including only Latin and Greek. This
+# could be extended in future based on feedback from people with relevant
+# language knowledge.
+PLAIN_LETTER_RANGES = ((ord('a'), ord('z')), # Latin lower case
+ (ord('A'), ord('Z')), # Latin upper case
+ (0x03b1, 0x03c9), # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA
+ (0x0391, 0x03a9)) # GREEK CAPITAL LETTER ALPHA, GREEK CAPITAL LETTER OMEGA
+
+# Combining marks follow a "base" character, and result in a composite
+# character. Example: "U&'A\0300'" produces "À". There are three types of
+# combining marks: enclosing (Me), non-spacing combining (Mn), spacing
+# combining (Mc). We identify the ranges of marks we feel safe removing.
+# References:
+# https://en.wikipedia.org/wiki/Combining_character
+# https://www.unicode.org/charts/PDF/U0300.pdf
+# https://www.unicode.org/charts/PDF/U20D0.pdf
+COMBINING_MARK_RANGES = ((0x0300, 0x0362), # Mn: Accents, IPA
+ (0x20dd, 0x20E0), # Me: Symbols
+ (0x20e2, 0x20e4),) # Me: Screen, keycap, triangle
+
def print_record(codepoint, letter):
- print (unichr(codepoint) + "\t" + letter).encode("UTF-8")
+ if letter:
+ output = chr(codepoint) + "\t" + letter
+ else:
+ output = chr(codepoint)
+
+ print(output)
class Codepoint:
def __init__(self, id, general_category, combining_ids):
@@ -38,10 +90,22 @@ def __init__(self, id, general_category, combining_ids):
self.general_category = general_category
self.combining_ids = combining_ids
+def is_mark_to_remove(codepoint):
+ """Return true if this is a combining mark to remove."""
+ if not is_mark(codepoint):
+ return False
+
+ for begin, end in COMBINING_MARK_RANGES:
+ if codepoint.id >= begin and codepoint.id <= end:
+ return True
+ return False
+
def is_plain_letter(codepoint):
- """Return true if codepoint represents a plain ASCII letter."""
- return (codepoint.id >= ord('a') and codepoint.id <= ord('z')) or \
- (codepoint.id >= ord('A') and codepoint.id <= ord('Z'))
+ """Return true if codepoint represents a "plain letter"."""
+ for begin, end in PLAIN_LETTER_RANGES:
+ if codepoint.id >= begin and codepoint.id <= end:
+ return True
+ return False
def is_mark(codepoint):
"""Returns true for diacritical marks (combining codepoints)."""
@@ -105,14 +169,24 @@ def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
charactersSet = set()
# RegEx to parse rules
- rulePattern = re.compile(ur'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')
+ rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')
# construct tree from XML
transliterationTree = ET.parse(latinAsciiFilePath)
transliterationTreeRoot = transliterationTree.getroot()
- for rule in transliterationTreeRoot.findall("./transforms/transform/tRule"):
- matches = rulePattern.search(rule.text)
+ # Fetch all the transliteration rules. Since release 29 of Latin-ASCII.xml
+ # all the transliteration rules are located in a single tRule block with
+# all rules placed on separate lines.
+ blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
+ assert(len(blockRules) == 1)
+
+ # Split the block of rules into one element per line.
+ rules = blockRules[0].text.splitlines()
+
+ # And finish the processing of each individual rule.
+ for rule in rules:
+ matches = rulePattern.search(rule)
# The regular expression captures four groups corresponding
# to the characters.
@@ -123,7 +197,7 @@ def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
# Group 3: plain "trg" char. Empty if group 4 is not.
# Group 4: plain "trg" char between quotes. Empty if group 3 is not.
if matches is not None:
- src = matches.group(1) if matches.group(1) is not None else matches.group(2).decode('unicode-escape')
+ src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
trg = matches.group(3) if matches.group(3) is not None else matches.group(4)
# "'" and """ are escaped
@@ -162,21 +236,22 @@ def main(args):
charactersSet = set()
# read file UnicodeData.txt
- unicodeDataFile = open(args.unicodeDataFilePath, 'r')
-
- # read everything we need into memory
- for line in unicodeDataFile:
- fields = line.split(";")
- if len(fields) > 5:
- # http://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
- general_category = fields[2]
- decomposition = fields[5]
- decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
- id = int(fields[0], 16)
- combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
- codepoint = Codepoint(id, general_category, combining_ids)
- table[id] = codepoint
- all.append(codepoint)
+ with codecs.open(
+ args.unicodeDataFilePath, mode='r', encoding='UTF-8',
+ ) as unicodeDataFile:
+ # read everything we need into memory
+ for line in unicodeDataFile:
+ fields = line.split(";")
+ if len(fields) > 5:
+ # http://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
+ general_category = fields[2]
+ decomposition = fields[5]
+ decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
+ id = int(fields[0], 16)
+ combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
+ codepoint = Codepoint(id, general_category, combining_ids)
+ table[id] = codepoint
+ all.append(codepoint)
# walk through all the codepoints looking for interesting mappings
for codepoint in all:
@@ -187,9 +262,11 @@ def main(args):
chr(get_plain_letter(codepoint, table).id)))
elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
charactersSet.add((codepoint.id,
- "".join(unichr(combining_codepoint.id)
+ "".join(chr(combining_codepoint.id)
for combining_codepoint \
in get_plain_letters(codepoint, table))))
+ elif is_mark_to_remove(codepoint):
+ charactersSet.add((codepoint.id, None))
# add CLDR Latin-ASCII characters
if not args.noLigaturesExpansion:
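A hedged end-to-end sketch of what the new COMBINING_MARK_RANGES entries mean
for the generated rules (assumes a database with the unaccent extension
installed; the U& escapes spell out the combining codepoints explicitly):

    SELECT unaccent(U&'e\0301');  -- 'e' + U+0301 COMBINING ACUTE ACCENT => 'e'
    SELECT unaccent(U&'A\0300');  -- 'A' + U+0300 COMBINING GRAVE ACCENT => 'A'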
diff --git a/contrib/unaccent/sql/unaccent.sql b/contrib/unaccent/sql/unaccent.sql
index 310213994f3..2ae097ff2b8 100644
--- a/contrib/unaccent/sql/unaccent.sql
+++ b/contrib/unaccent/sql/unaccent.sql
@@ -3,16 +3,22 @@ CREATE EXTENSION unaccent;
-- must have a UTF8 database
SELECT getdatabaseencoding();
-SET client_encoding TO 'KOI8';
+SET client_encoding TO 'UTF8';
SELECT unaccent('foobar');
-SELECT unaccent('£ÌËÁ');
-SELECT unaccent('³öéë');
+SELECT unaccent('ёлка');
+SELECT unaccent('ЁЖИК');
+SELECT unaccent('˃˖˗˜');
+SELECT unaccent('À'); -- Remove combining diacritical 0x0300
SELECT unaccent('unaccent', 'foobar');
-SELECT unaccent('unaccent', '£ÌËÁ');
-SELECT unaccent('unaccent', '³öéë');
+SELECT unaccent('unaccent', 'ёлка');
+SELECT unaccent('unaccent', 'ЁЖИК');
+SELECT unaccent('unaccent', '˃˖˗˜');
+SELECT unaccent('unaccent', 'À');
SELECT ts_lexize('unaccent', 'foobar');
-SELECT ts_lexize('unaccent', '£ÌËÁ');
-SELECT ts_lexize('unaccent', '³öéë');
+SELECT ts_lexize('unaccent', 'ёлка');
+SELECT ts_lexize('unaccent', 'ЁЖИК');
+SELECT ts_lexize('unaccent', '˃˖˗˜');
+SELECT ts_lexize('unaccent', 'À');
diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c
index 247c202755b..fc5176e338b 100644
--- a/contrib/unaccent/unaccent.c
+++ b/contrib/unaccent/unaccent.c
@@ -3,7 +3,7 @@
* unaccent.c
* Text search unaccent dictionary
*
- * Copyright (c) 2009-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2019, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/unaccent/unaccent.c
@@ -14,13 +14,16 @@
#include "postgres.h"
#include "catalog/namespace.h"
+#include "catalog/pg_ts_dict.h"
#include "commands/defrem.h"
#include "lib/stringinfo.h"
#include "tsearch/ts_cache.h"
#include "tsearch/ts_locale.h"
#include "tsearch/ts_public.h"
#include "utils/builtins.h"
+#include "utils/lsyscache.h"
#include "utils/regproc.h"
+#include "utils/syscache.h"
PG_MODULE_MAGIC;
@@ -376,7 +379,21 @@ unaccent_dict(PG_FUNCTION_ARGS)
if (PG_NARGS() == 1)
{
- dictOid = get_ts_dict_oid(stringToQualifiedNameList("unaccent"), false);
+ /*
+ * Use the "unaccent" dictionary that is in the same schema that this
+ * function is in.
+ */
+ Oid procnspid = get_func_namespace(fcinfo->flinfo->fn_oid);
+ const char *dictname = "unaccent";
+
+ dictOid = GetSysCacheOid2(TSDICTNAMENSP, Anum_pg_ts_dict_oid,
+ PointerGetDatum(dictname),
+ ObjectIdGetDatum(procnspid));
+ if (!OidIsValid(dictOid))
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("text search dictionary \"%s.%s\" does not exist",
+ get_namespace_name(procnspid), dictname)));
strArg = 0;
}
else
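A hedged sketch of the behavior this lookup change produces (the schema name
is invented): the one-argument form of unaccent() now resolves the "unaccent"
dictionary in the schema that contains the function itself, rather than via
search_path:

    CREATE SCHEMA ext;
    CREATE EXTENSION unaccent WITH SCHEMA ext;
    SET search_path = public;       -- no longer affects the lookup below
    SELECT ext.unaccent('Hôtel');   -- uses dictionary ext.unaccent => 'Hotel'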
diff --git a/contrib/unaccent/unaccent.rules b/contrib/unaccent/unaccent.rules
index 97f9ed47cfa..99826408ac1 100644
--- a/contrib/unaccent/unaccent.rules
+++ b/contrib/unaccent/unaccent.rules
@@ -399,6 +399,140 @@
ʦ ts
ʪ ls
ʫ lz
+ʹ '
+ʺ "
+ʻ '
+ʼ '
+ʽ '
+˂ <
+˃ >
+˄ ^
+ˆ ^
+ˈ '
+ˋ `
+ː :
+˖ +
+˗ -
+˜ ~
+̀
+́
+̂
+̃
+̄
+̅
+̆
+̇
+̈
+̉
+̊
+̋
+̌
+̍
+̎
+̏
+̐
+̑
+̒
+̓
+̔
+̕
+̖
+̗
+̘
+̙
+̚
+̛
+̜
+̝
+̞
+̟
+̠
+̡
+̢
+̣
+̤
+̥
+̦
+̧
+̨
+̩
+̪
+̫
+̬
+̭
+̮
+̯
+̰
+̱
+̲
+̳
+̴
+̵
+̶
+̷
+̸
+̹
+̺
+̻
+̼
+̽
+̾
+̿
+̀
+́
+͂
+̓
+̈́
+ͅ
+͆
+͇
+͈
+͉
+͊
+͋
+͌
+͍
+͎
+͏
+͐
+͑
+͒
+͓
+͔
+͕
+͖
+͗
+͘
+͙
+͚
+͛
+͜
+͝
+͞
+͟
+͠
+͡
+͢
+Ά Α
+Έ Ε
+Ή Η
+Ί Ι
+Ό Ο
+Ύ Υ
+Ώ Ω
+ΐ ι
+Ϊ Ι
+Ϋ Υ
+ά α
+έ ε
+ή η
+ί ι
+ΰ υ
+ϊ ι
+ϋ υ
+ό ο
+ύ υ
+ώ ω
Ё Е
ё е
ᴀ A
@@ -709,6 +843,207 @@
ỽ v
Ỿ Y
ỿ y
+ἀ α
+ἁ α
+ἂ α
+ἃ α
+ἄ α
+ἅ α
+ἆ α
+ἇ α
+Ἀ Α
+Ἁ Α
+Ἂ Α
+Ἃ Α
+Ἄ Α
+Ἅ Α
+Ἆ Α
+Ἇ Α
+ἐ ε
+ἑ ε
+ἒ ε
+ἓ ε
+ἔ ε
+ἕ ε
+Ἐ Ε
+Ἑ Ε
+Ἒ Ε
+Ἓ Ε
+Ἔ Ε
+Ἕ Ε
+ἠ η
+ἡ η
+ἢ η
+ἣ η
+ἤ η
+ἥ η
+ἦ η
+ἧ η
+Ἠ Η
+Ἡ Η
+Ἢ Η
+Ἣ Η
+Ἤ Η
+Ἥ Η
+Ἦ Η
+Ἧ Η
+ἰ ι
+ἱ ι
+ἲ ι
+ἳ ι
+ἴ ι
+ἵ ι
+ἶ ι
+ἷ ι
+Ἰ Ι
+Ἱ Ι
+Ἲ Ι
+Ἳ Ι
+Ἴ Ι
+Ἵ Ι
+Ἶ Ι
+Ἷ Ι
+ὀ ο
+ὁ ο
+ὂ ο
+ὃ ο
+ὄ ο
+ὅ ο
+Ὀ Ο
+Ὁ Ο
+Ὂ Ο
+Ὃ Ο
+Ὄ Ο
+Ὅ Ο
+ὐ υ
+ὑ υ
+ὒ υ
+ὓ υ
+ὔ υ
+ὕ υ
+ὖ υ
+ὗ υ
+Ὑ Υ
+Ὓ Υ
+Ὕ Υ
+Ὗ Υ
+ὠ ω
+ὡ ω
+ὢ ω
+ὣ ω
+ὤ ω
+ὥ ω
+ὦ ω
+ὧ ω
+Ὠ Ω
+Ὡ Ω
+Ὢ Ω
+Ὣ Ω
+Ὤ Ω
+Ὥ Ω
+Ὦ Ω
+Ὧ Ω
+ὰ α
+ὲ ε
+ὴ η
+ὶ ι
+ὸ ο
+ὺ υ
+ὼ ω
+ᾀ α
+ᾁ α
+ᾂ α
+ᾃ α
+ᾄ α
+ᾅ α
+ᾆ α
+ᾇ α
+ᾈ Α
+ᾉ Α
+ᾊ Α
+ᾋ Α
+ᾌ Α
+ᾍ Α
+ᾎ Α
+ᾏ Α
+ᾐ η
+ᾑ η
+ᾒ η
+ᾓ η
+ᾔ η
+ᾕ η
+ᾖ η
+ᾗ η
+ᾘ Η
+ᾙ Η
+ᾚ Η
+ᾛ Η
+ᾜ Η
+ᾝ Η
+ᾞ Η
+ᾟ Η
+ᾠ ω
+ᾡ ω
+ᾢ ω
+ᾣ ω
+ᾤ ω
+ᾥ ω
+ᾦ ω
+ᾧ ω
+ᾨ Ω
+ᾩ Ω
+ᾪ Ω
+ᾫ Ω
+ᾬ Ω
+ᾭ Ω
+ᾮ Ω
+ᾯ Ω
+ᾰ α
+ᾱ α
+ᾲ α
+ᾳ α
+ᾴ α
+ᾶ α
+ᾷ α
+Ᾰ Α
+Ᾱ Α
+Ὰ Α
+ᾼ Α
+ῂ η
+ῃ η
+ῄ η
+ῆ η
+ῇ η
+Ὲ Ε
+Ὴ Η
+ῌ Η
+ῐ ι
+ῑ ι
+ῒ ι
+ῖ ι
+ῗ ι
+Ῐ Ι
+Ῑ Ι
+Ὶ Ι
+ῠ υ
+ῡ υ
+ῢ υ
+ῤ ρ
+ῥ ρ
+ῦ υ
+ῧ υ
+Ῠ Υ
+Ῡ Υ
+Ὺ Υ
+Ῥ Ρ
+ῲ ω
+ῳ ω
+ῴ ω
+ῶ ω
+ῷ ω
+Ὸ Ο
+Ὼ Ω
+ῼ Ω
‐ -
‑ -
‒ -
@@ -746,6 +1081,13 @@
₧ Pts
₹ Rs
₺ TL
+⃝
+⃞
+⃟
+⃠
+⃢
+⃣
+⃤
℀ a/c
℁ a/s
ℂ C
diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c
index 179305b9547..f5ae915f243 100644
--- a/contrib/uuid-ossp/uuid-ossp.c
+++ b/contrib/uuid-ossp/uuid-ossp.c
@@ -2,7 +2,7 @@
*
* UUID generation functions using the BSD, E2FS or OSSP UUID library
*
- * Copyright (c) 2007-2018, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2019, PostgreSQL Global Development Group
*
* Portions Copyright (c) 2009 Andrew Gierth
*
diff --git a/contrib/vacuumlo/.gitignore b/contrib/vacuumlo/.gitignore
index 07f6ab4fd7c..f3f0ce3d80b 100644
--- a/contrib/vacuumlo/.gitignore
+++ b/contrib/vacuumlo/.gitignore
@@ -1 +1,3 @@
/vacuumlo
+
+/tmp_check/
diff --git a/contrib/vacuumlo/Makefile b/contrib/vacuumlo/Makefile
index 71106ff69c6..3efcb46735c 100644
--- a/contrib/vacuumlo/Makefile
+++ b/contrib/vacuumlo/Makefile
@@ -6,6 +6,8 @@ PGAPPICON = win32
PROGRAM = vacuumlo
OBJS = vacuumlo.o $(WIN32RES)
+TAP_TESTS = 1
+
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS_INTERNAL = $(libpq_pgport)
diff --git a/contrib/vacuumlo/t/001_basic.pl b/contrib/vacuumlo/t/001_basic.pl
new file mode 100644
index 00000000000..2bfb6ce17d9
--- /dev/null
+++ b/contrib/vacuumlo/t/001_basic.pl
@@ -0,0 +1,9 @@
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 8;
+
+program_help_ok('vacuumlo');
+program_version_ok('vacuumlo');
+program_options_handling_ok('vacuumlo');
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index 7eb474ca3e4..533e2ce33c5 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -3,7 +3,7 @@
* vacuumlo.c
* This removes orphaned large objects from a database.
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -23,9 +23,11 @@
#include "catalog/pg_class_d.h"
+#include "common/logging.h"
#include "fe_utils/connect.h"
#include "libpq-fe.h"
#include "pg_getopt.h"
+#include "getopt_long.h"
#define BUFSIZE 1024
@@ -108,8 +110,7 @@ vacuumlo(const char *database, const struct _param *param)
conn = PQconnectdbParams(keywords, values, true);
if (!conn)
{
- fprintf(stderr, "Connection to database \"%s\" failed\n",
- database);
+ pg_log_error("connection to database \"%s\" failed", database);
return -1;
}
@@ -128,8 +129,8 @@ vacuumlo(const char *database, const struct _param *param)
/* check to see that the backend connection was successfully made */
if (PQstatus(conn) == CONNECTION_BAD)
{
- fprintf(stderr, "Connection to database \"%s\" failed:\n%s",
- database, PQerrorMessage(conn));
+ pg_log_error("connection to database \"%s\" failed: %s",
+ database, PQerrorMessage(conn));
PQfinish(conn);
return -1;
}
@@ -144,8 +145,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, "Failed to set search_path:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to set search_path: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -164,8 +164,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to create temp table:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to create temp table: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -181,8 +180,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to vacuum temp table:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to vacuum temp table: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -197,9 +195,6 @@ vacuumlo(const char *database, const struct _param *param)
* table formed above is ignored, and pg_largeobject will be too. If
* either of these were scanned, obviously we'd end up with nothing to
* delete...
- *
- * NOTE: the system oid column is ignored, as it has attnum < 1. This
- * shouldn't matter for correctness, but it saves time.
*/
buf[0] = '\0';
strcat(buf, "SELECT s.nspname, c.relname, a.attname ");
@@ -214,8 +209,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, "Failed to find OID columns:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to find OID columns: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -240,14 +234,14 @@ vacuumlo(const char *database, const struct _param *param)
if (!schema || !table || !field)
{
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("%s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
if (schema != NULL)
PQfreemem(schema);
- if (schema != NULL)
+ if (table != NULL)
PQfreemem(table);
- if (schema != NULL)
+ if (field != NULL)
PQfreemem(field);
return -1;
}
@@ -259,9 +253,8 @@ vacuumlo(const char *database, const struct _param *param)
res2 = PQexec(conn, buf);
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to check %s in table %s.%s:\n",
- field, schema, table);
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to check %s in table %s.%s: %s",
+ field, schema, table, PQerrorMessage(conn));
PQclear(res2);
PQclear(res);
PQfinish(conn);
@@ -290,8 +283,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to start transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to start transaction: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -304,7 +296,7 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn));
+ pg_log_error("DECLARE CURSOR failed: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -316,12 +308,12 @@ vacuumlo(const char *database, const struct _param *param)
deleted = 0;
- while (1)
+ do
{
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, "FETCH FORWARD failed: %s", PQerrorMessage(conn));
+ pg_log_error("FETCH FORWARD failed: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -349,13 +341,12 @@ vacuumlo(const char *database, const struct _param *param)
{
if (lo_unlink(conn, lo) < 0)
{
- fprintf(stderr, "\nFailed to remove lo %u: ", lo);
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to remove lo %u: %s", lo,
+ PQerrorMessage(conn));
if (PQtransactionStatus(conn) == PQTRANS_INERROR)
{
success = false;
- PQclear(res);
- break;
+ break; /* out of inner for-loop */
}
}
else
@@ -370,8 +361,8 @@ vacuumlo(const char *database, const struct _param *param)
res2 = PQexec(conn, "commit");
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to commit transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to commit transaction: %s",
+ PQerrorMessage(conn));
PQclear(res2);
PQclear(res);
PQfinish(conn);
@@ -381,8 +372,8 @@ vacuumlo(const char *database, const struct _param *param)
res2 = PQexec(conn, "begin");
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to start transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to start transaction: %s",
+ PQerrorMessage(conn));
PQclear(res2);
PQclear(res);
PQfinish(conn);
@@ -393,7 +384,7 @@ vacuumlo(const char *database, const struct _param *param)
}
PQclear(res);
- }
+ } while (success);
/*
* That's all folks!
@@ -401,8 +392,8 @@ vacuumlo(const char *database, const struct _param *param)
res = PQexec(conn, "commit");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to commit transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ pg_log_error("failed to commit transaction: %s",
+ PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
@@ -434,31 +425,47 @@ usage(const char *progname)
printf("%s removes unreferenced large objects from databases.\n\n", progname);
printf("Usage:\n %s [OPTION]... DBNAME...\n\n", progname);
printf("Options:\n");
- printf(" -l LIMIT commit after removing each LIMIT large objects\n");
- printf(" -n don't remove large objects, just show what would be done\n");
- printf(" -v write a lot of progress messages\n");
- printf(" -V, --version output version information, then exit\n");
- printf(" -?, --help show this help, then exit\n");
+ printf(" -l, --limit=LIMIT commit after removing each LIMIT large objects\n");
+ printf(" -n, --dry-run don't remove large objects, just show what would be done\n");
+ printf(" -v, --verbose write a lot of progress messages\n");
+ printf(" -V, --version output version information, then exit\n");
+ printf(" -?, --help show this help, then exit\n");
printf("\nConnection options:\n");
- printf(" -h HOSTNAME database server host or socket directory\n");
- printf(" -p PORT database server port\n");
- printf(" -U USERNAME user name to connect as\n");
- printf(" -w never prompt for password\n");
- printf(" -W force password prompt\n");
+ printf(" -h, --host=HOSTNAME database server host or socket directory\n");
+ printf(" -p, --port=PORT database server port\n");
+ printf(" -U, --username=USERNAME user name to connect as\n");
+ printf(" -w, --no-password never prompt for password\n");
+ printf(" -W, --password force password prompt\n");
printf("\n");
- printf("Report bugs to .\n");
+ printf("Report bugs to .\n");
}
int
main(int argc, char **argv)
{
+ static struct option long_options[] = {
+ {"host", required_argument, NULL, 'h'},
+ {"limit", required_argument, NULL, 'l'},
+ {"dry-run", no_argument, NULL, 'n'},
+ {"port", required_argument, NULL, 'p'},
+ {"username", required_argument, NULL, 'U'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"version", no_argument, NULL, 'V'},
+ {"no-password", no_argument, NULL, 'w'},
+ {"password", no_argument, NULL, 'W'},
+ {"help", no_argument, NULL, '?'},
+ {NULL, 0, NULL, 0}
+ };
+
int rc = 0;
struct _param param;
int c;
int port;
const char *progname;
+ int optindex;
+ pg_logging_init(argv[0]);
progname = get_progname(argv[0]);
/* Set default parameter values */
@@ -486,64 +493,59 @@ main(int argc, char **argv)
}
}
- while (1)
+ while ((c = getopt_long(argc, argv, "h:l:np:U:vwW", long_options, &optindex)) != -1)
{
- c = getopt(argc, argv, "h:l:U:p:vnwW");
- if (c == -1)
- break;
-
switch (c)
{
case '?':
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit(1);
- case ':':
- exit(1);
- case 'v':
- param.verbose = 1;
- break;
- case 'n':
- param.dry_run = 1;
- param.verbose = 1;
+ case 'h':
+ param.pg_host = pg_strdup(optarg);
break;
case 'l':
param.transaction_limit = strtol(optarg, NULL, 10);
if (param.transaction_limit < 0)
{
- fprintf(stderr,
- "%s: transaction limit must not be negative (0 disables)\n",
- progname);
+ pg_log_error("transaction limit must not be negative (0 disables)");
exit(1);
}
break;
- case 'U':
- param.pg_user = pg_strdup(optarg);
- break;
- case 'w':
- param.pg_prompt = TRI_NO;
- break;
- case 'W':
- param.pg_prompt = TRI_YES;
+ case 'n':
+ param.dry_run = 1;
+ param.verbose = 1;
break;
case 'p':
port = strtol(optarg, NULL, 10);
if ((port < 1) || (port > 65535))
{
- fprintf(stderr, "%s: invalid port number: %s\n", progname, optarg);
+ pg_log_error("invalid port number: %s", optarg);
exit(1);
}
param.pg_port = pg_strdup(optarg);
break;
- case 'h':
- param.pg_host = pg_strdup(optarg);
+ case 'U':
+ param.pg_user = pg_strdup(optarg);
break;
+ case 'v':
+ param.verbose = 1;
+ break;
+ case 'w':
+ param.pg_prompt = TRI_NO;
+ break;
+ case 'W':
+ param.pg_prompt = TRI_YES;
+ break;
+ default:
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ exit(1);
}
}
/* No database given? Show usage */
if (optind >= argc)
{
- fprintf(stderr, "vacuumlo: missing required argument: database name\n");
+ pg_log_error("missing required argument: database name");
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit(1);
}
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
index 95e580df088..1e5b71d9a02 100644
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -41,16 +41,16 @@ typedef struct
/* local declarations */
static xmlChar *pgxmlNodeSetToText(xmlNodeSetPtr nodeset,
- xmlChar *toptagname, xmlChar *septagname,
- xmlChar *plainsep);
+ xmlChar *toptagname, xmlChar *septagname,
+ xmlChar *plainsep);
static text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag,
- xmlChar *septag, xmlChar *plainsep);
+ xmlChar *septag, xmlChar *plainsep);
static xmlChar *pgxml_texttoxmlchar(text *textstring);
static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar *xpath,
- xpath_workspace *workspace);
+ xpath_workspace *workspace);
static void cleanup_workspace(xpath_workspace *workspace);
diff --git a/doc/bug.template b/doc/bug.template
deleted file mode 100644
index 4d767bfd516..00000000000
--- a/doc/bug.template
+++ /dev/null
@@ -1,53 +0,0 @@
-If PostgreSQL failed to compile on your computer or you found a bug,
-please fill out this form and e-mail it to pgsql-bugs@postgresql.org.
-
-If your bug report has security implications and you'd prefer that it not
-become immediately visible in public archives, don't send it to pgsql-bugs.
-Security issues can be reported privately to security@postgresql.org.
-
-If you not only found the problem but solved it and generated a patch
-then e-mail it to pgsql-hackers@postgresql.org instead. Please use the
-command "diff -c" to generate the patch.
-
-You may also enter a bug report at https://www.postgresql.org/ instead of
-e-mailing this form.
-
-============================================================================
- POSTGRESQL BUG REPORT TEMPLATE
-============================================================================
-
-
-Your name :
-Your email address :
-
-
-System Configuration:
----------------------
- Architecture (example: Intel Pentium) :
-
- Operating System (example: Linux 2.4.18) :
-
- PostgreSQL version (example: PostgreSQL 11devel): PostgreSQL 11devel
-
- Compiler used (example: gcc 3.3.5) :
-
-
-Please enter a FULL description of your problem:
-------------------------------------------------
-
-
-
-
-
-Please describe a way to repeat the problem. Please try to provide a
-concise reproducible example, if at all possible:
-----------------------------------------------------------------------
-
-
-
-
-
-If you know how this problem might be fixed, list the solution below:
----------------------------------------------------------------------
-
-
diff --git a/doc/src/sgml/.gitignore b/doc/src/sgml/.gitignore
index a72b7ccb06c..acf7b4f10f7 100644
--- a/doc/src/sgml/.gitignore
+++ b/doc/src/sgml/.gitignore
@@ -15,6 +15,7 @@
/features-supported.sgml
/features-unsupported.sgml
/errcodes-table.sgml
+/keywords-table.sgml
/version.sgml
# Assorted byproducts from building the above
/postgres.xml
diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile
index 74aac01c395..0401a515df8 100644
--- a/doc/src/sgml/Makefile
+++ b/doc/src/sgml/Makefile
@@ -53,10 +53,13 @@ override XSLTPROCFLAGS += --stringparam pg.version '$(VERSION)'
GENERATED_SGML = version.sgml \
- features-supported.sgml features-unsupported.sgml errcodes-table.sgml
+ features-supported.sgml features-unsupported.sgml errcodes-table.sgml \
+ keywords-table.sgml
ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
+ALL_IMAGES := $(wildcard $(srcdir)/images/*.svg)
+
##
## Man pages
@@ -94,24 +97,21 @@ features-unsupported.sgml: $(top_srcdir)/src/backend/catalog/sql_feature_package
errcodes-table.sgml: $(top_srcdir)/src/backend/utils/errcodes.txt generate-errcodes-table.pl
$(PERL) $(srcdir)/generate-errcodes-table.pl $< > $@
+keywords-table.sgml: $(top_srcdir)/src/include/parser/kwlist.h $(wildcard $(srcdir)/keywords/sql*.txt) generate-keywords-table.pl
+ $(PERL) $(srcdir)/generate-keywords-table.pl $(srcdir) > $@
+
##
## Generation of some text files.
##
ICONV = iconv
-LYNX = lynx
-
-# The documentation may contain non-ASCII characters (mostly for
-# contributor names), which lynx converts to the encoding determined
-# by the current locale. To get text output that is deterministic and
-# easily readable by everyone, we make lynx produce LATIN1 and then
-# convert that to ASCII with transliteration for the non-ASCII characters.
-# Official releases were historically built on FreeBSD, which has limited
-# locale support and is very picky about locale name spelling. The
-# below has been finely tuned to run on FreeBSD and Linux/glibc.
+PANDOC = pandoc
+
INSTALL: % : %.html
- $(PERL) -p -e 's, $@
+ $(PANDOC) -t plain -o $@.tmp $<
+ $(ICONV) -f utf8 -t us-ascii//TRANSLIT $@.tmp > $@
+ rm $@.tmp
INSTALL.html: %.html : stylesheet-text.xsl %.xml
$(XMLLINT) --noout --valid $*.xml
@@ -131,24 +131,30 @@ endif
html: html-stamp
-html-stamp: stylesheet.xsl postgres.sgml $(ALLSGML)
+html-stamp: stylesheet.xsl postgres.sgml $(ALLSGML) $(ALL_IMAGES)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $(wordlist 1,2,$^)
+ cp $(ALL_IMAGES) html/
cp $(srcdir)/stylesheet.css html/
touch $@
-htmlhelp: stylesheet-hh.xsl postgres.sgml $(ALLSGML)
+htmlhelp: htmlhelp-stamp
+
+htmlhelp-stamp: stylesheet-hh.xsl postgres.sgml $(ALLSGML) $(ALL_IMAGES)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(wordlist 1,2,$^)
+ cp $(ALL_IMAGES) htmlhelp/
+ cp $(srcdir)/stylesheet.css htmlhelp/
+ touch $@
# single-page HTML
-postgres.html: stylesheet-html-nochunk.xsl postgres.sgml $(ALLSGML)
+postgres.html: stylesheet-html-nochunk.xsl postgres.sgml $(ALLSGML) $(ALL_IMAGES)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) -o $@ $(wordlist 1,2,$^)
# single-page text
postgres.txt: postgres.html
- $(LYNX) -force_html -dump -nolist $< > $@
+ $(PANDOC) -t plain -o $@ $<
##
@@ -158,15 +164,17 @@ postgres.txt: postgres.html
postgres.pdf:
$(error Invalid target; use postgres-A4.pdf or postgres-US.pdf as targets)
+XSLTPROC_FO_FLAGS += --stringparam img.src.path '$(srcdir)/'
+
%-A4.fo: stylesheet-fo.xsl %.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
- $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $(wordlist 1,2,$^)
+ $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_FO_FLAGS) --stringparam paper.type A4 -o $@ $(wordlist 1,2,$^)
%-US.fo: stylesheet-fo.xsl %.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
- $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $(wordlist 1,2,$^)
+ $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_FO_FLAGS) --stringparam paper.type USletter -o $@ $(wordlist 1,2,$^)
-%.pdf: %.fo
+%.pdf: %.fo $(ALL_IMAGES)
$(FOP) -fo $< -pdf $@
@@ -175,9 +183,9 @@ postgres.pdf:
##
epub: postgres.epub
-postgres.epub: postgres.sgml $(ALLSGML)
+postgres.epub: postgres.sgml $(ALLSGML) $(ALL_IMAGES)
$(XMLLINT) --noout --valid $<
- $(DBTOEPUB) $<
+ $(DBTOEPUB) -o $@ $<
##
@@ -290,7 +298,7 @@ clean:
# generated SGML files
rm -f $(GENERATED_SGML)
# HTML Help
- rm -f htmlhelp.hhp toc.hhc index.hhk
+ rm -rf htmlhelp/ htmlhelp-stamp
# EPUB
rm -f postgres.epub
# Texinfo
diff --git a/doc/src/sgml/README.links b/doc/src/sgml/README.links
index f64b8573169..db6577d2436 100644
--- a/doc/src/sgml/README.links
+++ b/doc/src/sgml/README.links
@@ -1,6 +1,6 @@
-Linking within SGML documents can be confusing, so here is a summary:
+Linking within DocBook documents can be confusing, so here is a summary:
Intra-document Linking
@@ -40,7 +40,10 @@ url=
Guidelines
----------
-o If you want to supply text, use <link>, else <xref>
-o Do not use text with <ulink> so the URL appears in printed output
-o Specific nouns like GUC variables, SQL commands, and contrib modules
- usually have xreflabels
+- For an internal link, if you want to supply text, use <link>, else
+  <xref>.
+
+- Specific nouns like GUC variables, SQL commands, and contrib modules
+ usually have xreflabels.
+
+- For an external link, use <ulink>, with or without link text.
diff --git a/doc/src/sgml/acronyms.sgml b/doc/src/sgml/acronyms.sgml
index 638ffc9fe83..411e368a9c6 100644
--- a/doc/src/sgml/acronyms.sgml
+++ b/doc/src/sgml/acronyms.sgml
@@ -13,7 +13,7 @@
ANSI
-
+
American National Standards Institute
@@ -23,7 +23,7 @@
API
- Application Programming Interface
+ Application Programming Interface
@@ -32,7 +32,7 @@
ASCII
- American Standard
+ American Standard
Code for Information Interchange
@@ -51,7 +51,7 @@
CA
- Certificate Authority
+ Certificate Authority
@@ -61,7 +61,7 @@
Classless
+ url="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless
Inter-Domain Routing
@@ -71,7 +71,7 @@
CPAN
- Comprehensive Perl Archive Network
+ Comprehensive Perl Archive Network
@@ -81,7 +81,7 @@
Certificate
+ url="https://en.wikipedia.org/wiki/Certificate_revocation_list">Certificate
Revocation List
@@ -92,7 +92,7 @@
Comma
+ url="https://en.wikipedia.org/wiki/Comma-separated_values">Comma
Separated Values
@@ -121,7 +121,7 @@
Database
+ url="https://en.wikipedia.org/wiki/Database_administrator">Database
Administrator
@@ -131,7 +131,7 @@
DBI
- Database Interface (Perl)
+ Database Interface (Perl)
@@ -140,7 +140,7 @@
DBMS
- Database Management
+ Database Management
System
@@ -151,7 +151,7 @@
Data
+ url="https://en.wikipedia.org/wiki/Data_Definition_Language">Data
Definition Language , SQL commands such as CREATE
TABLE , ALTER USER
@@ -163,7 +163,7 @@
Data
+ url="https://en.wikipedia.org/wiki/Data_Manipulation_Language">Data
Manipulation Language , SQL commands such as INSERT ,
UPDATE , DELETE
@@ -175,7 +175,7 @@
Daylight
+ url="https://en.wikipedia.org/wiki/Daylight_saving_time">Daylight
Saving Time
@@ -194,7 +194,7 @@
ESQL
- Embedded
+ Embedded
SQL
@@ -204,7 +204,7 @@
FAQ
- Frequently Asked
+ Frequently Asked
Questions
@@ -251,7 +251,7 @@
Git
+ url="https://en.wikipedia.org/wiki/Git_(software)">Git
@@ -260,7 +260,7 @@
GMT
- Greenwich Mean Time
+ Greenwich Mean Time
@@ -270,7 +270,7 @@
Generic
+ url="https://en.wikipedia.org/wiki/Generic_Security_Services_Application_Program_Interface">Generic
Security Services Application Programming Interface
@@ -300,7 +300,7 @@
Heap-Only
+ url="https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/access/heap/README.HOT;hb=HEAD">Heap-Only
Tuples
@@ -311,7 +311,7 @@
International
+ url="https://en.wikipedia.org/wiki/International_Electrotechnical_Commission">International
Electrotechnical Commission
@@ -332,7 +332,7 @@
Inter-Process
+ url="https://en.wikipedia.org/wiki/Inter-process_communication">Inter-Process
Communication
@@ -342,7 +342,7 @@
ISO
- International Organization for
+ International Organization for
Standardization
@@ -352,7 +352,7 @@
ISSN
- International Standard
+ International Standard
Serial Number
@@ -363,7 +363,7 @@
Java
+ url="https://en.wikipedia.org/wiki/Java_Database_Connectivity">Java
Database Connectivity
@@ -394,7 +394,7 @@
Lightweight
+ url="https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol">Lightweight
Directory Access Protocol
@@ -415,7 +415,7 @@
Microsoft
+ url="https://en.wikipedia.org/wiki/Visual_C++">Microsoft
Visual C
@@ -435,7 +435,7 @@
National
+ url="https://en.wikipedia.org/wiki/Internationalization_and_localization">National
Language Support
@@ -446,7 +446,7 @@
Open
+ url="https://en.wikipedia.org/wiki/Open_Database_Connectivity">Open
Database Connectivity
@@ -465,7 +465,7 @@
OLAP
- Online Analytical
+ Online Analytical
Processing
@@ -475,7 +475,7 @@
OLTP
- Online Transaction
+ Online Transaction
Processing
@@ -485,7 +485,7 @@
ORDBMS
- Object-Relational
+ Object-Relational
Database Management System
@@ -496,7 +496,7 @@
Pluggable
+ url="https://en.wikipedia.org/wiki/Pluggable_Authentication_Modules">Pluggable
Authentication Modules
@@ -524,7 +524,7 @@
PID
- Process Identifier
+ Process Identifier
@@ -552,7 +552,7 @@
POSIX
- Portable Operating
+ Portable Operating
System Interface
@@ -563,7 +563,7 @@
Relational
+ url="https://en.wikipedia.org/wiki/Relational_database_management_system">Relational
Database Management System
@@ -574,7 +574,7 @@
Request For
+ url="https://en.wikipedia.org/wiki/Request_for_Comments">Request For
Comments
@@ -584,7 +584,7 @@
SGML
- Standard Generalized
+ Standard Generalized
Markup Language
@@ -612,7 +612,7 @@
SQL
- Structured Query Language
+ Structured Query Language
@@ -630,7 +630,7 @@
SSH
- Secure
+ Secure
Shell
@@ -640,7 +640,7 @@
SSL
- Secure Sockets Layer
+ Secure Sockets Layer
@@ -649,7 +649,7 @@
SSPI
- Security
+ Security
Support Provider Interface
@@ -659,7 +659,7 @@
SYSV
- Unix System V
+ Unix System V
@@ -669,7 +669,7 @@
Transmission
+ url="https://en.wikipedia.org/wiki/Transmission_Control_Protocol">Transmission
Control Protocol (TCP) / Internet Protocol (IP)
@@ -707,7 +707,7 @@
URL
- Uniform Resource
+ Uniform Resource
Locator
@@ -718,7 +718,7 @@
Coordinated
+ url="https://en.wikipedia.org/wiki/Coordinated_Universal_Time">Coordinated
Universal Time
@@ -738,7 +738,7 @@
UTF8
- Eight-Bit Unicode
+ Eight-Bit Unicode
Transformation Format
@@ -775,7 +775,7 @@
XML
- Extensible Markup
+ Extensible Markup
Language
diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml
index a712c86a10f..fe0fe9c186e 100644
--- a/doc/src/sgml/amcheck.sgml
+++ b/doc/src/sgml/amcheck.sgml
@@ -35,7 +35,7 @@
functions.
- amcheck functions may be used only by superusers.
+ amcheck functions may only be used by superusers.
@@ -55,7 +55,7 @@
bt_index_check tests that its target, a
B-Tree index, respects a variety of invariants. Example usage:
-test=# SELECT bt_index_check(index => c.oid, heapallindexed => i.indisunique)
+test=# SELECT bt_index_check(index => c.oid, heapallindexed => i.indisunique),
c.relname,
c.relpages
FROM pg_index i
@@ -67,7 +67,7 @@ WHERE am.amname = 'btree' AND n.nspname = 'pg_catalog'
-- Don't check temp tables, which may be from another session:
AND c.relpersistence != 't'
-- Function may throw an error when this is omitted:
-AND i.indisready AND i.indisvalid
+AND c.relkind = 'i' AND i.indisready AND i.indisvalid
ORDER BY c.relpages DESC LIMIT 10;
bt_index_check | relname | relpages
----------------+---------------------------------+----------
@@ -83,14 +83,13 @@ ORDER BY c.relpages DESC LIMIT 10;
| pg_amop_fam_strat_index | 5
(10 rows)
- This example shows a session that performs verification of every
- catalog index in the database test
. Details of just
- the 10 largest indexes verified are displayed. Verification of
- the presence of heap tuples as index tuples is requested for
- unique indexes only. Since no error is raised, all indexes
- tested appear to be logically consistent. Naturally, this query
- could easily be changed to call
- bt_index_check for every index in the
+ This example shows a session that performs verification of the
+ 10 largest catalog indexes in the database test
.
+ Verification of the presence of heap tuples as index tuples is
+ requested for the subset that are unique indexes. Since no
+ error is raised, all indexes tested appear to be logically
+ consistent. Naturally, this query could easily be changed to
+ call bt_index_check for every index in the
database where verification is supported.
@@ -113,7 +112,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- bt_index_parent_check(index regclass, heapallindexed boolean) returns void
+ bt_index_parent_check(index regclass, heapallindexed boolean, rootdescend boolean) returns void
bt_index_parent_check
@@ -126,7 +125,11 @@ ORDER BY c.relpages DESC LIMIT 10;
Optionally, when the heapallindexed
argument is true , the function verifies the
presence of all heap tuples that should be found within the
- index. The checks that can be performed by
+ index, and that there are no missing downlinks in the index
+ structure. When the optional rootdescend
+ argument is true , verification re-finds
+ tuples on the leaf level by performing a new search from the
+ root page for each tuple. The checks that can be performed by
bt_index_parent_check are a superset of the
checks that can be performed by bt_index_check .
bt_index_parent_check can be thought of as
@@ -166,7 +169,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- Optional heapallindexed verification
+ Optional heapallindexed Verification
When the heapallindexed argument to
verification functions is true , an additional
@@ -207,7 +210,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- Using amcheck effectively
+ Using amcheck Effectively
amcheck can be effective at detecting various types of
@@ -291,8 +294,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- Corruption caused by faulty RAM, and the broader memory subsystem
- and operating system.
+ Corruption caused by faulty RAM, or the broader memory subsystem.
PostgreSQL does not protect against correctable
@@ -317,7 +319,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- Repairing corruption
+ Repairing Corruption
No error concerning corruption raised by amcheck should
ever be a false positive. amcheck raises
diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml
index 53f8049df38..9ffb8427bf0 100644
--- a/doc/src/sgml/arch-dev.sgml
+++ b/doc/src/sgml/arch-dev.sgml
@@ -114,7 +114,7 @@
- How Connections are Established
+ How Connections Are Established
PostgreSQL is implemented using a
diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml
index f4d4a610ef3..a473fa8ee8b 100644
--- a/doc/src/sgml/array.sgml
+++ b/doc/src/sgml/array.sgml
@@ -766,9 +766,9 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
For example, elements containing curly braces, commas (or the data type's
delimiter character), double quotes, backslashes, or leading or trailing
whitespace must be double-quoted. Empty strings and strings matching the
- word NULL must be quoted, too. To put a double quote or
- backslash in a quoted array element value, use escape string syntax
- and precede it with a backslash. Alternatively, you can avoid quotes and use
+ word NULL must be quoted, too. To put a double
+ quote or backslash in a quoted array element value, precede it
+ with a backslash. Alternatively, you can avoid quotes and use
backslash-escaping to protect all data characters that would otherwise
be taken as array syntax.
@@ -781,27 +781,6 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
non-whitespace characters of an element, is not ignored.
-
-
- Remember that what you write in an SQL command will first be interpreted
- as a string literal, and then as an array. This doubles the number of
- backslashes you need. For example, to insert a text array
- value containing a backslash and a double quote, you'd need to write:
-
-INSERT ... VALUES (E'{"\\\\","\\""}');
-
- The escape string processor removes one level of backslashes, so that
- what arrives at the array-value parser looks like {"\\","\""} .
- In turn, the strings fed to the text data type's input routine
- become \ and " respectively. (If we were working
- with a data type whose input routine also treated backslashes specially,
- bytea for example, we might need as many as eight backslashes
- in the command to get one backslash into the stored array element.)
- Dollar quoting (see ) can be
- used to avoid the need to double backslashes.
-
-
-
The ARRAY constructor syntax (see
diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml
index 08b67f2600b..3d619d4a3dd 100644
--- a/doc/src/sgml/auto-explain.sgml
+++ b/doc/src/sgml/auto-explain.sgml
@@ -54,10 +54,11 @@ LOAD 'auto_explain';
auto_explain.log_min_duration is the minimum statement
execution time, in milliseconds, that will cause the statement's plan to
- be logged. Setting this to zero logs all plans. Minus-one (the default)
- disables logging of plans. For example, if you set it to
- 250ms then all statements that run 250ms or longer
- will be logged. Only superusers can change this setting.
+ be logged. Setting this to 0 logs all plans.
+ -1 (the default) disables logging of plans. For
+ example, if you set it to 250ms then all statements
+ that run 250ms or longer will be logged. Only superusers can change this
+ setting.
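A minimal sketch of the setting described above, applied at the session
level (requires superuser):

LOAD 'auto_explain';
SET auto_explain.log_min_duration = '250ms'; -- log plans of statements running 250ms or longer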
@@ -169,6 +170,24 @@ LOAD 'auto_explain';
+
+
+ auto_explain.log_settings (boolean )
+
+ auto_explain.log_settings configuration parameter
+
+
+
+
+ auto_explain.log_settings controls whether information
+ about modified configuration options is printed when an execution plan is logged.
+ Only options that affect query planning and whose values differ from the built-in
+ default are included in the output. This parameter is off by default.
+ Only superusers can change this setting.
+
+
+
+
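For example, to include modified planner-related settings in logged plans,
a sketch (superuser only):

SET auto_explain.log_settings = on;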
auto_explain.log_format (enum )
@@ -187,6 +206,27 @@ LOAD 'auto_explain';
+
+
+ auto_explain.log_level (enum )
+
+ auto_explain.log_level configuration parameter
+
+
+
+
+ auto_explain.log_level selects the log level at which
+ auto_explain will log the query plan.
+ Valid values are DEBUG5 , DEBUG4 ,
+ DEBUG3 , DEBUG2 ,
+ DEBUG1 , INFO ,
+ NOTICE , WARNING ,
+ and LOG . The default is LOG .
+ Only superusers can change this setting.
+
+
+
+
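For example, to emit plans at a quieter level than the default LOG, a
sketch (superuser only):

SET auto_explain.log_level = 'DEBUG1';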
auto_explain.log_nested_statements (boolean )
diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml
index 349834c35d8..bdc9026c629 100644
--- a/doc/src/sgml/backup.sgml
+++ b/doc/src/sgml/backup.sgml
@@ -141,7 +141,7 @@ psql dbname < psql exit with an
exit status of 3 if an SQL error occurs:
-psql --set ON_ERROR_STOP=on dbname < dumpfile
+psql --set ON_ERROR_STOP=on dbname < dumpfile
Either way, you will only have a partially restored database.
Alternatively, you can specify that the whole dump should be
@@ -824,8 +824,9 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0
way. The non-exclusive method is recommended and the exclusive one is
deprecated and will eventually be removed.
+
- Making a non-exclusive low level backup
+ Making a Non-Exclusive Low-Level Backup
A non-exclusive low level backup is one that allows other
concurrent backups to be running (both those started using
@@ -947,14 +948,27 @@ SELECT * FROM pg_stop_backup(false, true);
- Making an exclusive low level backup
+ Making an Exclusive Low-Level Backup
+
+
+
+ The exclusive backup method is deprecated and should be avoided.
+ Prior to PostgreSQL 9.6, this was the only
+ low-level method available, but it is now recommended that all users
+ upgrade their scripts to use non-exclusive backups.
+
+
+
The process for an exclusive backup is mostly the same as for a
- non-exclusive one, but it differs in a few key steps. This type of backup
- can only be taken on a primary and does not allow concurrent backups.
- Prior to PostgreSQL 9.6, this
- was the only low-level method available, but it is now recommended that
- all users upgrade their scripts to use non-exclusive backups if possible.
+ non-exclusive one, but it differs in a few key steps. This type of
+ backup can only be taken on a primary and does not allow concurrent
+ backups. Moreover, because it creates a backup label file, as
+ described below, it can block automatic restart of the master server
+ after a crash. On the other hand, the erroneous removal of this
+ file from a backup or standby is a common mistake, which can result
+ in serious data corruption. If it is necessary to use this method,
+ the following steps may be used.
@@ -1011,9 +1025,17 @@ SELECT pg_start_backup('label', true);
consider during this backup.
- Note that if the server crashes during the backup it may not be
- possible to restart until the backup_label file has been
- manually deleted from the PGDATA directory.
+ As noted above, if the server crashes during the backup it may not be
+ possible to restart until the backup_label file has
+ been manually deleted from the PGDATA directory. Note
+ that it is very important to never remove the
+ backup_label file when restoring a backup, because
+ this will result in corruption. Confusion about when it is appropriate
+ to remove this file is a common cause of data corruption when using this
+ method; be very certain that you remove the file only on an existing
+ master and never when building a standby or restoring a backup, even if
+ you are building a standby that will subsequently be promoted to a new
+ master.
@@ -1045,18 +1067,23 @@ SELECT pg_stop_backup();
If the archive process has fallen behind
because of failures of the archive command, it will keep retrying
until the archive succeeds and the backup is complete.
- If you wish to place a time limit on the execution of
- pg_stop_backup , set an appropriate
- statement_timeout value, but make note that if
- pg_stop_backup terminates because of this your backup
- may not be valid.
+
+
+
+ When using exclusive backup mode, it is absolutely imperative to ensure
+ that pg_stop_backup completes successfully at the
+ end of the backup. Even if the backup itself fails, for example due to
+ lack of disk space, failure to call pg_stop_backup
+ will leave the server in backup mode indefinitely, causing future backups
+ to fail and increasing the risk of a restart failure during the time that
+ backup_label exists.
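A defensive sketch for exclusive-mode backup scripts, using the functions
that exist for this mode (surrounding error handling is up to the script):

SELECT pg_is_in_backup();  -- returns true while an exclusive backup is in progress
SELECT pg_stop_backup();   -- must be run even if the file copy itself failed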
- Backing up the data directory
+ Backing Up the Data Directory
Some file system backup tools emit warnings or errors
if the files they are trying to copy change while the copy proceeds.
@@ -1220,8 +1247,11 @@ SELECT pg_stop_backup();
- Create a recovery command file recovery.conf in the cluster
- data directory (see ). You might
+ Set recovery configuration settings in
+ postgresql.conf (see ) and create a file
+ recovery.signal in the cluster
+ data directory. You might
also want to temporarily modify pg_hba.conf to prevent
ordinary users from connecting until you are sure the recovery was successful.
@@ -1232,8 +1262,8 @@ SELECT pg_stop_backup();
proceed to read through the archived WAL files it needs. Should the
recovery be terminated because of an external error, the server can
simply be restarted and it will continue recovery. Upon completion
- of the recovery process, the server will rename
- recovery.conf to recovery.done (to prevent
+ of the recovery process, the server will remove
+ recovery.signal (to prevent
accidentally re-entering recovery mode later) and then
commence normal database operations.
@@ -1249,12 +1279,9 @@ SELECT pg_stop_backup();
- The key part of all this is to set up a recovery configuration file that
+ The key part of all this is to set up a recovery configuration that
describes how you want to recover and how far the recovery should
- run. You can use recovery.conf.sample (normally
- located in the installation's share/ directory) as a
- prototype. The one thing that you absolutely must specify in
- recovery.conf is the restore_command ,
+ run. The one thing that you absolutely must specify is the restore_command ,
which tells PostgreSQL how to retrieve archived
WAL file segments. Like the archive_command , this is
a shell command string. It can contain %f , which is
@@ -1288,7 +1315,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
Not all of the requested files will be WAL segment
files; you should also expect requests for files with a suffix of
- .backup or .history . Also be aware that
+ .history . Also be aware that
the base name of the %p path will be different from
%f ; do not expect them to be interchangeable.
@@ -1316,7 +1343,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
If you want to recover to some previous point in time (say, right before
the junior DBA dropped your main transaction table), just specify the
- required stopping point in recovery.conf . You can specify
+ required stopping point. You can specify
 the stop point, known as the recovery target , either by
date/time, named restore point or by completion of a specific transaction
ID. As of this writing only the date/time and named restore point options
@@ -1414,7 +1441,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
that was current when the base backup was taken. If you wish to recover
into some child timeline (that is, you want to return to some state that
was itself generated after a recovery attempt), you need to specify the
- target timeline ID in recovery.conf . You cannot recover into
+ target timeline ID in . You cannot recover into
timelines that branched off earlier than the base backup.
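Putting these pieces together, a minimal recovery configuration sketch for
postgresql.conf (the path and target values are assumptions, not defaults):

restore_command = 'cp /mnt/server/archivedir/%f %p'
recovery_target_time = '2019-01-01 12:00:00'
recovery_target_timeline = 'latest'

plus an empty recovery.signal file in the cluster data directory.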
diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml
index ac71fb2c41f..bc5a52584b9 100644
--- a/doc/src/sgml/bgworker.sgml
+++ b/doc/src/sgml/bgworker.sgml
@@ -189,15 +189,17 @@ typedef struct BackgroundWorker
Once running, the process can connect to a database by calling
- BackgroundWorkerInitializeConnection(char *dbname , char *username ) or
- BackgroundWorkerInitializeConnectionByOid(Oid dboid , Oid useroid ) .
+ BackgroundWorkerInitializeConnection(char *dbname , char *username , uint32 flags ) or
+ BackgroundWorkerInitializeConnectionByOid(Oid dboid , Oid useroid , uint32 flags ) .
This allows the process to run transactions and queries using the
SPI interface. If dbname is NULL or
dboid is InvalidOid , the session is not connected
to any particular database, but shared catalogs can be accessed.
If username is NULL or useroid is
InvalidOid , the process will run as the superuser created
- during initdb .
+ during initdb . If BGWORKER_BYPASS_ALLOWCONN
+ is specified as flags , it is possible to bypass the
+ restriction against connecting to databases that do not allow user connections.
A background worker can only call one of these two functions, and only
once. It is not possible to switch databases.
diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml
index 49530241620..bf3aebd2a88 100644
--- a/doc/src/sgml/biblio.sgml
+++ b/doc/src/sgml/biblio.sgml
@@ -136,6 +136,14 @@
1988
+
+ SQL Technical Report
+ Part 6: SQL support for JavaScript Object
+ Notation (JSON)
+ First Edition
+ 2017
+
+
diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml
index f7a323ef345..6523dd5032c 100644
--- a/doc/src/sgml/bki.sgml
+++ b/doc/src/sgml/bki.sgml
@@ -65,7 +65,7 @@
- Most Postgres developers don't need to be directly concerned with
+ Most PostgreSQL developers don't need to be directly concerned with
the BKI file, but almost any nontrivial feature
addition in the backend will require modifying the catalog header files
and/or initial data files. The rest of this chapter gives some
@@ -89,7 +89,7 @@
The CATALOG line can also be annotated, with some
other BKI property macros described in genbki.h , to
define other properties of the catalog as a whole, such as whether
- it has OIDs (by default, it does).
+ it is a shared relation.
@@ -184,16 +184,13 @@
[
-# LC_COLLATE and LC_CTYPE will be replaced at initdb time with user choices
-# that might contain non-word characters, so we must double-quote them.
-
+# A comment could appear here.
{ oid => '1', oid_symbol => 'TemplateDbOid',
descr => 'database\'s default template',
- datname => 'template1', datdba => 'PGUID', encoding => 'ENCODING',
- datcollate => '"LC_COLLATE"', datctype => '"LC_CTYPE"', datistemplate => 't',
- datallowconn => 't', datconnlimit => '-1', datlastsysoid => '0',
- datfrozenxid => '0', datminmxid => '1', dattablespace => '1663',
- datacl => '_null_' },
+ datname => 'template1', encoding => 'ENCODING', datcollate => 'LC_COLLATE',
+ datctype => 'LC_CTYPE', datistemplate => 't', datallowconn => 't',
+ datconnlimit => '-1', datlastsysoid => '0', datfrozenxid => '0',
+ datminmxid => '1', dattablespace => 'pg_default', datacl => '_null_' },
]
@@ -219,40 +216,44 @@
value pairs. The
allowed key s are the names of the catalog's
columns, plus the metadata keys oid ,
- oid_symbol , and descr .
+ oid_symbol ,
+ array_type_oid , and descr .
(The use of oid and oid_symbol
- is described in
- below. descr supplies a description string for
- the object, which will be inserted
- into pg_description
+ is described in below,
+ while array_type_oid is described in
+ .
+ descr supplies a description string for the object,
+ which will be inserted into pg_description
or pg_shdescription as appropriate.)
While the metadata keys are optional, the catalog's defined columns
must all be provided, except when the catalog's .h
file specifies a default value for the column.
+ (In the example above, the datdba field has
+ been omitted because pg_database.h supplies a
+ suitable default value for it.)
- All values must be single-quoted. Escape single quotes used within
- a value with a backslash. (Backslashes meant as data need not be
- doubled, however; this follows Perl's rules for simple quoted
- literals.)
+ All values must be single-quoted. Escape single quotes used within a
+ value with a backslash. Backslashes meant as data can, but need not,
+ be doubled; this follows Perl's rules for simple quoted literals.
+ Note that backslashes appearing as data will be treated as escapes by
+ the bootstrap scanner, according to the same rules as for escape string
+ constants (see ); for
+ example \t converts to a tab character. If you
+ actually want a backslash in the final value, you will need to write
+ four of them: Perl strips two, leaving \\ for the
+ bootstrap scanner to see.
Null values are represented by _null_ .
-
-
-
-
-
- If a value is a macro to be expanded
- by initdb , it should also contain double
- quotes as shown above, unless we know that no special characters can
- appear within the string that will be substituted.
+ (Note that there is no way to create a value that is just that
+ string.)
@@ -265,8 +266,10 @@
- To aid readability, field values that are OIDs of other catalog
- entries can be represented by names rather than numeric OIDs.
+ Field values that are OIDs of other catalog entries should be
+ represented by symbolic names rather than actual numeric OIDs.
+ (In the example above, dattablespace
+ contains such a reference.)
This is described in
below.
@@ -285,8 +288,9 @@
Within each pair of curly braces, the metadata
fields oid , oid_symbol ,
- and descr (if present) come first, in that
- order, then the catalog's own fields appear in their defined order.
+ array_type_oid , and descr
+ (if present) come first, in that order, then the catalog's own
+ fields appear in their defined order.
@@ -354,7 +358,7 @@
also needed if the row's OID must be referenced from C code.
If neither case applies, the oid metadata field can
be omitted, in which case the bootstrap code assigns an OID
- automatically, or leaves it zero in a catalog that has no OIDs.
+ automatically.
In practice we usually preassign OIDs for all or none of the pre-loaded
rows in a given catalog, even if only some of them are actually
cross-referenced.
@@ -387,15 +391,52 @@
through the catalog headers and .dat files
to see which ones do not appear. You can also use
the duplicate_oids script to check for mistakes.
- (That script is run automatically at compile time, and will stop the
- build if a duplicate is found.)
+ (genbki.pl will assign OIDs for any rows that
+ didn't get one hand-assigned to them, and it will also detect duplicate
+ OIDs at compile time.)
+
+
+
+ When choosing OIDs for a patch that is not expected to be committed
+ immediately, best practice is to use a group of more-or-less
+ consecutive OIDs starting with some random choice in the range
+ 8000—9999. This minimizes the risk of OID collisions with other
+ patches being developed concurrently. To keep the 8000—9999
+ range free for development purposes, after a patch has been committed
+ to the master git repository its OIDs should be renumbered into
+ available space below that range. Typically, this will be done
+ near the end of each development cycle, moving all OIDs consumed by
+ patches committed in that cycle at the same time. The script
+ renumber_oids.pl can be used for this purpose.
+ If an uncommitted patch is found to have OID conflicts with some
+ recently-committed patch, renumber_oids.pl may
+ also be useful for recovering from that situation.
- The OID counter starts at 10000 at the beginning of a bootstrap run.
- If a catalog row is in a table that requires OIDs, but no OID was
- preassigned by an oid field, then it will
- receive an OID of 10000 or above.
+ Because of this convention of possibly renumbering OIDs assigned by
+ patches, the OIDs assigned by a patch should not be considered stable
+ until the patch has been included in an official release. We do not
+ change manually-assigned object OIDs once released, however, as that
+ would create assorted compatibility problems.
+
+
+
+ If genbki.pl needs to assign an OID to a catalog
+ entry that does not have a manually-assigned OID, it will use a value in
+ the range 10000—11999. The server's OID counter is set to 12000
+ at the start of a bootstrap run. Thus objects created by regular SQL
+ commands during the later phases of bootstrap, such as objects created
+ while running the information_schema.sql script,
+ receive OIDs of 12000 or above.
+
+
+
+ OIDs assigned during normal database operation are constrained to be
+ 16384 or higher. This ensures that the range 10000—16383 is free
+ for OIDs assigned automatically by genbki.pl or
+ during bootstrap. These automatically-assigned OIDs are not considered
+ stable, and may change from one installation to another.
@@ -403,13 +444,14 @@
OID Reference Lookup
- Cross-references from one initial catalog row to another can be written
- by just writing the preassigned OID of the referenced row. But
- that's error-prone and hard to understand, so for frequently-referenced
- catalogs, genbki.pl provides mechanisms to write
- symbolic references instead. Currently this is possible for references
- to access methods, functions, operators, opclasses, opfamilies, and
- types. The rules are as follows:
+ In principle, cross-references from one initial catalog row to another
+ could be written just by writing the preassigned OID of the referenced
+ row in the referencing field. However, that is against project
+ policy, because it is error-prone, hard to read, and subject to
+ breakage if a newly-assigned OID is renumbered. Therefore
+ genbki.pl provides mechanisms to write
+ symbolic references instead.
+ The rules are as follows:
@@ -419,9 +461,7 @@
Use of symbolic references is enabled in a particular catalog column
by attaching BKI_LOOKUP(lookuprule )
to the column's definition, where lookuprule
- is pg_am , pg_proc ,
- pg_operator , pg_opclass ,
- pg_opfamily , or pg_type .
+ is the name of the referenced catalog, e.g. pg_proc .
BKI_LOOKUP can be attached to columns of
type Oid , regproc , oidvector ,
or Oid[] ; in the latter two cases it implies performing a
@@ -429,6 +469,15 @@
+
+
+ It's also permissible to attach BKI_LOOKUP(encoding)
+ to integer columns to reference character set encodings, which are
+ not currently represented as catalog OIDs, but have a set of values
+ known to genbki.pl .
+
+
+
In such a column, all entries must use the symbolic format except
@@ -441,10 +490,11 @@
- Access methods are just represented by their names, as are types.
- Type names must match the referenced pg_type
- entry's typname ; you do not get to use any
- aliases such as integer
+ Most kinds of catalog objects are simply referenced by their names.
+ Note that type names must exactly match the
+ referenced pg_type
+ entry's typname ; you do not get to use
+ any aliases such as integer
for int4 .
@@ -488,7 +538,18 @@
In none of these cases is there any provision for
schema-qualification; all objects created during bootstrap are
- expected to be in the pg_catalog schema.
+ expected to be in the pg_catalog schema.
+
+
+
+
+
+ In addition to the generic lookup mechanisms, there is a special
+ convention that PGNSP is replaced by the OID of
+ the pg_catalog schema,
+ and PGUID is replaced by the OID of the bootstrap
+ superuser role. These usages are somewhat historical but so far
+ there hasn't been a need to generalize them.
@@ -501,6 +562,41 @@
+
+ Automatic Creation of Array Types
+
+
+ Most scalar data types should have a corresponding array type (that is,
+ a standard varlena array type whose element type is the scalar type, and
+ which is referenced by the typarray field of
+ the scalar type's pg_type
+ entry). genbki.pl is able to generate
+ the pg_type entry for the array type
+ automatically in most cases.
+
+
+
+ To use this facility, just write an array_type_oid
+ => nnnn metadata field in the
+ scalar type's pg_type entry, specifying the OID
+ to use for the array type. You may then omit
+ the typarray field, since it will be filled
+ automatically with that OID.
+
+
+
+ The generated array type's name is the scalar type's name with an
+ underscore prepended. The array entry's other fields are filled from
+ BKI_ARRAY_DEFAULT(value )
+ annotations in pg_type.h , or if there isn't one,
+ copied from the scalar type. (There's also a special case
+ for typalign .) Then
+ the typelem
+ and typarray fields of the two entries are
+ set to cross-reference each other.
+
+
+
Recipes for Editing Data Files
@@ -599,7 +695,7 @@
Run the new script:
$ cd src/include/catalog
-$ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
+$ perl rewrite_dat_with_prokind.pl pg_proc.dat
At this point pg_proc.dat has all three
columns, prokind ,
@@ -679,7 +775,6 @@ $ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
tableoid
bootstrap
shared_relation
- without_oids
rowtype_oid oid
(name1 =
type1
@@ -731,7 +826,6 @@ $ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
The table is created as shared if shared_relation is
specified.
- It will have OIDs unless without_oids is specified.
The table's row type OID (pg_type OID) can optionally
be specified via the rowtype_oid clause; if not specified,
an OID is automatically generated for it. (The rowtype_oid
@@ -757,20 +851,20 @@ $ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
- close tablename
+ close tablename
- Close the open table. The name of the table can be given as a
- cross-check, but this is not required.
+ Close the open table. The name of the table must be given as a
+ cross-check.
- insert OID = oid_value ( value1 value2 ... )
+ insert ( oid_value value1 value2 ... )
@@ -778,17 +872,13 @@ $ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
Insert a new row into the open table using value1 , value2 , etc., for its column
- values and oid_value for its OID. If
- oid_value is zero
- (0) or the clause is omitted, and the table has OIDs, then the
- next available OID is assigned.
+ values.
NULL values can be specified using the special key word
- _null_ . Values containing spaces must be
- double quoted.
+ _null_ . Values that do not look like
+ identifiers or digit strings must be double quoted.
@@ -950,16 +1040,16 @@ $ perl -I ../../backend/catalog rewrite_dat_with_prokind.pl pg_proc.dat
BKI Example
- The following sequence of commands will create the
- table test_table with OID 420, having two columns
- cola and colb of type
- int4 and text , respectively, and insert
- two rows into the table:
+ The following sequence of commands will create the table
+ test_table with OID 420, having three columns
+ oid , cola and colb
+ of type oid , int4 and text ,
+ respectively, and insert two rows into the table:
-create test_table 420 (cola = int4, colb = text)
+create test_table 420 (oid = oid, cola = int4, colb = text)
open test_table
-insert OID=421 ( 1 "value1" )
-insert OID=422 ( 2 _null_ )
+insert ( 421 1 "value1" )
+insert ( 422 2 _null_ )
close test_table
diff --git a/doc/src/sgml/bloom.sgml b/doc/src/sgml/bloom.sgml
index e13ebf80fdf..6eeaddee093 100644
--- a/doc/src/sgml/bloom.sgml
+++ b/doc/src/sgml/bloom.sgml
@@ -9,7 +9,7 @@
bloom provides an index access method based on
- Bloom filters .
+ Bloom filters .
@@ -51,8 +51,9 @@
length
- Length of each signature (index entry) in bits. The default
- is 80 bits and maximum is 4096 .
+ Length of each signature (index entry) in bits. It is rounded up to the
+ nearest multiple of 16 . The default is
+ 80 bits and the maximum is 4096 .
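For instance, the rounding described above means a requested length of 90
becomes 96; a sketch with a hypothetical table and columns:

CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2) WITH (length = 90);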
@@ -242,6 +243,20 @@ DEFAULT FOR TYPE text USING bloom AS
operations in the future.
+
+
+
+ bloom access method doesn't support
+ UNIQUE indexes.
+
+
+
+
+
+ bloom access method doesn't support searching for
+ NULL values.
+
+
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index f02e061bc1c..da0c9111534 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -129,17 +129,6 @@ LOG: request for BRIN range summarization for index "brin_wi_idx" page 128 was
-
- abstime_minmax_ops
- abstime
-
- <
- <=
- =
- >=
- >
-
-
int8_minmax_ops
bigint
@@ -388,17 +377,6 @@ LOG: request for BRIN range summarization for index "brin_wi_idx" page 128 was
>
-
- reltime_minmax_ops
- reltime
-
- <
- <=
- =
- >=
- >
-
-
int2_minmax_ops
smallint
@@ -537,7 +515,7 @@ typedef struct BrinOpcInfo
} BrinOpcInfo;
BrinOpcInfo .oi_opaque can be used by the
- operator class routines to pass information between support procedures
+ operator class routines to pass information between support functions
during an index scan.
@@ -587,27 +565,27 @@ typedef struct BrinOpcInfo
defined by the user for other data types using equivalent definitions,
without having to write any source code; appropriate catalog entries being
declared is enough. Note that assumptions about the semantics of operator
- strategies are embedded in the support procedures' source code.
+ strategies are embedded in the support functions' source code.
Operator classes that implement completely different semantics are also
- possible, provided implementations of the four main support procedures
+ possible, provided implementations of the four main support functions
described above are written. Note that backwards compatibility across major
- releases is not guaranteed: for example, additional support procedures might
+ releases is not guaranteed: for example, additional support functions might
be required in later releases.
To write an operator class for a data type that implements a totally
- ordered set, it is possible to use the minmax support procedures
+ ordered set, it is possible to use the minmax support functions
alongside the corresponding operators, as shown in
.
- All operator class members (procedures and operators) are mandatory.
+ All operator class members (functions and operators) are mandatory.
- Procedure and Support Numbers for Minmax Operator Classes
+ Function and Support Numbers for Minmax Operator Classes
@@ -617,19 +595,19 @@ typedef struct BrinOpcInfo
- Support Procedure 1
+ Support Function 1
internal function brin_minmax_opcinfo()
- Support Procedure 2
+ Support Function 2
internal function brin_minmax_add_value()
- Support Procedure 3
+ Support Function 3
internal function brin_minmax_consistent()
- Support Procedure 4
+ Support Function 4
internal function brin_minmax_union()
@@ -659,7 +637,7 @@ typedef struct BrinOpcInfo
To write an operator class for a complex data type which has values
included within another type, it's possible to use the inclusion support
- procedures alongside the corresponding operators, as shown
+ functions alongside the corresponding operators, as shown
in . It requires
only a single additional function, which can be written in any language.
More functions can be defined for additional functionality. All operators
@@ -668,7 +646,7 @@ typedef struct BrinOpcInfo
- Procedure and Support Numbers for Inclusion Operator Classes
+ Function and Support Numbers for Inclusion Operator Classes
@@ -679,42 +657,42 @@ typedef struct BrinOpcInfo
- Support Procedure 1
+ Support Function 1
internal function brin_inclusion_opcinfo()
- Support Procedure 2
+ Support Function 2
internal function brin_inclusion_add_value()
- Support Procedure 3
+ Support Function 3
internal function brin_inclusion_consistent()
- Support Procedure 4
+ Support Function 4
internal function brin_inclusion_union()
- Support Procedure 11
+ Support Function 11
function to merge two elements
- Support Procedure 12
+ Support Function 12
optional function to check whether two elements are mergeable
- Support Procedure 13
+ Support Function 13
optional function to check if an element is contained within another
- Support Procedure 14
+ Support Function 14
optional function to check whether an element is empty
@@ -803,7 +781,7 @@ typedef struct BrinOpcInfo
- Support procedure numbers 1-10 are reserved for the BRIN internal
+ Support function numbers 1-10 are reserved for the BRIN internal
functions, so the SQL level functions start with number 11. Support
function number 11 is the main function required to build the index.
It should accept two arguments with the same data type as the operator class,
@@ -814,11 +792,11 @@ typedef struct BrinOpcInfo
- Support procedure numbers 12 and 14 are provided to support
- irregularities of built-in data types. Procedure number 12
+ Support function numbers 12 and 14 are provided to support
+ irregularities of built-in data types. Function number 12
is used to support network addresses from different families which
- are not mergeable. Procedure number 14 is used to support
- empty ranges. Procedure number 13 is an optional but
+ are not mergeable. Function number 14 is used to support
+ empty ranges. Function number 13 is an optional but
recommended one, which allows the new value to be checked before
it is passed to the union function. As the BRIN framework can shortcut
some operations when the union is not changed, using this
diff --git a/doc/src/sgml/btree.sgml b/doc/src/sgml/btree.sgml
index ca81fbbc848..5881ea5dd6d 100644
--- a/doc/src/sgml/btree.sgml
+++ b/doc/src/sgml/btree.sgml
@@ -13,7 +13,7 @@
PostgreSQL includes an implementation of the
- standard btree (multi-way binary tree) index data
+ standard btree (multi-way balanced tree) index data
structure. Any data type that can be sorted into a well-defined linear
order can be indexed by a btree index. The only limitation is that an
index entry cannot exceed approximately one-third of a page (after TOAST
@@ -228,11 +228,8 @@
B , A
= B ,
or A >
- B , respectively. The function must not
- return INT_MIN for the A
- < B case,
- since the value may be negated before being tested for sign. A null
- result is disallowed, too.
+ B , respectively.
+ A null result is disallowed: all values of the data type must be comparable.
See src/backend/access/nbtree/nbtcompare.c for
examples.
@@ -301,7 +298,7 @@ returns bool
The essential semantics of an in_range function
- depend on the two boolean flag parameters. It should add or
+ depend on the two Boolean flag parameters. It should add or
subtract base
and offset , then
compare val to the result, as follows:
@@ -344,7 +341,7 @@ returns bool
Before doing so, the function should check the sign
of offset : if it is less than zero, raise
- error ERRCODE_INVALID_PRECEDING_FOLLOWING_SIZE (22013)
+ error ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE (22013)
with error text like invalid preceding or following size in window
function
. (This is required by the SQL standard, although
nonstandard operator families might perhaps choose to ignore this
@@ -433,23 +430,6 @@ returns bool
-
- Included attributes in B-tree indexes
-
-
- As of PostgreSQL 11.0 there is an optional
- INCLUDE clause, which allows to add non-key (included) attributes to index.
- Those included attributes allow more queries to benefit from index-only scans.
- We never use included attributes in ScanKeys for search. That allows us to
- include into B-tree any datatypes, even those which don't have suitable
- operator classes. Included columns only stored in regular tuples on leaf
- pages. All pivot tuples on non-leaf pages and highkey tuples are truncated
- to contain only key attributes. That helps to slightly reduce the size of
- index.
-
-
-
-
Implementation
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 14aeed30763..5e71a2e8654 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -57,7 +57,7 @@
pg_am
- index access methods
+ relation access methods
@@ -67,7 +67,7 @@
pg_amproc
- access method support procedures
+ access method support functions
@@ -297,7 +297,12 @@
pg_statistic_ext
- extended planner statistics
+ extended planner statistics (definition)
+
+
+
+ pg_statistic_ext_data
+ extended planner statistics (built statistics)
@@ -587,8 +592,9 @@
The catalog pg_am stores information about
relation access methods. There is one row for each access method supported
by the system.
- Currently, only indexes have access methods. The requirements for index
- access methods are discussed in detail in .
+ Currently, only tables and indexes have access methods. The requirements for table
+ and index access methods are discussed in detail in and
+ respectively.
@@ -609,7 +615,7 @@
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -634,8 +640,8 @@
char
- Currently always i to indicate an index access
- method; other values may be allowed in future
+ t = table (including materialized views),
+ i = index.
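The new amtype values can be inspected directly, for example:

SELECT amname, amtype FROM pg_am;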
@@ -693,7 +699,7 @@
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -814,8 +820,8 @@
The catalog pg_amproc stores information about
- support procedures associated with access method operator families. There
- is one row for each support procedure belonging to an operator family.
+ support functions associated with access method operator families. There
+ is one row for each support function belonging to an operator family.
@@ -836,7 +842,7 @@
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -864,14 +870,14 @@
amprocnum
int2
- Support procedure number
+ Support function number
amproc
regproc
pg_proc .oid
- OID of the procedure
+ OID of the function
@@ -882,9 +888,9 @@
The usual interpretation of the
amproclefttype and amprocrighttype fields
is that they identify the left and right input types of the operator(s)
- that a particular support procedure supports. For some access methods
- these match the input data type(s) of the support procedure itself, for
- others not. There is a notion of default
support procedures for
+ that a particular support function supports. For some access methods
+ these match the input data type(s) of the support function itself, for
+ others not. There is a notion of default
support functions for
an index, which are those with amproclefttype and
amprocrighttype both equal to the index operator class's
opcintype .
@@ -901,11 +907,11 @@
- The catalog pg_attrdef stores column default values. The main information
- about columns is stored in pg_attribute
- (see below). Only columns that explicitly specify a default value
- (when the table is created or the column is added) will have an
- entry here.
+ The catalog pg_attrdef stores column default
+ values. The main information about columns is stored in
+ pg_attribute .
+ Only columns for which a default value has been explicitly set will have
+ an entry here.
@@ -926,7 +932,7 @@
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -947,27 +953,13 @@
adbin
pg_node_tree
- The internal representation of the column default value
-
-
-
- adsrc
- text
-
- A human-readable representation of the default value
+ The column default value, in nodeToString()
+ representation. Use pg_get_expr(adbin, adrelid) to
+ convert it to an SQL expression.
-
-
- The adsrc field is historical, and is best
- not used, because it does not track outside changes that might affect
- the representation of the default value. Reverse-compiling the
- adbin field (with pg_get_expr for
- example) is a better way to display the default value.
-
-
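Following the advice above, a sketch that decompiles all stored column
defaults via pg_get_expr rather than reading adbin directly:

SELECT d.adrelid::regclass AS table_name, a.attname AS column_name,
       pg_get_expr(d.adbin, d.adrelid) AS default_expr
FROM pg_attrdef d
JOIN pg_attribute a ON a.attrelid = d.adrelid AND a.attnum = d.adnum;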
@@ -1060,7 +1052,7 @@
The number of the column. Ordinary columns are numbered from 1
- up. System columns, such as oid ,
+ up. System columns, such as ctid ,
have (arbitrary) negative numbers.
@@ -1143,9 +1135,11 @@
bool
- This column has a default value, in which case there will be a
- corresponding entry in the pg_attrdef
- catalog that actually defines the value.
+ This column has a default expression or generation expression, in which
+ case there will be a corresponding entry in the
+ pg_attrdef catalog that actually defines the
+ expression. (Check attgenerated to
+ determine whether this is a default or a generation expression.)
@@ -1173,6 +1167,17 @@
+
+ attgenerated
+ char
+
+
+ If a zero byte ('' ), then not a generated column.
+ Otherwise, s = stored. (Other values might be added
+ in the future.)
+
+
+
attisdropped
bool
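A sketch showing how the new column distinguishes plain defaults from
generated columns (table and column names are hypothetical):

CREATE TABLE gtest (a int DEFAULT 0, b int GENERATED ALWAYS AS (a * 2) STORED);
SELECT attname, atthasdef, attgenerated
FROM pg_attribute WHERE attrelid = 'gtest'::regclass AND attnum > 0;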
@@ -1326,7 +1331,7 @@
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -1552,7 +1557,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -1675,7 +1680,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -1725,7 +1730,10 @@ SCRAM-SHA-256$<iteration count> :&l
relam
oid
pg_am .oid
- If this is an index, the access method used (B-tree, hash, etc.)
+
+ If this is a table or an index, the access method used (heap,
+ B-tree, hash, etc.)
+
@@ -1840,7 +1848,8 @@ SCRAM-SHA-256$<iteration count> :&l
m = materialized view,
c = composite type,
f = foreign table,
- p = partitioned table
+ p = partitioned table,
+ I = partitioned index
@@ -1866,15 +1875,6 @@ SCRAM-SHA-256$<iteration count> :&l
-
- relhasoids
- bool
-
-
- True if we generate an OID for each row of the relation
-
-
-
relhasrules
bool
@@ -1899,7 +1899,9 @@ SCRAM-SHA-256$<iteration count> :&l
relhassubclass
bool
- True if table has (or once had) any inheritance children
+
+ True if table or index has (or once had) any inheritance children
+
@@ -1947,7 +1949,7 @@ SCRAM-SHA-256$<iteration count> :&l
relispartition
bool
- True if table is a partition
+ True if table or index is a partition
@@ -1993,10 +1995,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -2066,7 +2065,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -2100,6 +2099,13 @@ SCRAM-SHA-256$<iteration count> :&l
default, c = libc, i = icu
+
+ collisdeterministic
+ bool
+
+ Is the collation deterministic?
+
+
collencoding
int4
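A sketch of creating a nondeterministic collation, which would show
collisdeterministic = false (assumes an ICU-enabled build; the collation
name and locale are illustrative):

CREATE COLLATION ndcoll (provider = icu, locale = 'und-u-ks-level2', deterministic = false);
SELECT collname, collisdeterministic FROM pg_collation WHERE collname = 'ndcoll';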
@@ -2204,7 +2210,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -2372,14 +2378,6 @@ SCRAM-SHA-256$<iteration count> :&l
triggers), list of the constrained columns
-
- conincluding
- int2[]
- pg_attribute .attnum
- List of the non-constrained columns which are included into
- the same index as the constrained columns
-
-
confkey
int2[]
@@ -2419,14 +2417,10 @@ SCRAM-SHA-256$<iteration count> :&l
conbin
pg_node_tree
- If a check constraint, an internal representation of the expression
-
-
-
- consrc
- text
-
- If a check constraint, a human-readable representation of the expression
+ If a check constraint, an internal representation of the
+ expression. (It's recommended to use
+ pg_get_constraintdef() to extract the definition of
+ a check constraint.)
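As recommended above, a sketch that decompiles check constraints instead of
reading conbin directly:

SELECT conname, pg_get_constraintdef(oid) AS definition
FROM pg_constraint WHERE contype = 'c';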
@@ -2442,15 +2436,6 @@ SCRAM-SHA-256$<iteration count> :&l
index.)
-
-
- consrc is not updated when referenced objects
- change; for example, it won't track renaming of columns. Rather than
- relying on this field, it's best to use pg_get_constraintdef()
- to extract the definition of a check constraint.
-
-
-
pg_class.relchecks needs to agree with the
@@ -2470,7 +2455,7 @@ SCRAM-SHA-256$<iteration count> :&l
The catalog pg_conversion describes
- encoding conversion procedures. See
+ encoding conversion functions. See
for more information.
@@ -2492,7 +2477,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -2536,7 +2521,7 @@ SCRAM-SHA-256$<iteration count> :&l
conproc
regproc
pg_proc .oid
- Conversion procedure
+ Conversion function
@@ -2592,7 +2577,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -2720,10 +2705,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -2822,7 +2804,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -2849,7 +2831,8 @@ SCRAM-SHA-256$<iteration count> :&l
r = relation (table, view),
S = sequence,
f = function,
- T = type
+ T = type,
+ n = schema
@@ -3016,7 +2999,7 @@ SCRAM-SHA-256$<iteration count> :&l
referenced object, and should be automatically dropped
(regardless of RESTRICT or CASCADE
mode) if the referenced object is dropped. Example: a named
- constraint on a table is made autodependent on the table, so
+ constraint on a table is made auto-dependent on the table, so
that it will go away if the table is dropped.
@@ -3028,38 +3011,61 @@ SCRAM-SHA-256$<iteration count> :&l
The dependent object was created as part of creation of the
referenced object, and is really just a part of its internal
- implementation. A DROP of the dependent object
- will be disallowed outright (we'll tell the user to issue a
- DROP against the referenced object, instead). A
- DROP of the referenced object will be propagated
- through to drop the dependent object whether
- CASCADE is specified or not. Example: a trigger
- that's created to enforce a foreign-key constraint is made
- internally dependent on the constraint's
- pg_constraint entry.
+ implementation. A direct DROP of the dependent
+ object will be disallowed outright (we'll tell the user to issue
+ a DROP against the referenced object, instead).
+ A DROP of the referenced object will result in
+ automatically dropping the dependent object
+ whether CASCADE is specified or not. If the
+ dependent object has to be dropped due to a dependency on some other
+ object being removed, its drop is converted to a drop of the referenced
+ object, so that NORMAL and AUTO
+ dependencies of the dependent object behave much like they were
+ dependencies of the referenced object.
+ Example: a view's ON SELECT rule is made
+ internally dependent on the view, preventing it from being dropped
+ while the view remains. Dependencies of the rule (such as tables it
+ refers to) act as if they were dependencies of the view.
- DEPENDENCY_INTERNAL_AUTO (I )
+ DEPENDENCY_PARTITION_PRI (P )
+ DEPENDENCY_PARTITION_SEC (S )
The dependent object was created as part of creation of the
referenced object, and is really just a part of its internal
- implementation. A DROP of the dependent object
- will be disallowed outright (we'll tell the user to issue a
- DROP against the referenced object, instead).
- While a regular internal dependency will prevent
- the dependent object from being dropped while any such dependencies
- remain, DEPENDENCY_INTERNAL_AUTO will allow such
- a drop as long as the object can be found by following any of such
+ implementation; however, unlike INTERNAL ,
+ there is more than one such referenced object. The dependent object
+ must not be dropped unless at least one of these referenced objects
+ is dropped; if any one is, the dependent object should be dropped
+ whether or not CASCADE is specified. Also
+ unlike INTERNAL , a drop of some other object
+ that the dependent object depends on does not result in automatic
+ deletion of any partition-referenced object. Hence, if the drop
+ does not cascade to at least one of these objects via some other
+ path, it will be refused. (In most cases, the dependent object
+ shares all its non-partition dependencies with at least one
+ partition-referenced object, so that this restriction does not
+ result in blocking any cascaded delete.)
+ Primary and secondary partition dependencies behave identically
+ except that the primary dependency is preferred for use in error
+ messages; hence, a partition-dependent object should have one
+ primary partition dependency and one or more secondary partition
dependencies.
- Example: an index on a partition is made internal-auto-dependent on
- both the partition itself as well as on the index on the parent
- partitioned table; so the partition index is dropped together with
- either the partition it indexes, or with the parent index it is
- attached to.
+ Note that partition dependencies are made in addition to, not
+ instead of, any dependencies the object would normally have. This
+ simplifies ATTACH/DETACH PARTITION operations:
+ the partition dependencies need only be added or removed.
+ Example: a child partitioned index is made partition-dependent
+ on both the partition table it is on and the parent partitioned
+ index, so that it goes away if either of those is dropped, but
+ not otherwise. The dependency on the parent index is primary,
+ so that if the user tries to drop the child partitioned index,
+ the error message will suggest dropping the parent index instead
+ (not the table).
@@ -3072,9 +3078,10 @@ SCRAM-SHA-256$<iteration count> :&l
the referenced object (see
pg_extension ).
The dependent object can be dropped only via
- DROP EXTENSION on the referenced object. Functionally
- this dependency type acts the same as an internal dependency, but
- it's kept separate for clarity and to simplify pg_dump .
+ DROP EXTENSION on the referenced object.
+ Functionally this dependency type acts the same as
+ an INTERNAL dependency, but it's kept separate for
+ clarity and to simplify pg_dump .
@@ -3084,10 +3091,13 @@ SCRAM-SHA-256$<iteration count> :&l
The dependent object is not a member of the extension that is the
- referenced object (and so should not be ignored by pg_dump), but
- cannot function without it and should be dropped when the
- extension itself is. The dependent object may be dropped on its
- own as well.
+ referenced object (and so it should not be ignored
+ by pg_dump ), but it cannot function
+ without the extension and should be auto-dropped if the extension is.
+ The dependent object may be dropped on its own as well.
+ Functionally this dependency type acts the same as
+ an AUTO dependency, but it's kept separate for
+ clarity and to simplify pg_dump .
@@ -3109,6 +3119,19 @@ SCRAM-SHA-256$<iteration count> :&l
Other dependency flavors might be needed in future.
+
+ Note that it's quite possible for two objects to be linked by more than
+ one pg_depend entry. For example, a child
+ partitioned index would have both a partition-type dependency on its
+ associated partition table, and an auto dependency on each column of
+ that table that it indexes. This sort of situation expresses the union
+ of multiple dependency semantics. A dependent object can be dropped
+ without CASCADE if any of its dependencies satisfies
+ its condition for automatic dropping. Conversely, all the
+ dependencies' restrictions about which objects must be dropped together
+ must be satisfied.
+
+
@@ -3219,7 +3242,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -3382,7 +3405,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -3485,7 +3508,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -3531,10 +3554,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -3584,7 +3604,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -3627,10 +3647,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -3752,15 +3769,16 @@ SCRAM-SHA-256$<iteration count> :&l
int2
The total number of columns in the index (duplicates
- pg_class.relnatts ). This number includes both key and included attributes.
+ pg_class.relnatts ); this number includes both key and included attributes
indnkeyatts
int2
- The number of key columns in the index. "Key columns" are ordinary
- index columns (as opposed to "included" columns).
+ The number of key columns in the index,
+ not counting any included columns , which are
+ merely stored and do not participate in the index semantics
@@ -3866,7 +3884,8 @@ SCRAM-SHA-256$<iteration count> :&l
This is an array of indnatts values that
indicate which table columns this index indexes. For example a value
of 1 3 would mean that the first and the third table
- columns make up the index key. A zero in this array indicates that the
+ columns make up the index entries. Key columns come before non-key
+ (included) columns. A zero in this array indicates that the
corresponding index attribute is an expression over the table columns,
rather than a simple column reference.
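A sketch that shows the key/included split for an index (the table, column,
and index names are hypothetical):

CREATE INDEX t_idx ON t (a, b) INCLUDE (c);
SELECT indnkeyatts, indnatts, indkey FROM pg_index WHERE indexrelid = 't_idx'::regclass;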
@@ -3877,9 +3896,10 @@ SCRAM-SHA-256$<iteration count> :&l
oidvector
pg_collation .oid
- For each column in the index key, this contains the OID of the
- collation to use for the index, or zero if the column is not
- of a collatable data type.
+ For each column in the index key
+ (indnkeyatts values), this contains the OID
+ of the collation to use for the index, or zero if the column is not of
+ a collatable data type.
@@ -3888,8 +3908,9 @@ SCRAM-SHA-256$<iteration count> :&l
oidvector
pg_opclass .oid
- For each column in the index key, this contains the OID of
- the operator class to use. See
+ For each column in the index key
+ (indnkeyatts values), this contains the OID
+ of the operator class to use. See
pg_opclass for details.
@@ -3899,7 +3920,7 @@ SCRAM-SHA-256$<iteration count> :&l
int2vector
- This is an array of indnatts values that
+ This is an array of indnkeyatts values that
store per-column flag bits. The meaning of the bits is defined by
the index's access method.
@@ -4088,9 +4109,7 @@ SCRAM-SHA-256$<iteration count> :&l
The initial access privileges; see
- and
-
- for details
+ for details
@@ -4133,7 +4152,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4215,10 +4234,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -4340,7 +4356,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4355,10 +4371,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -4400,7 +4413,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4422,10 +4435,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -4475,7 +4485,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4580,7 +4590,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4737,7 +4747,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -4823,8 +4833,8 @@ SCRAM-SHA-256$<iteration count> :&l
char
- Partitioning strategy; l = list partitioned table,
- r = range partitioned table
+ Partitioning strategy; h = hash partitioned table,
+ l = list partitioned table, r = range partitioned table
@@ -5147,7 +5157,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -5205,11 +5215,11 @@ SCRAM-SHA-256$<iteration count> :&l
- protransform
+ prosupport
regproc
pg_proc .oid
- Calls to this function can be simplified by this other function
- (see )
+ Optional planner support function for this function
+ (see )
@@ -5432,10 +5442,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -5485,7 +5492,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -5690,6 +5697,13 @@ SCRAM-SHA-256$<iteration count> :&l
see .
+
+ Unlike most system catalogs, pg_replication_origin
+ is shared across all databases of a cluster: there is only one copy
+ of pg_replication_origin per cluster, not one per
+ database.
+
+
pg_replication_origin Columns
@@ -5754,7 +5768,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -6362,6 +6376,28 @@ SCRAM-SHA-256$<iteration count> :&l
about those tables that are readable by the current user.
+
+ pg_statistic should not be readable by the
+ public, since even statistical information about a table's contents
+ might be considered sensitive. (Example: minimum and maximum values
+ of a salary column might be quite interesting.)
+ pg_stats
+ is a publicly readable view on
+ pg_statistic that only exposes information
+ about those tables that are readable by the current user.
+
+
+
+ Similarly, pg_statistic_ext_data should not be
+ readable by the public, since the contents might be considered sensitive.
+ (Example: the most common combinations of values in columns might be
+ quite interesting.)
+ pg_stats_ext
+ is a publicly readable view on pg_statistic_ext_data
+ (after joining with pg_statistic_ext ) that only exposes
+ information about those tables and columns that are readable by the current user.
+
+
pg_statistic Columns
@@ -6449,6 +6485,18 @@ SCRAM-SHA-256$<iteration count> :&l
+
+ stacollN
+ oid
+ pg_collation .oid
+
+ The collation used to derive the statistics stored in the
+ N th slot. For example, a
+ histogram slot for a collatable column would show the collation that
+ defines the sort order of the data. Zero for noncollatable data.
+
+
+
stanumbersN
float4[]
@@ -6488,7 +6536,7 @@ SCRAM-SHA-256$<iteration count> :&l
The catalog pg_statistic_ext
- holds extended planner statistics.
+ holds definitions of extended planner statistics.
Each row in this catalog corresponds to a statistics object
created with .
@@ -6558,12 +6606,62 @@ SCRAM-SHA-256$<iteration count> :&l
An array containing codes for the enabled statistic kinds;
valid values are:
d for n-distinct statistics,
- f for functional dependency statistics
+ f for functional dependency statistics, and
+ m for most common values (MCV) list statistics
+
+
+
+
+
+ The stxkind field is filled at creation of the
+ statistics object, indicating which statistic type(s) are desired. The
+ statistics (once computed by ANALYZE ) are stored in the
+ pg_statistic_ext_data
+ catalog.
+
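+
+ As a minimal sketch (the table and column names here are hypothetical),
+ a statistics object covering all three kinds could be created and then
+ populated like this:
+
+CREATE STATISTICS addr_stats (ndistinct, dependencies, mcv)
+    ON city, zip FROM addresses;
+ANALYZE addresses;  -- fills the corresponding row in pg_statistic_ext_data
+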
+
+
+
+ pg_statistic_ext_data
+
+
+ pg_statistic_ext_data
+
+
+
+ The catalog pg_statistic_ext_data
+ holds data for extended planner statistics defined in pg_statistic_ext .
+ Each row in this catalog corresponds to a statistics object
+ created with .
+
+
+
+ pg_statistic_ext_data Columns
+
+
+
- stxndistinct
+ Name
+ Type
+ References
+ Description
+
+
+
+
+
+
+ stxoid
+ oid
+ pg_statistic_ext .oid
+ Extended statistics object containing the definition for this data.
+
+
+
+ stxdndistinct
pg_ndistinct
@@ -6572,7 +6670,7 @@ SCRAM-SHA-256$<iteration count> :&l
- stxdependencies
+ stxddependencies
pg_dependencies
@@ -6581,16 +6679,20 @@ SCRAM-SHA-256$<iteration count> :&l
+
+ stxdmcv
+ pg_mcv_list
+
+
+ MCV (most-common values) list statistics, serialized as
+ pg_mcv_list type.
+
+
+
-
- The stxkind field is filled at creation of the
- statistics object, indicating which statistic type(s) are desired.
- The fields after it are initially NULL and are filled only when the
- corresponding statistic has been computed by ANALYZE .
-
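+
+ One way to inspect the serialized contents is the
+ pg_mcv_list_items set-returning function; a hedged
+ example, reusing the hypothetical addr_stats object
+ from above:
+
+SELECT m.*
+FROM pg_statistic_ext JOIN pg_statistic_ext_data ON (oid = stxoid),
+     pg_mcv_list_items(stxdmcv) m
+WHERE stxname = 'addr_stats';
+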
@@ -6608,7 +6710,7 @@ SCRAM-SHA-256$<iteration count> :&l
Unlike most system catalogs, pg_subscription is
- shared across all databases of a cluster: There is only one copy
+ shared across all databases of a cluster: there is only one copy
of pg_subscription per cluster, not one per
database.
@@ -6636,7 +6738,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -6817,7 +6919,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -6839,10 +6941,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -6960,7 +7059,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7169,7 +7268,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7315,7 +7414,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7398,7 +7497,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7495,7 +7594,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7569,7 +7668,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -7912,10 +8011,10 @@ SCRAM-SHA-256$<iteration count> :&l
typcollation specifies the collation
of the type. If the type does not support collations, this will
- be zero. A base type that supports collations will have
- DEFAULT_COLLATION_OID here. A domain over a
- collatable type can have some other collation OID, if one was
- specified for the domain.
+ be zero. A base type that supports collations will have a nonzero
+ value here, typically DEFAULT_COLLATION_OID .
+ A domain over a collatable type can have a collation OID different
+ from its base type's, if one was specified for the domain.
@@ -7952,10 +8051,7 @@ SCRAM-SHA-256$<iteration count> :&l
aclitem[]
- Access privileges; see
- and
-
- for details
+ Access privileges; see for details
@@ -8081,7 +8177,7 @@ SCRAM-SHA-256$<iteration count> :&l
oid
oid
- Row identifier (hidden attribute; must be explicitly selected)
+ Row identifier
@@ -8272,6 +8368,11 @@ SCRAM-SHA-256$<iteration count> :&l
planner statistics
+
+ pg_stats_ext
+ extended planner statistics
+
+
pg_tables
tables
@@ -9859,7 +9960,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The address (LSN ) of oldest WAL which still
might be required by the consumer of this slot and thus won't be
- automatically removed during checkpoints.
+ automatically removed during checkpoints. NULL
+ if the LSN of this slot has never been reserved.
@@ -9893,11 +9995,6 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
that blanks out the password field.
-
- This view explicitly exposes the OID column of the underlying table,
- since that is needed to do joins to other catalogs.
-
-
pg_roles Columns
@@ -10640,7 +10737,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
valuntil
- abstime
+ timestamptz
Password expiry time (only used for password authentication)
@@ -10850,6 +10947,171 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
+
+ pg_stats_ext
+
+
+ pg_stats_ext
+
+
+
+ The view pg_stats_ext provides access to
+ the information stored in the pg_statistic_ext
+ and pg_statistic_ext_data
+ catalogs. This view allows access only to rows of
+ pg_statistic_ext and pg_statistic_ext_data
+ that correspond to tables the user has permission to read, and therefore
+ it is safe to allow public read access to this view.
+
+
+
+ pg_stats_ext is also designed to present the
+ information in a more readable format than the underlying catalogs
+ — at the cost that its schema must be extended whenever new types
+ of extended statistics are added to pg_statistic_ext .
+
+
+
+ pg_stats_ext Columns
+
+
+
+
+ Name
+ Type
+ References
+ Description
+
+
+
+
+ schemaname
+ name
+ pg_namespace .nspname
+ Name of schema containing table
+
+
+
+ tablename
+ name
+ pg_class .relname
+ Name of table
+
+
+
+ statistics_schemaname
+ name
+ pg_namespace .nspname
+ Name of schema containing extended statistic
+
+
+
+ statistics_name
+ name
+ pg_statistic_ext .stxname
+ Name of extended statistics
+
+
+
+ statistics_owner
+ oid
+ pg_authid .oid
+ Owner of the extended statistics
+
+
+
+ attnames
+ name[]
+ pg_attribute .attname
+ Names of the columns the extended statistics object is defined on
+
+
+
+ kinds
+ text[]
+
+ Types of extended statistics enabled for this record
+
+
+
+ n_distinct
+ pg_ndistinct
+
+ N-distinct counts for combinations of column values. If greater
+ than zero, the estimated number of distinct values in the combination.
+ If less than zero, the negative of the number of distinct values divided
+ by the number of rows.
+ (The negated form is used when ANALYZE believes that
+ the number of distinct values is likely to increase as the table grows;
+ the positive form is used when the column seems to have a fixed number
+ of possible values.) For example, -1 indicates a unique combination of
+ columns in which the number of distinct combinations is the same as the
+ number of rows.
+
+
+
+
+ dependencies
+ pg_dependencies
+
+ Functional dependency statistics
+
+
+
+ most_common_vals
+ anyarray
+
+
+ A list of the most common combinations of values in the columns.
+ (Null if no combinations seem to be more common than any others.)
+
+
+
+
+ most_common_val_nulls
+ anyarray
+
+
+ A list of NULL flags for the most common combinations of values.
+ (Null when most_common_vals is.)
+
+
+
+
+ most_common_freqs
+ real[]
+
+
+ A list of the frequencies of the most common combinations,
+ i.e., number of occurrences of each divided by total number of rows.
+ (Null when most_common_vals is.)
+
+
+
+
+ most_common_base_freqs
+ real[]
+
+
+ A list of the base frequencies of the most common combinations,
+ i.e., product of per-value frequencies.
+ (Null when most_common_vals is.)
+
+
+
+
+
+
+
+ The maximum number of entries in the array fields can be controlled on a
+ column-by-column basis using the ALTER TABLE SET STATISTICS
+ command, or globally by setting the
+ run-time parameter.
+
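+
+ For instance (a sketch; the table and column are hypothetical, and the
+ global parameter is presumably default_statistics_target ):
+
+ALTER TABLE addresses ALTER COLUMN zip SET STATISTICS 500;
+SET default_statistics_target = 500;
+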
+
+
+
pg_tables
@@ -11119,7 +11381,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
valuntil
- abstime
+ timestamptz
Password expiry time (only used for password authentication)
diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml
index dc3fd34a624..b672da47d0a 100644
--- a/doc/src/sgml/charset.sgml
+++ b/doc/src/sgml/charset.sgml
@@ -580,7 +580,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
- libc collations
+ libc Collations
For example, the operating system might
@@ -637,7 +637,7 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1;
- ICU collations
+ ICU Collations
With ICU, it is not sensible to enumerate all possible locale names. ICU
@@ -713,7 +713,7 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1;
- libc collations
+ libc Collations
New libc collations can be created like this:
@@ -737,7 +737,7 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE');
- ICU collations
+ ICU Collations
ICU allows collations to be customized beyond the basic language+country
@@ -847,11 +847,13 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE');
Note that while this system allows creating collations that ignore
- case or ignore accents or similar (using
- the ks key), PostgreSQL does not at the moment allow
- such collations to act in a truly case- or accent-insensitive manner. Any
- strings that compare equal according to the collation but are not
- byte-wise equal will be sorted according to their byte values.
+ case or ignore accents or similar (using the
+ ks key), in order for such collations to act in a
+ truly case- or accent-insensitive manner, they also need to be declared as not
+ deterministic in CREATE COLLATION ;
+ see .
+ Otherwise, any strings that compare equal according to the collation but
+ are not byte-wise equal will be sorted according to their byte values.
@@ -883,6 +885,55 @@ CREATE COLLATION french FROM "fr-x-icu";
+
+
+ Nondeterministic Collations
+
+
+ A collation is either deterministic or
+ nondeterministic . A deterministic collation uses
+ deterministic comparisons, which means that it considers strings to be
+ equal only if they consist of the same byte sequence. Nondeterministic
+ comparison may determine strings to be equal even if they consist of
+ different bytes. Typical situations include case-insensitive comparison,
+ accent-insensitive comparison, as well as comparison of strings in
+ different Unicode normal forms. It is up to the collation provider to
+ actually implement such insensitive comparisons; the deterministic flag
+ only determines whether ties are to be broken using bytewise comparison.
+ See also Unicode Technical
+ Standard 10 for more information on the terminology.
+
+
+
+ To create a nondeterministic collation, specify the property
+ deterministic = false to CREATE
+ COLLATION , for example:
+
+CREATE COLLATION ndcoll (provider = icu, locale = 'und', deterministic = false);
+
+ This example would use the standard Unicode collation in a
+ nondeterministic way. In particular, this would allow strings in
+ different normal forms to be compared correctly. More interesting
+ examples make use of the ICU customization facilities explained above.
+ For example:
+
+CREATE COLLATION case_insensitive (provider = icu, locale = 'und-u-ks-level2', deterministic = false);
+CREATE COLLATION ignore_accents (provider = icu, locale = 'und-u-ks-level1-kc-true', deterministic = false);
+
+
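+
+ As a quick illustration (assuming the case_insensitive
+ collation just defined), equality then ignores case:
+
+SELECT 'ABC' = 'abc' COLLATE case_insensitive;  -- returns true
+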
+
+
+ All standard and predefined collations are deterministic, and all
+ user-defined collations are deterministic by default. While
+ nondeterministic collations give a more correct behavior,
+ especially when considering the full power of Unicode and its many
+ special cases, they also have some drawbacks. Foremost, their use leads
+ to a performance penalty. Also, certain operations are not possible with
+ nondeterministic collations, such as pattern matching operations.
+ Therefore, they should be used only in cases where they are specifically
+ wanted.
+
+
@@ -1483,6 +1534,13 @@ $ psql -l
UTF8
+
+ EUC_JIS_2004
+ EUC_JIS_2004 ,
+ SHIFT_JIS_2004 ,
+ UTF8
+
+
EUC_KR
EUC_KR ,
@@ -1538,8 +1596,7 @@ $ psql -l
JOHAB
- JOHAB ,
- UTF8
+ not supported as a server encoding
@@ -1645,6 +1702,11 @@ $ psql -l
not supported as a server encoding
+
+ SHIFT_JIS_2004
+ not supported as a server encoding
+
+
SQL_ASCII
any (no conversion will be performed)
@@ -1834,7 +1896,11 @@ RESET client_encoding;
If the client character set is defined as SQL_ASCII ,
encoding conversion is disabled, regardless of the server's character
- set. Just as for the server, use of SQL_ASCII is unwise
+ set. (However, if the server's character set is
+ not SQL_ASCII , the server will still check that
+ incoming data is valid for that encoding; so the net effect is as
+ though the client character set were the same as the server's.)
+ Just as for the server, use of SQL_ASCII is unwise
unless you are working with all-ASCII data.
diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml
index b1fe7101b20..85aa339d8ba 100644
--- a/doc/src/sgml/citext.sgml
+++ b/doc/src/sgml/citext.sgml
@@ -14,6 +14,16 @@
exactly like text .
+
+
+ Consider using nondeterministic collations (see
+ ) instead of this module. They
+ can be used for case-insensitive comparisons, accent-insensitive
+ comparisons, and other combinations, and they handle more Unicode special
+ cases correctly.
+
+
+
Rationale
@@ -246,6 +256,17 @@ SELECT * FROM users WHERE nick = 'Larry';
will be invoked instead.
+
+
+
+ The approach of lower-casing strings for comparison does not handle some
+ Unicode special cases correctly, for example when one upper-case letter
+ has two lower-case letter equivalents. Unicode distinguishes between
+ case mapping and case
+ folding for this reason. Use nondeterministic collations
+ instead of citext to handle that correctly.
+
+
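+
+ A minimal sketch of that approach (the collation and table names are
+ hypothetical, and an ICU build is assumed):
+
+CREATE COLLATION ci (provider = icu, locale = 'und-u-ks-level2', deterministic = false);
+CREATE TABLE users (nick text COLLATE ci);
+SELECT * FROM users WHERE nick = 'Larry';  -- also matches 'larry', 'LARRY', ...
+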
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml
index 53832d08e29..9236fc014c4 100644
--- a/doc/src/sgml/client-auth.sgml
+++ b/doc/src/sgml/client-auth.sgml
@@ -108,6 +108,8 @@ hostnossl database user
host database user IP-address IP-mask auth-method auth-options
hostssl database user IP-address IP-mask auth-method auth-options
hostnossl database user IP-address IP-mask auth-method auth-options
+hostgssenc database user IP-address IP-mask auth-method auth-options
+hostnogssenc database user IP-address IP-mask auth-method auth-options
The meaning of the fields is as follows:
@@ -128,9 +130,10 @@ hostnossl database user
This record matches connection attempts made using TCP/IP.
- host records match either
+ host records match
SSL or non-SSL connection
- attempts.
+ attempts as well as GSSAPI encrypted or
+ non-GSSAPI encrypted connection attempts.
@@ -176,6 +179,43 @@ hostnossl database user
+
+ hostgssenc
+
+
+ This record matches connection attempts made using TCP/IP,
+ but only when the connection is made with GSSAPI
+ encryption.
+
+
+
+ To make use of this option the server must be built with
+ GSSAPI support. Otherwise,
+ the hostgssenc record is ignored except for logging
+ a warning that it cannot match any connections.
+
+
+
+ Note that the only supported
+ authentication methods for use
+ with GSSAPI encryption
+ are gss , reject ,
+ and trust .
+
+
+
+
+
+ hostnogssenc
+
+
+ This record type has the opposite behavior of hostgssenc ;
+ it only matches connection attempts made over
+ TCP/IP that do not use GSSAPI encryption.
+
+
+
+
database
@@ -451,7 +491,8 @@ hostnossl database user
Use GSSAPI to authenticate the user. This is only
available for TCP/IP connections. See for details.
+ linkend="gssapi-auth"/> for details. It can be used in conjunction
+ with GSSAPI encryption.
@@ -563,10 +604,17 @@ hostnossl database user
In addition to the method-specific options listed below, there is one
method-independent authentication option clientcert , which
- can be specified in any hostssl record. When set
- to 1 , this option requires the client to present a valid
- (trusted) SSL certificate, in addition to the other requirements of the
- authentication method.
+ can be specified in any hostssl record.
+ This option can be set to verify-ca or
+ verify-full . Both options require the client
+ to present a valid (trusted) SSL certificate, while
+ verify-full additionally enforces that the
+ cn (Common Name) in the certificate matches
+ the username or an applicable mapping.
+ This behavior is similar to the cert authentication
+ method (see ) but enables pairing
+ the verification of client certificates with any authentication
+ method that supports hostssl entries.
@@ -603,8 +651,9 @@ hostnossl database user
SIGHUP SIGHUP
signal. If you edit the file on an
active system, you will need to signal the postmaster
- (using pg_ctl reload or kill -HUP ) to make it
- re-read the file.
+ (using pg_ctl reload , calling the SQL function
+ pg_reload_conf() , or using kill
+ -HUP ) to make it re-read the file.
@@ -624,7 +673,7 @@ hostnossl database user
non-null error fields indicate problems in the
corresponding lines of the file.
-
+
To connect to a particular database, a user must not only pass the
@@ -696,15 +745,18 @@ host postgres all 192.168.12.10/32 scram-sha-256
host all mike .example.com md5
host all all .example.com scram-sha-256
-# In the absence of preceding "host" lines, these two lines will
+# In the absence of preceding "host" lines, these three lines will
# reject all connections from 192.168.54.1 (since that entry will be
-# matched first), but allow GSSAPI connections from anywhere else
-# on the Internet. The zero mask causes no bits of the host IP
-# address to be considered, so it matches any host.
+# matched first), but allow GSSAPI-encrypted connections from anywhere else
+# on the Internet. The zero mask causes no bits of the host IP address to
+# be considered, so it matches any host. Unencrypted GSSAPI connections
+# (which "fall through" to the third line since "hostgssenc" only matches
+# encrypted GSSAPI connections) are allowed, but only from 192.168.12.10.
#
# TYPE DATABASE USER ADDRESS METHOD
host all all 192.168.54.1/32 reject
-host all all 0.0.0.0/0 gss
+hostgssenc all all 0.0.0.0/0 gss
+host all all 192.168.12.10/32 gss
# Allow users from 192.168.x.x hosts to connect to any database, if
# they pass the ident check. If, for example, ident says the user is
@@ -821,8 +873,9 @@ mymap /^(.*)@otherdomain\.com$ guest
SIGHUP SIGHUP
signal. If you edit the file on an
active system, you will need to signal the postmaster
- (using pg_ctl reload or kill -HUP ) to make it
- re-read the file.
+ (using pg_ctl reload , calling the SQL function
+ pg_reload_conf() , or using kill
+ -HUP ) to make it re-read the file.
@@ -859,10 +912,11 @@ omicron bryanh guest1
Authentication Methods
- The following subsections describe the authentication methods in more detail.
+ The following sections describe the authentication methods in more detail.
+
-
+
Trust Authentication
@@ -908,9 +962,9 @@ omicron bryanh guest1
for any TCP/IP connections other than those from localhost (127.0.0.1).
-
+
-
+
Password Authentication
@@ -1038,9 +1092,9 @@ omicron bryanh guest1
and change the authentication method specifications
in pg_hba.conf to scram-sha-256 .
-
+
-
+
GSSAPI Authentication
@@ -1050,13 +1104,16 @@ omicron bryanh guest1
GSSAPI is an industry-standard protocol
for secure authentication defined in RFC 2743.
- PostgreSQL supports
- GSSAPI with Kerberos
- authentication according to RFC 1964. GSSAPI
- provides automatic authentication (single sign-on) for systems
- that support it. The authentication itself is secure, but the
- data sent over the database connection will be sent unencrypted unless
- SSL is used.
+
+ PostgreSQL
+ supports GSSAPI either as an encrypted,
+ authenticated layer or for authentication only.
+ GSSAPI provides automatic authentication
+ (single sign-on) for systems that support it. The authentication itself is
+ secure. If GSSAPI encryption
+ (see hostgssenc ) or SSL encryption is
+ used, the data sent along the database connection will be encrypted;
+ otherwise, it will not.
@@ -1192,9 +1249,9 @@ omicron bryanh guest1
-
+
-
+
SSPI Authentication
@@ -1310,9 +1367,9 @@ omicron bryanh guest1
-
+
-
+
Ident Authentication
@@ -1391,9 +1448,9 @@ omicron bryanh guest1
since PostgreSQL does not have any way to decrypt the
returned string to determine the actual user name.
-
+
-
+
Peer Authentication
@@ -1432,9 +1489,9 @@ omicron bryanh guest1
and Solaris .
-
+
-
+
LDAP Authentication
@@ -1647,7 +1704,8 @@ ldap[s]://host [:port ]/
- LDAP URLs are currently only supported with OpenLDAP, not on Windows.
+ LDAP URLs are currently only supported with
+ OpenLDAP , not on Windows.
@@ -1670,6 +1728,15 @@ ldap[s]://host [:port ]/ldapsearchattribute=uid.
+
+ If PostgreSQL was compiled with
+ OpenLDAP as the LDAP client library, the
+ ldapserver setting may be omitted. In that case, a
+ list of host names and ports is looked up via RFC 2782 DNS SRV records.
+ The name _ldap._tcp.DOMAIN is looked up, where
+ DOMAIN is extracted from ldapbasedn .
+
+
Here is an example for a simple-bind LDAP configuration:
@@ -1715,6 +1782,15 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
+
+ Here is an example for a search+bind configuration that uses DNS SRV
+ discovery to find the host name(s) and port(s) for the LDAP service for the
+ domain name example.net :
+
+host ... ldap ldapbasedn="dc=example,dc=net"
+
+
+
Since LDAP often uses commas and spaces to separate the different
@@ -1723,9 +1799,9 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
-
+
-
+
RADIUS Authentication
@@ -1824,9 +1900,9 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
-
+
-
+
Certificate Authentication
@@ -1864,15 +1940,15 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
In a pg_hba.conf record specifying certificate
authentication, the authentication option clientcert is
- assumed to be 1 , and it cannot be turned off since a client
- certificate is necessary for this method. What the cert
- method adds to the basic clientcert certificate validity test
- is a check that the cn attribute matches the database
- user name.
+ assumed to be verify-ca or verify-full ,
+ and it cannot be turned off since a client certificate is necessary for this
+ method. What the cert method adds to the basic
+ clientcert certificate validity test is a check that the
+ cn attribute matches the database user name.
-
+
-
+
PAM Authentication
@@ -1888,7 +1964,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
connected remote host name or IP address. Therefore the user must already
exist in the database before PAM can be used for authentication. For more
information about PAM, please read the
-
+
Linux-PAM Page .
@@ -1928,9 +2004,9 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
LDAP or other authentication methods.
-
+
-
+
BSD Authentication
@@ -1963,8 +2039,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
exists by default on OpenBSD systems.
-
-
+
Authentication Problems
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 5d5f2d23c4f..619ac8c50c8 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -51,14 +51,21 @@
In general, enclose the value in single quotes, doubling any single
quotes within the value. Quotes can usually be omitted if the value
is a simple number or identifier, however.
+ (Values that match a SQL keyword require quoting in some contexts.)
Numeric (integer and floating point):
- A decimal point is permitted only for floating-point parameters.
- Do not use thousands separators. Quotes are not required.
+ Numeric parameters can be specified in the customary integer and
+ floating-point formats; fractional values are rounded to the nearest
+ integer if the parameter is of integer type. Integer parameters
+ additionally accept hexadecimal input (beginning
+ with 0x ) and octal input (beginning
+ with 0 ), but these formats cannot have a fraction.
+ Do not use thousands separators.
+ Quotes are not required, except for hexadecimal input.
@@ -66,7 +73,7 @@
Numeric with Unit:
Some numeric parameters have an implicit unit, because they describe
- quantities of memory or time. The unit might be kilobytes, blocks
+ quantities of memory or time. The unit might be bytes, kilobytes, blocks
(typically eight kilobytes), milliseconds, seconds, or minutes.
An unadorned numeric value for one of these settings will use the
setting's default unit, which can be learned from
@@ -81,7 +88,8 @@
- Valid memory units are kB (kilobytes),
+ Valid memory units are B (bytes),
+ kB (kilobytes),
MB (megabytes), GB
(gigabytes), and TB (terabytes).
The multiplier for memory units is 1024, not 1000.
@@ -90,12 +98,21 @@
- Valid time units are ms (milliseconds),
+ Valid time units are
+ us (microseconds),
+ ms (milliseconds),
s (seconds), min (minutes),
h (hours), and d (days).
+
+ If a fractional value is specified with a unit, it will be rounded
+ to a multiple of the next smaller unit if there is one.
+ For example, 30.1 GB will be converted
+ to 30822 MB , not 32319628902 B .
+ If the parameter is of integer type, a final rounding to integer
+ occurs after any units conversion.
@@ -136,6 +153,8 @@ shared_buffers = 128MB
identifiers or numbers must be single-quoted. To embed a single
quote in a parameter value, write either two quotes (preferred)
or backslash-quote.
+ If the file contains multiple entries for the same parameter,
+ all but the last one are ignored.
@@ -168,18 +187,29 @@ shared_buffers = 128MB
In addition to postgresql.conf ,
a PostgreSQL data directory contains a file
postgresql.auto.conf postgresql.auto.conf ,
- which has the same format as postgresql.conf but should
- never be edited manually. This file holds settings provided through
- the command. This file is automatically
- read whenever postgresql.conf is, and its settings take
- effect in the same way. Settings in postgresql.auto.conf
- override those in postgresql.conf .
+ which has the same format as postgresql.conf but
+ is intended to be edited automatically, not manually. This file holds
+ settings provided through the command.
+ This file is read whenever postgresql.conf is,
+ and its settings take effect in the same way. Settings
+ in postgresql.auto.conf override those
+ in postgresql.conf .
+
+
+
+ External tools may also
+ modify postgresql.auto.conf . It is not
+ recommended to do this while the server is running, since a
+ concurrent ALTER SYSTEM command could overwrite
+ such changes. Such tools might simply append new settings to the end,
+ or they might choose to remove duplicate settings and/or comments
+ (as ALTER SYSTEM will).
The system view
pg_file_settings
- can be helpful for pre-testing changes to the configuration file, or for
+ can be helpful for pre-testing changes to the configuration files, or for
diagnosing problems if a SIGHUP signal did not have the
desired effects.
@@ -696,8 +726,7 @@ include_dir 'conf.d'
The default value is three connections. The value must be less
- than max_connections minus
- .
+ than max_connections .
This parameter can only be set at server start.
@@ -923,6 +952,31 @@ include_dir 'conf.d'
+
+ tcp_user_timeout (integer )
+
+ tcp_user_timeout configuration parameter
+
+
+
+
+ Specifies the number of milliseconds that transmitted data may
+ remain unacknowledged before a connection is forcibly closed.
+ A value of 0 uses the system default.
+ This parameter is supported only on systems that support
+ TCP_USER_TIMEOUT ; on other systems, it must be zero.
+ In sessions connected via a Unix-domain socket, this parameter is
+ ignored and always reads as zero.
+
+
+
+ This parameter is not supported on Windows, nor on Linux versions
+ 2.6.36 and older.
+
+
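+
+ For example (a sketch; the value is arbitrary), to drop connections
+ whose data remains unacknowledged for more than ten seconds:
+
+SET tcp_user_timeout = '10s';  -- stored internally as 10000 milliseconds
+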
+
+
+
@@ -1248,7 +1302,7 @@ include_dir 'conf.d'
than the client's.
This parameter can only be set in the postgresql.conf
file or on the server command line.
- The default is true .
+ The default is on .
@@ -1290,6 +1344,50 @@ include_dir 'conf.d'
+
+ ssl_min_protocol_version (enum )
+
+ ssl_min_protocol_version configuration parameter
+
+
+
+
+ Sets the minimum SSL/TLS protocol version to use. Valid values are
+ currently: TLSv1 , TLSv1.1 ,
+ TLSv1.2 , TLSv1.3 . Older
+ versions of the OpenSSL library do not
+ support all values; an error will be raised if an unsupported setting
+ is chosen. Protocol versions before TLS 1.0, namely SSL versions 2
+ and 3, are always disabled.
+
+
+
+ The default is TLSv1 , mainly to support older
+ versions of the OpenSSL library. You might
+ want to set this to a higher value if all software components can
+ support the newer protocol versions.
+
+
+
+
+
+ ssl_max_protocol_version (enum )
+
+ ssl_max_protocol_version configuration parameter
+
+
+
+
+ Sets the maximum SSL/TLS protocol version to use. Valid values are as
+ for , with addition of
+ an empty string, which allows any protocol version. The default is to
+ allow any version. Setting the maximum protocol version is mainly
+ useful for testing or if some component has issues working with a
+ newer protocol.
+
+
+
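+
+ As a hedged example, to require at least TLS 1.2 (assuming the
+ installed OpenSSL supports it):
+
+ALTER SYSTEM SET ssl_min_protocol_version = 'TLSv1.2';
+SELECT pg_reload_conf();
+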
+
ssl_dh_params_file (string )
@@ -1359,12 +1457,12 @@ include_dir 'conf.d'
This parameter determines whether the passphrase command set by
ssl_passphrase_command will also be called during a
configuration reload if a key file needs a passphrase. If this
- parameter is false (the default), then
+ parameter is off (the default), then
ssl_passphrase_command will be ignored during a
reload and the SSL configuration will not be reloaded if a passphrase
is needed. That setting is appropriate for a command that requires a
TTY for prompting, which might not be available when the server is
- running. Setting this parameter to true might be appropriate if the
+ running. Setting this parameter to on might be appropriate if the
passphrase is obtained from a file, for example.
@@ -1630,12 +1728,11 @@ include_dir 'conf.d'
enforced by the kernel (as set by ulimit -s or local
equivalent), less a safety margin of a megabyte or so. The safety
margin is needed because the stack depth is not checked in every
- routine in the server, but only in key potentially-recursive routines
- such as expression evaluation. The default setting is two
- megabytes (2MB ), which is conservatively small and
- unlikely to risk crashes. However, it might be too small to allow
- execution of complex functions. Only superusers can change this
- setting.
+ routine in the server, but only in key potentially-recursive routines.
+ The default setting is two megabytes (2MB ), which
+ is conservatively small and unlikely to risk crashes. However,
+ it might be too small to allow execution of complex functions.
+ Only superusers can change this setting.
@@ -1650,6 +1747,31 @@ include_dir 'conf.d'
+
+ shared_memory_type (enum )
+
+ shared_memory_type configuration parameter
+
+
+
+
+ Specifies the shared memory implementation that the server
+ should use for the main shared memory region that holds
+ PostgreSQL 's shared buffers and other
+ shared data. Possible values are mmap (for
+ anonymous shared memory allocated using mmap ),
+ sysv (for System V shared memory allocated via
+ shmget ) and windows (for Windows
+ shared memory). Not all values are supported on all platforms; the
+ first supported option is the default for that platform. The use of
+ the sysv option, which is not the default on any
+ platform, is generally discouraged because it typically requires
+ non-default kernel settings to allow for large allocations (see ).
+
+
+
+
dynamic_shared_memory_type (enum )
@@ -1662,9 +1784,9 @@ include_dir 'conf.d'
should use. Possible values are posix (for POSIX shared
memory allocated using shm_open ), sysv
(for System V shared memory allocated via shmget ),
- windows (for Windows shared memory), mmap
- (to simulate shared memory using memory-mapped files stored in the
- data directory), and none (to disable this feature).
+ windows (for Windows shared memory),
+ and mmap (to simulate shared memory using
+ memory-mapped files stored in the data directory).
Not all values are supported on all platforms; the first supported
option is the default for that platform. The use of the
mmap option, which is not the default on any platform,
@@ -1777,7 +1899,7 @@ include_dir 'conf.d'
- vacuum_cost_delay (integer )
+ vacuum_cost_delay (floating point )
vacuum_cost_delay configuration parameter
@@ -1788,18 +1910,19 @@ include_dir 'conf.d'
when the cost limit has been exceeded.
The default value is zero, which disables the cost-based vacuum
delay feature. Positive values enable cost-based vacuuming.
- Note that on many systems, the effective resolution
- of sleep delays is 10 milliseconds; setting
- vacuum_cost_delay to a value that is
- not a multiple of 10 might have the same results as setting it
- to the next higher multiple of 10.
When using cost-based vacuuming, appropriate values for
vacuum_cost_delay are usually quite small, perhaps
- 10 or 20 milliseconds. Adjusting vacuum's resource consumption
- is best done by changing the other vacuum cost parameters.
+ less than 1 millisecond. While vacuum_cost_delay
+ can be set to fractional-millisecond values, such delays may not be
+ measured accurately on older platforms. On such platforms,
+ increasing VACUUM 's throttled resource consumption
+ above what you get at 1ms will require changing the other vacuum cost
+ parameters. You should, nonetheless,
+ keep vacuum_cost_delay as small as your platform
+ will consistently measure; large delays are not helpful.
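+
+ For example (a sketch), a sub-millisecond delay can be given either as
+ a fraction of the default unit or with a smaller unit:
+
+SET vacuum_cost_delay = 0.5;      -- milliseconds
+SET vacuum_cost_delay = '500us';  -- the same delay
+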
@@ -1882,31 +2005,6 @@ include_dir 'conf.d'
-
- Index Vacuum
-
-
- vacuum_cleanup_index_scale_factor (floating point )
-
- vacuum_cleanup_index_scale_factor configuration parameter
-
-
-
-
- When no tuples were deleted from the heap, B-tree indexes might still
- be scanned during VACUUM cleanup stage by two
- reasons. The first reason is that B-tree index contains deleted pages
- which can be recycled during cleanup. The second reason is that B-tree
- index statistics is stalled. The criterion of stalled index statistics
- is number of inserted tuples since previous statistics collection
- is greater than vacuum_cleanup_index_scale_factor
- fraction of total number of heap tuples.
-
-
-
-
-
-
Background Writer
@@ -2173,7 +2271,7 @@ include_dir 'conf.d'
pool of processes established by , limited by . Note that the requested
- number of workers may not actually be available at runtime.
+ number of workers may not actually be available at run time.
If this occurs, the utility operation will run with fewer
workers than expected. The default value is 2. Setting this
value to 0 disables the use of parallel workers by utility
@@ -3024,6 +3122,394 @@ include_dir 'conf.d'
+
+
+ Archive Recovery
+
+
+ configuration
+ of recovery
+ of a standby server
+
+
+
+ This section describes the settings that apply only for the duration of
+ the recovery. They must be reset for any subsequent recovery you wish to
+ perform.
+
+
+
+ Recovery covers using the server as a standby or for
+ executing a targeted recovery. Typically, standby mode would be used to
+ provide high availability and/or read scalability, whereas a targeted
+ recovery is used to recover from data loss.
+
+
+
+ To start the server in standby mode, create a file called
+ standby.signal standby.signal
+ in the data directory. The server will enter recovery and will not stop
+ recovery when the end of archived WAL is reached, but will keep trying to
+ continue recovery by connecting to the sending server as specified by the
+ primary_conninfo setting and/or by fetching new WAL
+ segments using restore_command . For this mode, the
+ parameters from this section and are of interest.
+ Parameters from will
+ also be applied but are typically not useful in this mode.
+
+
+
+ To start the server in targeted recovery mode, create a file called
+ recovery.signal recovery.signal
+ in the data directory. If both standby.signal and
+ recovery.signal files are created, standby mode
+ takes precedence. Targeted recovery mode ends when the archived WAL is
+ fully replayed, or when recovery_target is reached.
+ In this mode, the parameters from both this section and will be used. Parameters
+ from will not be
+ used.
+
+
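+
+ A minimal postgresql.conf sketch for a targeted
+ recovery (the archive path and timestamp are hypothetical;
+ recovery.signal must also be created in the data
+ directory before starting the server):
+
+restore_command = 'cp /mnt/server/archivedir/%f "%p"'
+recovery_target_time = '2019-05-01 12:00:00+00'
+recovery_target_action = 'promote'
+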
+
+
+ restore_command (string )
+
+ restore_command configuration parameter
+
+
+
+
+ The local shell command to execute to retrieve an archived segment of
+ the WAL file series. This parameter is required for archive recovery,
+ but optional for streaming replication.
+ Any %f in the string is
+ replaced by the name of the file to retrieve from the archive,
+ and any %p is replaced by the copy destination path name
+ on the server.
+ (The path name is relative to the current working directory,
+ i.e., the cluster's data directory.)
+ Any %r is replaced by the name of the file containing the
+ last valid restart point. That is the earliest file that must be kept
+ to allow a restore to be restartable, so this information can be used
+ to truncate the archive to just the minimum required to support
+ restarting from the current restore. %r is typically only
+ used by warm-standby configurations
+ (see ).
+ Write %% to embed an actual % character.
+
+
+
+ It is important for the command to return a zero exit status
+ only if it succeeds. The command will be asked for file
+ names that are not present in the archive; it must return nonzero
+ when so asked. Examples:
+
+restore_command = 'cp /mnt/server/archivedir/%f "%p"'
+restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
+
+ An exception is that if the command was terminated by a signal (other
+ than SIGTERM , which is used as part of a
+ database server shutdown) or an error by the shell (such as command
+ not found), then recovery will abort and the server will not start up.
+
+
+
+ This parameter can only be set at server start.
+
+
+
+
+
+ archive_cleanup_command (string )
+
+ archive_cleanup_command configuration parameter
+
+
+
+
+ This optional parameter specifies a shell command that will be executed
+ at every restartpoint. The purpose of
+ archive_cleanup_command is to provide a mechanism for
+ cleaning up old archived WAL files that are no longer needed by the
+ standby server.
+ Any %r is replaced by the name of the file containing the
+ last valid restart point.
+ That is the earliest file that must be kept to allow a
+ restore to be restartable, and so all files earlier than %r
+ may be safely removed.
+ This information can be used to truncate the archive to just the
+ minimum required to support restart from the current restore.
+ The module
+ is often used in archive_cleanup_command for
+ single-standby configurations, for example:
+archive_cleanup_command = 'pg_archivecleanup /mnt/server/archivedir %r'
+ Note however that if multiple standby servers are restoring from the
+ same archive directory, you will need to ensure that you do not delete
+ WAL files until they are no longer needed by any of the servers.
+ archive_cleanup_command would typically be used in a
+ warm-standby configuration (see ).
+ Write %% to embed an actual % character in the
+ command.
+
+
+ If the command returns a nonzero exit status then a warning log
+ message will be written. An exception is that if the command was
+ terminated by a signal or an error by the shell (such as command not
+ found), a fatal error will be raised.
+
+
+ This parameter can only be set in the postgresql.conf
+ file or on the server command line.
+
+
+
+
+
+ recovery_end_command (string )
+
+ recovery_end_command configuration parameter
+
+
+
+
+ This parameter specifies a shell command that will be executed once only
+ at the end of recovery. This parameter is optional. The purpose of the
+ recovery_end_command is to provide a mechanism for cleanup
+ following replication or recovery.
+ Any %r is replaced by the name of the file containing the
+ last valid restart point, like in .
+
+
+ If the command returns a nonzero exit status then a warning log
+ message will be written and the database will proceed to start up
+ anyway. An exception is that if the command was terminated by a
+ signal or an error by the shell (such as command not found), the
+ database will not proceed with startup.
+
+
+ This parameter can only be set in the postgresql.conf
+ file or on the server command line.
+
+
+
+
+
+
+
+
+
+
+ Recovery Target
+
+
+ By default, recovery will recover to the end of the WAL. The
+ following parameters can be used to specify an earlier stopping point.
+ At most one of recovery_target ,
+ recovery_target_lsn , recovery_target_name ,
+ recovery_target_time , or recovery_target_xid
+ can be used; if more than one of these is specified in the configuration
+ file, an error will be raised.
+ These parameters can only be set at server start.
+
+
+
+
+ recovery_target = 'immediate'
+
+ recovery_target configuration parameter
+
+
+
+
+ This parameter specifies that recovery should end as soon as a
+ consistent state is reached, i.e., as early as possible. When restoring
+ from an online backup, this means the point where taking the backup
+ ended.
+
+
+ Technically, this is a string parameter, but 'immediate'
+ is currently the only allowed value.
+
+
+
+
+
+ recovery_target_name (string )
+
+ recovery_target_name configuration parameter
+
+
+
+
+ This parameter specifies the named restore point (created with
+ pg_create_restore_point() ) to which recovery will proceed.
+
+
+
+
+
+ recovery_target_time (timestamp )
+
+ recovery_target_time configuration parameter
+
+
+
+
+ This parameter specifies the time stamp up to which recovery
+ will proceed.
+ The precise stopping point is also influenced by
+ .
+
+
+
+
+
+ recovery_target_xid (string )
+
+ recovery_target_xid configuration parameter
+
+
+
+
+ This parameter specifies the transaction ID up to which recovery
+ will proceed. Keep in mind
+ that while transaction IDs are assigned sequentially at transaction
+ start, transactions can complete in a different numeric order.
+ The transactions that will be recovered are those that committed
+ before (and optionally including) the specified one.
+ The precise stopping point is also influenced by
+ .
+
+
+
+
+
+ recovery_target_lsn (pg_lsn )
+
+ recovery_target_lsn configuration parameter
+
+
+
+
+ This parameter specifies the LSN of the write-ahead log location up
+ to which recovery will proceed. The precise stopping point is also
+ influenced by . This
+ parameter is parsed using the system data type
+ pg_lsn .
+
+
+
+
+
+
+ The following options further specify the recovery target, and affect
+ what happens when the target is reached:
+
+
+
+
+ recovery_target_inclusive (boolean )
+
+ recovery_target_inclusive configuration parameter
+
+
+
+
+ Specifies whether to stop just after the specified recovery target
+ (on ), or just before the recovery target
+ (off ).
+ Applies when ,
+ , or
+ is specified.
+ This setting controls whether transactions
+ having exactly the target WAL location (LSN), commit time, or transaction ID, respectively, will
+ be included in the recovery. Default is on .
+
+
+
+
+
+ recovery_target_timeline (string )
+
+ recovery_target_timeline configuration parameter
+
+
+
+
+ Specifies recovering into a particular timeline. The value can be a
+ numeric timeline ID or a special value. The value
+ current recovers along the same timeline that was
+ current when the base backup was taken. The
+ value latest recovers
+ to the latest timeline found in the archive, which is useful in
+ a standby server. latest is the default.
+
+
+
+ You usually only need to set this parameter
+ in complex re-recovery situations, where you need to return to
+ a state that itself was reached after a point-in-time recovery.
+ See for discussion.
+
+
+
+
+
+ recovery_target_action (enum )
+
+ recovery_target_action configuration parameter
+
+
+
+
+ Specifies what action the server should take once the recovery target is
+ reached. The default is pause , which means recovery will
+ be paused. promote means the recovery process will finish
+ and the server will start to accept connections.
+ Finally shutdown will stop the server after reaching the
+ recovery target.
+
+
+ The intended use of the pause setting is to allow queries
+ to be executed against the database to check if this recovery target
+ is the most desirable point for recovery.
+ The paused state can be resumed by
+ using pg_wal_replay_resume() (see
+ ), which then
+ causes recovery to end. If this recovery target is not the
+ desired stopping point, then shut down the server, change the
+ recovery target settings to a later target and restart to
+ continue recovery.
+
+
+ The shutdown setting is useful to have the instance ready
+ at the exact replay point desired. The instance will still be able to
+ replay more WAL records (and in fact will have to replay WAL records
+ since the last checkpoint next time it is started).
+
+
+ Note that because recovery.signal will not be
+ removed when recovery_target_action is set to shutdown ,
+ any subsequent start will end with immediate shutdown unless the
+ configuration is changed or the recovery.signal
+ file is removed manually.
+
+
+ This setting has no effect if no recovery target is set.
+ If is not enabled, a setting of
+ pause will act the same as shutdown .
+
+
+
+
+
+
+
@@ -3033,17 +3519,17 @@ include_dir 'conf.d'
These settings control the behavior of the built-in
streaming replication feature (see
). Servers will be either a
- Master or a Standby server. Masters can send data, while Standby(s)
+ master or a standby server. Masters can send data, while standbys
are always receivers of replicated data. When cascading replication
- (see ) is used, Standby server(s)
+ (see ) is used, standby servers
can also be senders, as well as receivers.
- Parameters are mainly for Sending and Standby servers, though some
- parameters have meaning only on the Master server. Settings may vary
+ Parameters are mainly for sending and standby servers, though some
+ parameters have meaning only on the master server. Settings may vary
across the cluster without problems if that is required.
- Sending Server(s)
+ Sending Servers
These parameters can be set on any server that is
@@ -3063,24 +3549,25 @@ include_dir 'conf.d'
- Specifies the maximum number of concurrent connections from
- standby servers or streaming base backup clients (i.e., the
- maximum number of simultaneously running WAL sender
- processes). The default is 10. The value 0 means replication is
- disabled. WAL sender processes count towards the total number
- of connections, so this parameter's value must be less than
- minus
- .
- Abrupt streaming client disconnection might leave an orphaned
- connection slot behind until
- a timeout is reached, so this parameter should be set slightly
- higher than the maximum number of expected clients so disconnected
- clients can immediately reconnect. This parameter can only
- be set at server start.
- Also, wal_level must be set to
+ Specifies the maximum number of concurrent connections from standby
+ servers or streaming base backup clients (i.e., the maximum number of
+ simultaneously running WAL sender processes). The default is
+ 10 . The value 0 means
+ replication is disabled. Abrupt streaming client disconnection might
+ leave an orphaned connection slot behind until a timeout is reached,
+ so this parameter should be set slightly higher than the maximum
+ number of expected clients so disconnected clients can immediately
+ reconnect. This parameter can only be set at server start. Also,
+ wal_level must be set to
replica or higher to allow connections from standby
servers.
+
+
+ When running a standby server, you must set this parameter to a
+ value equal to or higher than on the master server. Otherwise, queries
+ will not be allowed in the standby server.
+
@@ -3141,6 +3628,41 @@ include_dir 'conf.d'
+
+ wal_init_zero (boolean )
+
+ wal_init_zero configuration parameter
+
+
+
+
+ If set to on (the default), this option causes new
+ WAL files to be filled with zeroes. On some file systems, this ensures
+ that space is allocated before we need to write WAL records. However,
+ Copy-On-Write (COW) file systems may not benefit
+ from this technique, so the option is given to skip the unnecessary
+ work. If set to off , only the final byte is written
+ when the file is created so that it has the expected size.
+
+
+
+
+
+ wal_recycle (boolean )
+
+ wal_recycle configuration parameter
+
+
+
+
+ If set to on (the default), this option causes WAL
+ files to be recycled by renaming them, avoiding the need to create new
+ ones. On COW file systems, it may be faster to create new ones, so the
+ option is given to disable this behavior.
+
+
+
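+
+ For example (a sketch), on a copy-on-write file system such as
+ ZFS, both behaviors might be disabled in
+ postgresql.conf :
+
+wal_init_zero = off
+wal_recycle = off
+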
+
wal_sender_timeout (integer )
@@ -3152,10 +3674,14 @@ include_dir 'conf.d'
Terminate replication connections that are inactive longer
than the specified number of milliseconds. This is useful for
the sending server to detect a standby crash or network outage.
- A value of zero disables the timeout mechanism. This parameter
- can only be set in
- the postgresql.conf file or on the server command line.
- The default value is 60 seconds.
+ A value of zero disables the timeout mechanism. The default value
+ is 60 seconds. With a cluster distributed across multiple geographic
+ locations, using different values per location brings more flexibility
+ in cluster management. A smaller value is useful for faster
+ failure detection of a standby with a low-latency network
+ connection, while a larger value helps to better judge the health
+ of a standby at a remote location, over a high-latency
+ network connection.
@@ -3212,9 +3738,9 @@ include_dir 'conf.d'
The synchronous standbys will be those whose names appear
in this list, and
that are both currently connected and streaming data in real-time
- (as shown by a state of streaming in the
-
- pg_stat_replication view).
+ (as shown by a state of streaming in the pg_stat_replication
+ view).
Specifying more than one synchronous standby can allow for very high
availability and protection against data loss.
@@ -3223,11 +3749,12 @@ include_dir 'conf.d'
application_name setting of the standby, as set in the
standby's connection information. In case of a physical replication
standby, this should be set in the primary_conninfo
- setting in recovery.conf ; the default
- is walreceiver . For logical replication, this can
- be set in the connection information of the subscription, and it
- defaults to the subscription name. For other replication stream
- consumers, consult their documentation.
+ setting; the default is the setting of
+ if set, else walreceiver .
+ For logical replication, this can be set in the connection
+ information of the subscription, and it defaults to the
+ subscription name. For other replication stream consumers,
+ consult their documentation.
This parameter specifies a list of standby servers using
@@ -3370,6 +3897,80 @@ ANY num_sync (
+ primary_conninfo (string )
+
+ primary_conninfo configuration parameter
+
+
+
+
+ Specifies a connection string to be used for the standby server
+ to connect with a sending server. This string is in the format
+ described in . If any option is
+ unspecified in this string, then the corresponding environment
+ variable (see ) is checked. If the
+ environment variable is not set either, then
+ defaults are used.
+
+
+ The connection string should specify the host name (or address)
+ of the sending server, as well as the port number if it is not
+ the same as the standby server's default.
+ Also specify a user name corresponding to a suitably-privileged role
+ on the sending server (see
+ ).
+ A password needs to be provided too, if the sender demands password
+ authentication. It can be provided in the
+ primary_conninfo string, or in a separate
+ ~/.pgpass file on the standby server (use
+ replication as the database name).
+ Do not specify a database name in the
+ primary_conninfo string.
+
+
+ This parameter can only be set at server start.
+ This setting has no effect if the server is not in standby mode.
+
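+
+ An illustrative (hypothetical) setting:
+
+primary_conninfo = 'host=primary.example.com port=5432 user=replicator'
+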
+
+
+
+ primary_slot_name (string )
+
+ primary_slot_name configuration parameter
+
+
+
+
+ Optionally specifies an existing replication slot to be used when
+ connecting to the sending server via streaming replication to control
+ resource removal on the upstream node
+ (see ).
+ This parameter can only be set at server start.
+ This setting has no effect if primary_conninfo is not
+ set.
+
+
+
+
+
+ promote_trigger_file (string )
+
+ promote_trigger_file configuration parameter
+
+
+
+
+ Specifies a trigger file whose presence ends recovery in the
+ standby. Even if this value is not set, you can still promote
+ the standby using pg_ctl promote or calling
+ pg_promote .
+ This parameter can only be set in the postgresql.conf
+ file or on the server command line.
+
+
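+
+ For example, promotion can be requested from SQL (a sketch; the
+ optional arguments shown assume the default signature):
+
+SELECT pg_promote(wait => true, wait_seconds => 60);
+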
+
+
hot_standby (boolean )
@@ -3461,8 +4062,9 @@ ANY num_sync (
- pg_stat_replication view. The standby will report
+ pg_stat_replication
+ view. The standby will report
the last write-ahead log location it has written, the last position it
has flushed to disk, and the last position it has applied.
This parameter's
@@ -3562,6 +4164,68 @@ ANY num_sync (
+ recovery_min_apply_delay (integer )
+
+ recovery_min_apply_delay configuration parameter
+
+
+
+
+ By default, a standby server restores WAL records from the
+ sending server as soon as possible. It may be useful to have a time-delayed
+ copy of the data, offering opportunities to correct data loss errors.
+ This parameter allows you to delay recovery by a fixed period of time,
+ measured in milliseconds if no unit is specified. For example, if
+ you set this parameter to 5min , the standby will
+ replay each transaction commit only when the system time on the standby
+ is at least five minutes past the commit time reported by the master.
+
+
+ It is possible that the replication delay between servers exceeds the
+ value of this parameter, in which case no delay is added.
+ Note that the delay is calculated between the WAL time stamp as written
+ on the master and the current time on the standby. Delays in transfer
+ because of network lag or cascading replication configurations
+ may reduce the actual wait time significantly. If the system
+ clocks on the master and standby are not synchronized, this may lead to
+ recovery applying records earlier than expected; but that is not a
+ major issue because useful settings of this parameter are much larger
+ than typical time deviations between servers.
+
+
+ The delay occurs only on WAL records for transaction commits.
+ Other records are replayed as quickly as possible, which
+ is not a problem because MVCC visibility rules ensure their effects
+ are not visible until the corresponding commit record is applied.
+
+
+ The delay occurs once the database in recovery has reached a consistent
+ state, until the standby is promoted or triggered. After that the standby
+ will end recovery without further waiting.
+
+
+ This parameter is intended for use with streaming replication deployments;
+ however, if the parameter is specified it will be honored in all cases.
+
+ hot_standby_feedback will be delayed by use of this feature,
+ which could lead to bloat on the master; use both together with care.
+
+
+
+ Synchronous replication is affected by this setting when synchronous_commit
+ is set to remote_apply ; every COMMIT
+ will need to wait to be applied.
+
+
+
+
+ This parameter can only be set in the postgresql.conf
+ file or on the server command line.
+
+
+
+
@@ -3826,6 +4490,24 @@ ANY num_sync (
+ enable_partition_pruning (boolean )
+
+ enable_partition_pruning configuration parameter
+
+
+
+
+ Enables or disables the query planner's ability to eliminate a
+ partitioned table's partitions from query plans. This also controls
+ the planner's ability to generate query plans which allow the query
+ executor to remove (ignore) partitions during query execution. The
+ default is on .
+ See for details.
+
+
+
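+
+ For example (measurement is a hypothetical
+ partitioned table), comparing plans with the setting on and off shows
+ partitions being removed from or kept in the plan:
+
+SET enable_partition_pruning = on;
+EXPLAIN SELECT count(*) FROM measurement WHERE logdate = DATE '2019-01-15';
+SET enable_partition_pruning = off;
+EXPLAIN SELECT count(*) FROM measurement WHERE logdate = DATE '2019-01-15';
+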
+
enable_partitionwise_join (boolean )
@@ -4148,7 +4830,8 @@ ANY num_sync ( num_sync (
jit_above_cost (floating point )
@@ -4170,48 +4852,49 @@ ANY num_sync ( ). Performing
- JIT costs time but can accelerate query execution.
-
+ Sets the query cost above which JIT compilation is activated, if
+ enabled (see ).
+ Performing JIT costs planning time but can
+ accelerate query execution.
+ Setting this to -1 disables JIT compilation.
The default is 100000 .
-
- jit_optimize_above_cost (floating point )
+
+ jit_inline_above_cost (floating point )
- jit_optimize_above_cost configuration parameter
+ jit_inline_above_cost configuration parameter
- Sets the planner's cutoff above which JIT compiled programs (see ) are optimized. Optimization initially
- takes time, but can improve execution speed. It is not meaningful to
- set this to a lower value than .
-
+ Sets the query cost above which JIT compilation attempts to inline
+ functions and operators. Inlining adds planning time, but can
+ improve execution speed. It is not meaningful to set this to less
+ than jit_above_cost .
+ Setting this to -1 disables inlining.
The default is 500000 .
-
- jit_inline_above_cost (floating point )
+
+ jit_optimize_above_cost (floating point )
- jit_inline_above_cost configuration parameter
+ jit_optimize_above_cost configuration parameter
- Sets the planner's cutoff above which JIT compiled programs (see ) attempt to inline functions and
- operators. Inlining initially takes time, but can improve execution
- speed. It is unlikely to be beneficial to set
- jit_inline_above_cost below
- jit_optimize_above_cost .
-
+ Sets the query cost above which JIT compilation applies expensive
+ optimizations. Such optimization adds planning time, but can improve
+ execution speed. It is not meaningful to set this to less
+ than jit_above_cost , and it is unlikely to be
+ beneficial to set it to more
+ than jit_inline_above_cost .
+ Setting this to -1 disables expensive optimizations.
The default is 500000 .
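+
+ As an illustration of how the three thresholds relate, a session that
+ triggers JIT earlier but never inlines might be configured like this
+ (the values are arbitrary examples):
+
+SET jit_above_cost = 50000;           -- compile expressions sooner
+SET jit_optimize_above_cost = 250000; -- optimize only costlier queries
+SET jit_inline_above_cost = -1;       -- never inline
+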
@@ -4414,11 +5097,11 @@ ANY num_sync ( .)
Refer to for
- more information on using constraint exclusion and partitioning.
+ more information on using constraint exclusion to implement
+ partitioning.
@@ -4507,10 +5194,9 @@ SELECT * FROM parent WHERE key = 2400;
- Determines whether JIT may be used by
+ Determines whether JIT compilation may be used by
PostgreSQL , if available (see ).
-
The default is on .
@@ -4623,6 +5309,34 @@ SELECT * FROM parent WHERE key = 2400;
+
+ plan_cache_mode (enum )
+
+ plan_cache_mode configuration parameter
+
+
+
+
+ Prepared statements (either explicitly prepared or implicitly
+ generated, for example by PL/pgSQL) can be executed using custom or
+ generic plans. Custom plans are made afresh for each execution
+ using its specific set of parameter values, while generic plans do
+ not rely on the parameter values and can be re-used across
+ executions. Thus, use of a generic plan saves planning time, but if
+ the ideal plan depends strongly on the parameter values then a
+ generic plan may be inefficient. The choice between these options
+ is normally made automatically, but it can be overridden
+ with plan_cache_mode .
+ The allowed values are auto (the default),
+ force_custom_plan and
+ force_generic_plan .
+ This setting is considered when a cached plan is to be executed,
+ not when it is prepared.
+ For more information see .
+
+
+
+
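+
+ A quick way to observe the difference (t is a
+ hypothetical table):
+
+PREPARE q(int) AS SELECT * FROM t WHERE id = $1;
+SET plan_cache_mode = force_generic_plan;
+EXPLAIN EXECUTE q(42);   -- plan keeps the $1 parameter placeholder
+SET plan_cache_mode = force_custom_plan;
+EXPLAIN EXECUTE q(42);   -- plan is built for the value 42
+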
@@ -4635,7 +5349,7 @@ SELECT * FROM parent WHERE key = 2400;
- Where To Log
+ Where to Log
where to log
@@ -5079,32 +5793,10 @@ local0.* /var/log/postgresql
- When To Log
+ When to Log
-
- client_min_messages (enum )
-
- client_min_messages configuration parameter
-
-
-
-
- Controls which message levels are sent to the client.
- Valid values are DEBUG5 ,
- DEBUG4 , DEBUG3 , DEBUG2 ,
- DEBUG1 , LOG , NOTICE ,
- WARNING , ERROR , FATAL ,
- and PANIC . Each level
- includes all the levels that follow it. The later the level,
- the fewer messages are sent. The default is
- NOTICE . Note that LOG has a different
- rank here than in log_min_messages .
-
-
-
-
log_min_messages (enum )
@@ -5113,7 +5805,8 @@ local0.* /var/log/postgresql
- Controls which message levels are written to the server log.
+ Controls which message
+ levels are written to the server log.
Valid values are DEBUG5 , DEBUG4 ,
DEBUG3 , DEBUG2 , DEBUG1 ,
INFO , NOTICE , WARNING ,
@@ -5122,7 +5815,7 @@ local0.* /var/log/postgresql
follow it. The later the level, the fewer messages are sent
to the log. The default is WARNING . Note that
LOG has a different rank here than in
- client_min_messages .
+ .
Only superusers can change this setting.
@@ -5139,7 +5832,9 @@ local0.* /var/log/postgresql
Controls which SQL statements that cause an error
condition are recorded in the server log. The current
SQL statement is included in the log entry for any message of
- the specified severity or higher.
+ the specified
+ severity
+ or higher.
Valid values are DEBUG5 ,
DEBUG4 , DEBUG3 ,
DEBUG2 , DEBUG1 ,
@@ -5197,6 +5892,32 @@ local0.* /var/log/postgresql
+
+ log_transaction_sample_rate (real )
+
+ log_transaction_sample_rate configuration parameter
+
+
+
+
+ Sets the fraction of transactions whose statements are all logged,
+ in addition to statements logged for other reasons. It applies to
+ each new transaction regardless of its statements' durations.
+ The default is 0 , meaning not to log statements
+ from any additional transaction. Setting this to 1
+ logs all statements for all transactions.
+ log_transaction_sample_rate is helpful to track a
+ sample of transactions.
+
+
+
+ Like all statement-logging options, this option can add significant
+ overhead.
+
+
+
+
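+
+ For example, to log the statements of roughly one per cent of
+ transactions:
+
+ALTER SYSTEM SET log_transaction_sample_rate = 0.01;
+SELECT pg_reload_conf();
+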
+
@@ -5288,7 +6009,7 @@ local0.* /var/log/postgresql
- What To Log
+ What to Log
@@ -5444,7 +6165,7 @@ local0.* /var/log/postgresql
- The difference between setting this option and setting
+ The difference between enabling log_duration and setting
to zero is that
exceeding log_min_duration_statement forces the text of
the query to be logged, but this option doesn't. Thus, if
@@ -5939,8 +6660,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
- Sets the cluster name that appears in the process title for all
- server processes in this cluster. The name can be any string of less
+ Sets a name that identifies this database cluster (instance) for
+ various purposes. The cluster name appears in the process title for
+ all server processes in this cluster. Moreover, it is the default
+ application name for a standby connection (see .)
+
+
+
+ The name can be any string of less
than NAMEDATALEN characters (64 characters in a standard
build). Only printable ASCII characters may be used in the
cluster_name value. Other characters will be
@@ -6200,15 +6928,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
log_autovacuum_min_duration (integer )
- log_autovacuum_min_duration configuration parameter
+ log_autovacuum_min_duration
+ configuration parameter
Causes each action executed by autovacuum to be logged if it ran for at
least the specified number of milliseconds. Setting this to zero logs
- all autovacuum actions. Minus-one (the default) disables logging
- autovacuum actions. For example, if you set this to
+ all autovacuum actions. -1 (the default) disables
+ logging autovacuum actions. For example, if you set this to
250ms then all automatic vacuums and analyzes that run
250ms or longer will be logged. In addition, when this parameter is
set to any value other than -1 , a message will be
@@ -6259,7 +6988,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_vacuum_threshold (integer )
- autovacuum_vacuum_threshold configuration parameter
+ autovacuum_vacuum_threshold
+ configuration parameter
@@ -6278,7 +7008,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_analyze_threshold (integer )
- autovacuum_analyze_threshold configuration parameter
+ autovacuum_analyze_threshold
+ configuration parameter
@@ -6297,7 +7028,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_vacuum_scale_factor (floating point )
- autovacuum_vacuum_scale_factor configuration parameter
+ autovacuum_vacuum_scale_factor
+ configuration parameter
@@ -6317,7 +7049,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_analyze_scale_factor (floating point )
- autovacuum_analyze_scale_factor configuration parameter
+ autovacuum_analyze_scale_factor
+ configuration parameter
@@ -6337,7 +7070,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_freeze_max_age (integer )
- autovacuum_freeze_max_age configuration parameter
+ autovacuum_freeze_max_age
+ configuration parameter
@@ -6365,7 +7099,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_multixact_freeze_max_age (integer )
- autovacuum_multixact_freeze_max_age configuration parameter
+ autovacuum_multixact_freeze_max_age
+ configuration parameter
@@ -6391,9 +7126,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
- autovacuum_vacuum_cost_delay (integer )
+ autovacuum_vacuum_cost_delay (floating point )
- autovacuum_vacuum_cost_delay configuration parameter
+ autovacuum_vacuum_cost_delay
+ configuration parameter
@@ -6401,7 +7137,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
Specifies the cost delay value that will be used in automatic
VACUUM operations. If -1 is specified, the regular
value will be used.
- The default value is 20 milliseconds.
+ The default value is 2 milliseconds.
This parameter can only be set in the postgresql.conf
file or on the server command line;
but the setting can be overridden for individual tables by
@@ -6413,7 +7149,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
autovacuum_vacuum_cost_limit (integer )
- autovacuum_vacuum_cost_limit configuration parameter
+ autovacuum_vacuum_cost_limit
+ configuration parameter
@@ -6443,6 +7180,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
Statement Behavior
+
+ client_min_messages (enum )
+
+ client_min_messages configuration parameter
+
+
+
+
+ Controls which
+ message levels
+ are sent to the client.
+ Valid values are DEBUG5 ,
+ DEBUG4 , DEBUG3 , DEBUG2 ,
+ DEBUG1 , LOG , NOTICE ,
+ WARNING , and ERROR .
+ Each level includes all the levels that follow it. The later the level,
+ the fewer messages are sent. The default is
+ NOTICE . Note that LOG has a different
+ rank here than in .
+
+
+ INFO level messages are always sent to the client.
+
+
+
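+
+ For example, to suppress NOTICE chatter (such as the
+ message from DROP ... IF EXISTS ) in the current
+ session:
+
+SET client_min_messages = warning;
+DROP TABLE IF EXISTS no_such_table;   -- no notice is reported
+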
+
search_path (string )
@@ -6471,7 +7234,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
If one of the list items is the special name
$user , then the schema having the name returned by
- SESSION_USER is substituted, if there is such a schema
+ CURRENT_USER is substituted, if there is such a schema
and the user has USAGE permission for it.
(If not, $user is ignored.)
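+
+ For example, to have the per-user schema searched before the shared
+ one (this is in fact the default setting):
+
+SET search_path TO "$user", public;
+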
@@ -6484,6 +7247,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
be searched before searching any of the path items.
+
Likewise, the current session's temporary-table schema,
pg_temp_nnn , is always searched if it
@@ -6559,6 +7326,23 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
+
+ default_table_access_method (string )
+
+ default_table_access_method configuration parameter
+
+
+
+
+ This parameter specifies the default table access method to use when
+ creating tables or materialized views if the CREATE
+ command does not explicitly specify an access method, or when
+ SELECT ... INTO is used, which does not allow
+ specifying a table access method. The default is heap .
+
+
+
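+
+ For example (assuming an access method named heap2
+ has been installed):
+
+SET default_table_access_method = heap2;
+CREATE TABLE t1 (a int);               -- uses heap2
+CREATE TABLE t2 (a int) USING heap;    -- explicit choice overrides it
+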
+
default_tablespace (string )
@@ -6570,7 +7354,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
This variable specifies the default tablespace in which to create
objects (tables and indexes) when a CREATE command does
- not explicitly specify a tablespace.
+ not explicitly specify a tablespace. It also determines the tablespace
+ that a partitioned relation will direct future partitions to.
@@ -6966,6 +7751,47 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
+
+ vacuum_cleanup_index_scale_factor (floating point )
+
+ vacuum_cleanup_index_scale_factor
+ configuration parameter
+
+
+
+
+ Specifies the fraction of the total number of heap tuples counted in
+ the previous statistics collection that can be inserted without
+ incurring an index scan at the VACUUM cleanup stage.
+ This setting currently applies to B-tree indexes only.
+
+
+
+ If no tuples were deleted from the heap, B-tree indexes are still
+ scanned at the VACUUM cleanup stage when at least one
+ of the following conditions is met: the index statistics are stale, or
+ the index contains deleted pages that can be recycled during cleanup.
+ Index statistics are considered to be stale if the number of newly
+ inserted tuples exceeds the vacuum_cleanup_index_scale_factor
+ fraction of the total number of heap tuples detected by the previous
+ statistics collection. The total number of heap tuples is stored in
+ the index meta-page. Note that the meta-page does not include this data
+ until VACUUM finds no dead tuples, so the B-tree index
+ scan at the cleanup stage can be skipped only if the second and
+ subsequent VACUUM cycles detect no dead tuples.
+
+
+
+ The value can range from 0 to
+ 10000000000 .
+ When vacuum_cleanup_index_scale_factor is set to
+ 0 , index scans are never skipped during
+ VACUUM cleanup. The default value is 0.1 .
+
+
+
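+
+ A sketch: to make B-tree cleanup scans less frequent for an
+ insert-mostly workload, one might raise the threshold in the current
+ session (or per index, via the storage parameter of the same name):
+
+SET vacuum_cleanup_index_scale_factor = 0.5;
+VACUUM insert_mostly_table;   -- hypothetical table name
+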
+
+
bytea_output (enum )
@@ -7048,7 +7874,8 @@ SET XML OPTION { DOCUMENT | CONTENT };
gin_pending_list_limit (integer )
- gin_pending_list_limit configuration parameter
+ gin_pending_list_limit
+ configuration parameter
@@ -7184,16 +8011,38 @@ SET XML OPTION { DOCUMENT | CONTENT };
- This parameter adjusts the number of digits displayed for
+ This parameter adjusts the number of digits used for textual output of
floating-point values, including float4 , float8 ,
- and geometric data types. The parameter value is added to the
- standard number of digits (FLT_DIG or DBL_DIG
- as appropriate). The value can be set as high as 3, to include
- partially-significant digits; this is especially useful for dumping
- float data that needs to be restored exactly. Or it can be set
- negative to suppress unwanted digits.
- See also .
+ and geometric data types.
+
+
+ If the value is 1 (the default) or above, float values are output in
+ shortest-precise format; see . The
+ actual number of digits generated depends only on the value being
+ output, not on the value of this parameter. At most 17 digits are
+ required for float8 values, and 9 for float4
+ values. This format is both fast and precise, preserving the original
+ binary float value exactly when correctly read. For historical
+ compatibility, values up to 3 are permitted.
+
+ If the value is zero or negative, then the output is rounded to a
+ given decimal precision. The precision used is the standard number of
+ digits for the type (FLT_DIG
+ or DBL_DIG as appropriate) reduced according to the
+ value of this parameter. (For example, specifying -1 will cause
+ float4 values to be output rounded to 5 significant
+ digits, and float8 values
+ rounded to 14 digits.) This format is slower and does not preserve all
+ the bits of the binary float value, but may be more human-readable.
+
+
+
+ The meaning of this parameter, and its default value, changed
+ in PostgreSQL 12;
+ see for further discussion.
+
+
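+
+ The difference is easy to see with a value that has no short exact
+ decimal form:
+
+SET extra_float_digits = 1;
+SELECT 0.1::float8 + 0.2::float8;   -- 0.30000000000000004
+SET extra_float_digits = 0;
+SELECT 0.1::float8 + 0.2::float8;   -- 0.3 (rounded to 15 digits)
+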
@@ -7519,16 +8368,17 @@ SET XML OPTION { DOCUMENT | CONTENT };
- Determines which JIT provider (see ) is
- used. The built-in default is llvmjit .
+ This variable is the name of the JIT provider library to be used
+ (see ).
+ The default is llvmjit .
+ This parameter can only be set at server start.
+
- If set to a non-existent library JIT will not be
+ If set to a non-existent library, JIT will not be
available, but no error will be raised. This allows JIT support to be
installed separately from the main
PostgreSQL package.
-
- This parameter can only be set at server start.
@@ -7830,35 +8680,6 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
-
- default_with_oids (boolean )
-
- default_with_oids configuration parameter
-
-
-
-
- This controls whether CREATE TABLE and
- CREATE TABLE AS include an OID column in
- newly-created tables, if neither WITH OIDS
- nor WITHOUT OIDS is specified. It also
- determines whether OIDs will be included in tables created by
- SELECT INTO . The parameter is off
- by default; in PostgreSQL 8.0 and earlier, it
- was on by default.
-
-
-
- The use of OIDs in user tables is considered deprecated, so
- most installations should leave this variable disabled.
- Applications that require OIDs for a particular table should
- specify WITH OIDS when creating the
- table. This variable can be enabled for compatibility with old
- applications that do not follow this behavior.
-
-
-
-
escape_string_warning (boolean )
strings escape warning
@@ -8067,8 +8888,8 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
- If true, any error will terminate the current session. By default,
- this is set to false, so that only FATAL errors will terminate the
+ If on, any error will terminate the current session. By default,
+ this is set to off, so that only FATAL errors will terminate the
session.
@@ -8082,9 +8903,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
- When set to true, which is the default, PostgreSQL
+ When set to on, which is the default, PostgreSQL
will automatically reinitialize after a backend crash. Leaving this
- value set to true is normally the best way to maximize the availability
+ value set to on is normally the best way to maximize the availability
of the database. However, in some circumstances, such as when
PostgreSQL is being invoked by clusterware, it may be
useful to disable the restart so that the clusterware can gain
@@ -8093,6 +8914,39 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
+
+ data_sync_retry (boolean )
+
+ data_sync_retry configuration parameter
+
+
+
+
+ When set to off, which is the default, PostgreSQL
+ will raise a PANIC-level error on failure to flush modified data files
+ to the file system. This causes the database server to crash. This
+ parameter can only be set at server start.
+
+
+ On some operating systems, the status of data in the kernel's page
+ cache is unknown after a write-back failure. In some cases it might
+ have been entirely forgotten, making it unsafe to retry; the second
+ attempt may be reported as successful, when in fact the data has been
+ lost. In these circumstances, the only way to avoid data loss is to
+ recover from the WAL after any failure is reported, preferably
+ after investigating the root cause of the failure and replacing any
+ faulty hardware.
+
+
+ If set to on, PostgreSQL will instead
+ report an error but continue to run so that the data flushing
+ operation can be retried in a later checkpoint. Only set it to on
+ after investigating the operating system's treatment of buffered data
+ in case of write-back failure.
+
+
+
+
@@ -8338,6 +9192,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
+
+ ssl_library (string )
+
+ ssl_library configuration parameter
+
+
+
+
+ Reports the name of the SSL library that this
+ PostgreSQL server was built with (even if
+ SSL is not currently configured or in use on this instance), for
+ example OpenSSL , or an empty string if none.
+
+
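+
+ For example, on a server built with OpenSSL:
+
+SHOW ssl_library;
+ ssl_library
+-------------
+ OpenSSL
+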
+
+
wal_block_size (integer )
@@ -8361,11 +9231,8 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
- Reports the number of blocks (pages) in a WAL segment file.
- The total size of a WAL segment file in bytes is equal to
- wal_segment_size multiplied by wal_block_size ;
- by default this is 16MB. See for
- more information.
+ Reports the size of write-ahead log segments. The default value is
+ 16MB. See for more information.
@@ -8807,9 +9674,8 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
If LLVM has the required functionality, register generated functions
with GDB . This makes debugging easier.
-
- The default setting is off , and can only be set at
- server start.
+ The default setting is off .
+ This parameter can only be set at server start.
@@ -8823,11 +9689,10 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
Writes the generated LLVM IR out to the
- filesystem, inside . This is only
+ file system, inside . This is only
useful for working on the internals of the JIT implementation.
-
- The default setting is off , and it can only be
- changed by a superuser.
+ The default setting is off .
+ This parameter can only be changed by a superuser.
@@ -8840,8 +9705,8 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
- Determines whether expressions are JIT compiled, subject to costing
- decisions (see ). The default is
+ Determines whether expressions are JIT compiled, when JIT compilation
+ is activated (see ). The default is
on .
@@ -8855,13 +9720,12 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
- If LLVM has the required functionality, emit required data to allow
+ If LLVM has the required functionality, emit the data needed to allow
perf to profile functions generated by JIT.
This writes out files to $HOME/.debug/jit/ ; the
user is responsible for performing cleanup when desired.
-
- The default setting is off , and can only be set at
- server start.
+ The default setting is off .
+ This parameter can only be set at server start.
@@ -8874,9 +9738,9 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1)
- Determines whether tuple deforming is JIT compiled, subject to costing
- decisions (see ). The default is
- on .
+ Determines whether tuple deforming is JIT compiled, when JIT
+ compilation is activated (see ).
+ The default is on .
diff --git a/doc/src/sgml/contrib-spi.sgml b/doc/src/sgml/contrib-spi.sgml
index 844ea161c42..fed6f249328 100644
--- a/doc/src/sgml/contrib-spi.sgml
+++ b/doc/src/sgml/contrib-spi.sgml
@@ -65,99 +65,6 @@
-
- timetravel — Functions for Implementing Time Travel
-
-
- Long ago, PostgreSQL had a built-in time travel feature
- that kept the insert and delete times for each tuple. This can be
- emulated using these functions. To use these functions,
- you must add to a table two columns of abstime type to store
- the date when a tuple was inserted (start_date) and changed/deleted
- (stop_date):
-
-
-CREATE TABLE mytab (
- ... ...
- start_date abstime,
- stop_date abstime
- ... ...
-);
-
-
- The columns can be named whatever you like, but in this discussion
- we'll call them start_date and stop_date.
-
-
-
- When a new row is inserted, start_date should normally be set to
- current time, and stop_date to infinity . The trigger
- will automatically substitute these values if the inserted data
- contains nulls in these columns. Generally, inserting explicit
- non-null data in these columns should only be done when re-loading
- dumped data.
-
-
-
- Tuples with stop_date equal to infinity are valid
- now
, and can be modified. Tuples with a finite stop_date cannot
- be modified anymore — the trigger will prevent it. (If you need
- to do that, you can turn off time travel as shown below.)
-
-
-
- For a modifiable row, on update only the stop_date in the tuple being
- updated will be changed (to current time) and a new tuple with the modified
- data will be inserted. Start_date in this new tuple will be set to current
- time and stop_date to infinity .
-
-
-
- A delete does not actually remove the tuple but only sets its stop_date
- to current time.
-
-
-
- To query for tuples valid now
, include
- stop_date = 'infinity' in the query's WHERE condition.
- (You might wish to incorporate that in a view.) Similarly, you can
- query for tuples valid at any past time with suitable conditions on
- start_date and stop_date.
-
-
-
- timetravel() is the general trigger function that supports
- this behavior. Create a BEFORE INSERT OR UPDATE OR DELETE
- trigger using this function on each time-traveled table. Specify two
- trigger arguments: the actual
- names of the start_date and stop_date columns.
- Optionally, you can specify one to three more arguments, which must refer
- to columns of type text . The trigger will store the name of
- the current user into the first of these columns during INSERT, the
- second column during UPDATE, and the third during DELETE.
-
-
-
- set_timetravel() allows you to turn time-travel on or off for
- a table.
- set_timetravel('mytab', 1) will turn TT ON for table mytab .
- set_timetravel('mytab', 0) will turn TT OFF for table mytab .
- In both cases the old status is reported. While TT is off, you can modify
- the start_date and stop_date columns freely. Note that the on/off status
- is local to the current database session — fresh sessions will
- always start out with TT ON for all tables.
-
-
-
- get_timetravel() returns the TT state for a table without
- changing it.
-
-
-
- There is an example in timetravel.example .
-
-
-
autoinc — Functions for Autoincrementing Fields
diff --git a/doc/src/sgml/cube.sgml b/doc/src/sgml/cube.sgml
index e010305d848..c6e586270aa 100644
--- a/doc/src/sgml/cube.sgml
+++ b/doc/src/sgml/cube.sgml
@@ -190,7 +190,7 @@
n = 2 * k - 1 means lower bound of k -th
dimension, n = 2 * k means upper bound of
k -th dimension. Negative
- n denotes inversed value of corresponding
+ n denotes the inverse value of the corresponding
positive coordinate. This operator is designed for KNN-GiST support.
diff --git a/doc/src/sgml/custom-scan.sgml b/doc/src/sgml/custom-scan.sgml
index 24631f5f404..239ba29de72 100644
--- a/doc/src/sgml/custom-scan.sgml
+++ b/doc/src/sgml/custom-scan.sgml
@@ -1,7 +1,7 @@
- Writing A Custom Scan Provider
+ Writing a Custom Scan Provider
custom scan provider
@@ -37,8 +37,9 @@
A custom scan provider will typically add paths for a base relation by
setting the following hook, which is called after the core code has
- generated what it believes to be the complete and correct set of access
- paths for the relation.
+ generated all the access paths it can for the relation (except for
+ Gather paths, which are made after this call so that they can use
+ partial paths added by the hook):
typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root,
RelOptInfo *rel,
@@ -82,10 +83,7 @@ typedef struct CustomPath
by nodeToString , so that debugging routines that attempt to
print the custom path will work as designed. methods must
point to a (usually statically allocated) object implementing the required
- custom path methods, of which there is currently only one. The
- LibraryName and SymbolName fields must also
- be initialized so that the dynamic loader can resolve them to locate the
- method table.
+ custom path methods, of which there is currently only one.
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index 67bae322878..9b6d6878eb0 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -515,14 +515,13 @@
We use the following terms below: The
- scale of a numeric is the
- count of decimal digits in the fractional part, to the right of
- the decimal point. The precision of a
- numeric is the total count of significant digits in
- the whole number, that is, the number of digits to both sides of
- the decimal point. So the number 23.5141 has a precision of 6
- and a scale of 4. Integers can be considered to have a scale of
- zero.
+ precision of a numeric
+ is the total count of significant digits in the whole number,
+ that is, the number of digits to both sides of the decimal point.
+ The scale of a numeric is the
+ count of decimal digits in the fractional part, to the right of the
+ decimal point. So the number 23.5141 has a precision of 6 and a
+ scale of 4. Integers can be considered to have a scale of zero.
@@ -672,13 +671,12 @@ FROM generate_series(-3.5, 3.5, 1) as x;
- The data types real and double
- precision are inexact, variable-precision numeric types.
- In practice, these types are usually implementations of
- IEEE Standard 754 for Binary Floating-Point
- Arithmetic (single and double precision, respectively), to the
- extent that the underlying processor, operating system, and
- compiler support it.
+ The data types real and double precision are
+ inexact, variable-precision numeric types. On all currently supported
+ platforms, these types are implementations of IEEE
+ Standard 754 for Binary Floating-Point Arithmetic (single and double
+ precision, respectively), to the extent that the underlying processor,
+ operating system, and compiler support it.
@@ -716,24 +714,57 @@ FROM generate_series(-3.5, 3.5, 1) as x;
- On most platforms, the real type has a range of at least
- 1E-37 to 1E+37 with a precision of at least 6 decimal digits. The
- double precision type typically has a range of around
- 1E-307 to 1E+308 with a precision of at least 15 digits. Values that
- are too large or too small will cause an error. Rounding might
- take place if the precision of an input number is too high.
- Numbers too close to zero that are not representable as distinct
- from zero will cause an underflow error.
+ On all currently supported platforms, the real type has a
+ range of around 1E-37 to 1E+37 with a precision of at least 6 decimal
+ digits. The double precision type has a range of around
+ 1E-307 to 1E+308 with a precision of at least 15 digits. Values that are
+ too large or too small will cause an error. Rounding might take place if
+ the precision of an input number is too high. Numbers too close to zero
+ that are not representable as distinct from zero will cause an underflow
+ error.
+
+
+
+ By default, floating point values are output in text form in their
+ shortest precise decimal representation; the decimal value produced is
+ closer to the true stored binary value than to any other value
+ representable in the same binary precision. (However, the output value is
+ currently never exactly midway between two
+ representable values, in order to avoid a widespread bug where input
+ routines do not properly respect the round-to-even rule.) This value will
+ use at most 17 significant decimal digits for float8
+ values, and at most 9 digits for float4 values.
- The setting controls the
- number of extra significant digits included when a floating point
- value is converted to text for output. With the default value of
- 0 , the output is the same on every platform
- supported by PostgreSQL. Increasing it will produce output that
- more accurately represents the stored value, but may be unportable.
+ This shortest-precise output format is much faster to generate than the
+ historical rounded format.
+
+
+
+
+ For compatibility with output generated by older versions
+ of PostgreSQL , and to allow the output
+ precision to be reduced, the
+ parameter can be used to select rounded decimal output instead. Setting a
+ value of 0 restores the previous default of rounding the value to 6
+ (for float4 ) or 15 (for float8 )
+ significant decimal digits. Setting a negative value reduces the number
+ of digits further; for example -2 would round output to 4 or 13 digits
+ respectively.
+
+
+
+ Any value of greater than 0
+ selects the shortest-precise format.
+
+
+
+
+ Applications that wanted precise values have historically had to set
+ to 3 to obtain them. For
+ maximum compatibility between versions, they should continue to do so.
@@ -752,9 +783,7 @@ FROM generate_series(-3.5, 3.5, 1) as x;
These represent the IEEE 754 special values
infinity
, negative infinity
, and
- not-a-number
, respectively. (On a machine whose
- floating-point arithmetic does not follow IEEE 754, these values
- will probably not work as expected.) When writing these values
+ not-a-number
, respectively. When writing these values
as constants in an SQL command, you must put quotes around them,
for example UPDATE table SET x = '-Infinity' . On input,
these strings are recognized in a case-insensitive manner.
@@ -787,17 +816,6 @@ FROM generate_series(-3.5, 3.5, 1) as x;
double precision .
-
-
- The assumption that real and
- double precision have exactly 24 and 53 bits in the
- mantissa respectively is correct for IEEE-standard floating point
- implementations. On non-IEEE platforms it might be off a little, but
- for simplicity the same ranges of p are used
- on all platforms.
-
-
-
@@ -862,7 +880,7 @@ CREATE TABLE tablename (
is equivalent to specifying:
-CREATE SEQUENCE tablename _colname _seq;
+CREATE SEQUENCE tablename _colname _seq AS integer;
CREATE TABLE tablename (
colname integer NOT NULL DEFAULT nextval('tablename _colname _seq')
);
@@ -1297,7 +1315,7 @@ SELECT b, char_length(b) FROM test2;
strings are distinguished from character strings in two
ways. First, binary strings specifically allow storing
octets of value zero and other non-printable
- octets (usually, octets outside the range 32 to 126).
+ octets (usually, octets outside the decimal range 32 to 126).
Character strings disallow zero octets, and also disallow any
other octet values and sequences of octet values that are invalid
according to the database's selected character set encoding.
@@ -1309,9 +1327,10 @@ SELECT b, char_length(b) FROM test2;
- The bytea type supports two external formats for
- input and output: PostgreSQL 's historical
- escape
format, and hex
format. Both
+ The bytea type supports two
+ formats for input and output: hex
format
+ and PostgreSQL 's historical
+ escape
format. Both
of these are always accepted on input. The output format depends
on the configuration parameter ;
the default is hex. (Note that the hex format was introduced in
@@ -1335,9 +1354,9 @@ SELECT b, char_length(b) FROM test2;
per byte, most significant nibble first. The entire string is
preceded by the sequence \x (to distinguish it
from the escape format). In some contexts, the initial backslash may
- need to be escaped by doubling it, in the same cases in which backslashes
- have to be doubled in escape format; details appear below.
- The hexadecimal digits can
+ need to be escaped by doubling it
+ (see ).
+ For input, the hexadecimal digits can
be either upper or lower case, and whitespace is permitted between
digit pairs (but not within a digit pair nor in the starting
\x sequence).
@@ -1349,7 +1368,7 @@ SELECT b, char_length(b) FROM test2;
Example:
-SELECT E'\\xDEADBEEF';
+SELECT '\xDEADBEEF';
@@ -1369,7 +1388,7 @@ SELECT E'\\xDEADBEEF';
convenient. But in practice it is usually confusing because it
fuzzes up the distinction between binary strings and character
strings, and also the particular escape mechanism that was chosen is
- somewhat unwieldy. So this format should probably be avoided
+ somewhat unwieldy. Therefore, this format should probably be avoided
for most new applications.
@@ -1379,10 +1398,8 @@ SELECT E'\\xDEADBEEF';
values must be escaped, while all octet
values can be escaped. In
general, to escape an octet, convert it into its three-digit
- octal value and precede it
- by a backslash (or two backslashes, if writing the value as a
- literal using escape string syntax).
- Backslash itself (octet value 92) can alternatively be represented by
+ octal value and precede it by a backslash.
+ Backslash itself (octet decimal value 92) can alternatively be represented by
double backslashes.
shows the characters that must be escaped, and gives the alternative
@@ -1398,7 +1415,7 @@ SELECT E'\\xDEADBEEF';
Description
Escaped Input Representation
Example
- Output Representation
+ Hex Representation
@@ -1406,33 +1423,33 @@ SELECT E'\\xDEADBEEF';
0
zero octet
- E'\\000'
- SELECT E'\\000'::bytea;
- \000
+ '\000'
+ SELECT '\000'::bytea;
+ \x00
39
single quote
- '''' or E'\\047'
- SELECT E'\''::bytea;
- '
+ '''' or '\047'
+ SELECT ''''::bytea;
+ \x27
92
backslash
- E'\\\\' or E'\\134'
- SELECT E'\\\\'::bytea;
- \\
+ '\\' or '\134'
+ SELECT '\\'::bytea;
+ \x5c
0 to 31 and 127 to 255
non-printable
octets
- E'\\xxx' (octal value)
- SELECT E'\\001'::bytea;
- \001
+ '\xxx' (octal value)
+ SELECT '\001'::bytea;
+ \x01
@@ -1442,41 +1459,49 @@ SELECT E'\\xDEADBEEF';
The requirement to escape non-printable octets
varies depending on locale settings. In some instances you can get away
- with leaving them unescaped. Note that the result in each of the examples
- in was exactly one octet in
- length, even though the output representation is sometimes
- more than one character.
+ with leaving them unescaped.
+
+
+
+ The reason that single quotes must be doubled, as shown
+ in , is that this
+ is true for any string literal in a SQL command. The generic
+ string-literal parser consumes the outermost single quotes
+ and reduces any pair of single quotes to one data character.
+ What the bytea input function sees is just one
+ single quote, which it treats as a plain data character.
+ However, the bytea input function treats
+ backslashes as special, and the other behaviors shown in
+ are implemented by
+ that function.
- The reason multiple backslashes are required, as shown
- in , is that an input
- string written as a string literal must pass through two parse
- phases in the PostgreSQL server.
- The first backslash of each pair is interpreted as an escape
- character by the string-literal parser (assuming escape string
- syntax is used) and is therefore consumed, leaving the second backslash of the
- pair. (Dollar-quoted strings can be used to avoid this level
- of escaping.) The remaining backslash is then recognized by the
- bytea input function as starting either a three
- digit octal value or escaping another backslash. For example,
- a string literal passed to the server as E'\\001'
- becomes \001 after passing through the
- escape string parser. The \001 is then sent
- to the bytea input function, where it is converted
- to a single octet with a decimal value of 1. Note that the
- single-quote character is not treated specially by bytea ,
- so it follows the normal rules for string literals. (See also
- .)
+ In some contexts, backslashes must be doubled compared to what is
+ shown above, because the generic string-literal parser will also
+ reduce pairs of backslashes to one data character;
+ see .
- Bytea octets are sometimes escaped when output. In general, each
- non-printable
octet is converted into
- its equivalent three-digit octal value and preceded by one backslash.
- Most printable
octets are represented by their standard
- representation in the client character set. The octet with decimal
- value 92 (backslash) is doubled in the output.
+ Bytea octets are output in hex
+ format by default. If you change
+ to escape ,
+ non-printable
octets are converted to their
+ equivalent three-digit octal value and preceded by one backslash.
+ Most printable
octets are output by their standard
+ representation in the client character set, e.g.:
+
+
+SET bytea_output = 'escape';
+
+SELECT 'abc \153\154\155 \052\251\124'::bytea;
+ bytea
+----------------
+ abc klm *\251T
+
+
+ The octet with decimal value 92 (backslash) is doubled in the output.
Details are in .
@@ -1499,7 +1524,7 @@ SELECT E'\\xDEADBEEF';
92
backslash
\\
- SELECT E'\\134'::bytea;
+ SELECT '\134'::bytea;
\\
@@ -1507,7 +1532,7 @@ SELECT E'\\xDEADBEEF';
0 to 31 and 127 to 255
non-printable
octets
\xxx (octal value)
- SELECT E'\\001'::bytea;
+ SELECT '\001'::bytea;
\001
@@ -1515,7 +1540,7 @@ SELECT E'\\xDEADBEEF';
32 to 126
printable
octets
client character set representation
- SELECT E'\\176'::bytea;
+ SELECT '\176'::bytea;
~
@@ -1699,14 +1724,6 @@ MINUTE TO SECOND
any application.
-
- The types abstime
- and reltime are lower precision types which are used internally.
- You are discouraged from using these types in
- applications; these internal types
- might disappear in a future release.
-
-
Date/Time Input
@@ -2407,7 +2424,7 @@ January 8 04:05:06 1999 PST
linkend="view-pg-timezone-names"/>).
PostgreSQL uses the widely-used IANA
time zone data for this purpose, so the same time zone
- names are also recognized by much other software.
+ names are also recognized by other software.
@@ -2670,19 +2687,6 @@ P years -months -
to each field if any field is negative.
-
- Internally interval values are stored as months, days,
- and seconds. This is done because the number of days in a month
- varies, and a day can have 23 or 25 hours if a daylight savings
- time adjustment is involved. The months and days fields are integers
- while the seconds field can store fractions. Because intervals are
- usually created from constant strings or timestamp subtraction,
- this storage method works well in most cases. Functions
- justify_days and justify_hours are
- available for adjusting days and hours that overflow their normal
- ranges.
-
-
In the verbose input format, and in some fields of the more compact
input formats, field values can have fractional parts; for example
@@ -2734,6 +2738,33 @@ P years -months -
+
+ Internally interval values are stored as months, days,
+ and seconds. This is done because the number of days in a month
+ varies, and a day can have 23 or 25 hours if a daylight savings
+ time adjustment is involved. The months and days fields are integers
+ while the seconds field can store fractions. Because intervals are
+ usually created from constant strings or timestamp subtraction,
+ this storage method works well in most cases, but can cause unexpected
+ results:
+
+
+SELECT EXTRACT(hours from '80 minutes'::interval);
+ date_part
+-----------
+ 1
+
+SELECT EXTRACT(days from '80 hours'::interval);
+ date_part
+-----------
+ 0
+
+
+ Functions justify_days and
+ justify_hours are available for adjusting days
+ and hours that overflow their normal ranges.
+
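+
+ For example:
+
+SELECT justify_hours('80 hours'::interval);
+ justify_hours
+-----------------
+ 3 days 08:00:00
+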
+
@@ -2874,37 +2905,36 @@ P years -months -
- Valid literal values for the true
state are:
+ Boolean constants can be represented in SQL queries by the SQL
+ key words TRUE , FALSE ,
+ and NULL .
+
+
+
+ The datatype input function for type boolean accepts these
+ string representations for the true
state:
- TRUE
- 't'
- 'true'
- 'y'
- 'yes'
- 'on'
- '1'
+ true
+ yes
+ on
+ 1
- For the false
state, the following values can be
- used:
+ and these representations for the false
state:
- FALSE
- 'f'
- 'false'
- 'n'
- 'no'
- 'off'
- '0'
+ false
+ no
+ off
+ 0
+ Unique prefixes of these strings are also accepted, for
+ example t or n .
Leading or trailing whitespace is ignored, and case does not matter.
- The key words
- TRUE and FALSE are the preferred
- (SQL -compliant) usage.
- shows that
- boolean values are output using the letters
- t and f .
+ The datatype output function for type boolean always emits
+ either t or f , as shown in
+ .
@@ -2926,6 +2956,27 @@ SELECT * FROM test1 WHERE a;
t | sic est
+
+
+ The key words TRUE and FALSE are
+ the preferred (SQL -compliant) method for writing
+ Boolean constants in SQL queries. But you can also use the string
+ representations by following the generic string-literal constant syntax
+ described in , for
+ example 'yes'::boolean .
+
+
+
+ Note that the parser automatically understands
+ that TRUE and FALSE are of
+ type boolean , but this is not so
+ for NULL because that can have any type.
+ So in some contexts you might have to cast NULL
+ to boolean explicitly, for
+ example NULL::boolean . Conversely, the cast can be
+ omitted from a string-literal Boolean value in contexts where the parser
+ can deduce that the literal must be of type boolean .
+
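+
+ For example, both string forms and unique prefixes are accepted on
+ input, while output is always t or f :
+
+SELECT 'yes'::boolean AS a, 'n'::boolean AS b;
+ a | b
+---+---
+ t | f
+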
@@ -4144,16 +4195,8 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
- PostgreSQL provides storage and comparison
- functions for UUIDs, but the core database does not include any
- function for generating UUIDs, because no single algorithm is well
- suited for every application. The module
- provides functions that implement several standard algorithms.
- The module also provides a generation
- function for random UUIDs.
- Alternatively, UUIDs could be generated by client applications or
- other libraries invoked through a server-side function.
+ See for how to generate a UUID in
+ PostgreSQL .
@@ -4177,15 +4220,22 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
The xml type can store well-formed
documents
, as defined by the XML standard, as well
- as content
fragments, which are defined by the
- production XMLDecl? content in the XML
- standard. Roughly, this means that content fragments can have
+ as content
fragments, which are defined by reference
+ to the more permissive
+ document node
+ of the XQuery and XPath data model.
+ Roughly, this means that content fragments can have
more than one top-level element or character node. The expression
xmlvalue IS DOCUMENT
can be used to evaluate whether a particular xml
value is a full document or only a content fragment.
+
+ Limits and compatibility notes for the xml data type
+ can be found in .
+
+
Creating XML Values
@@ -4254,16 +4304,6 @@ SET xmloption TO { DOCUMENT | CONTENT };
data are allowed.
-
-
- With the default XML option setting, you cannot directly cast
- character strings to type xml if they contain a
- document type declaration, because the definition of XML content
- fragment does not accept them. If you need to do that, either
- use XMLPARSE or change the XML option.
-
-
-
@@ -4479,25 +4519,22 @@ INSERT INTO mytable VALUES(-1); -- fails
Object identifiers (OIDs) are used internally by
PostgreSQL as primary keys for various
- system tables. OIDs are not added to user-created tables, unless
- WITH OIDS is specified when the table is
- created, or the
- configuration variable is enabled. Type oid represents
- an object identifier. There are also several alias types for
- oid : regproc , regprocedure ,
- regoper , regoperator , regclass ,
- regtype , regrole , regnamespace ,
- regconfig , and regdictionary .
- shows an overview.
+ system tables.
+
+ Type oid represents an object identifier. There are also
+ several alias types for oid : regproc ,
+ regprocedure , regoper , regoperator ,
+ regclass , regtype , regrole ,
+ regnamespace , regconfig , and
+ regdictionary . shows an
+ overview.
The oid type is currently implemented as an unsigned
four-byte integer. Therefore, it is not large enough to provide
database-wide uniqueness in large databases, or even in large
- individual tables. So, using a user-created table's OID column as
- a primary key is discouraged. OIDs are best used only for
- references to system tables.
+ individual tables.
diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml
index d269aa4cc55..23561b19c97 100644
--- a/doc/src/sgml/datetime.sgml
+++ b/doc/src/sgml/datetime.sgml
@@ -24,7 +24,7 @@
Date/Time Input Interpretation
- The date/time type inputs are all decoded using the following procedure.
+ Date/time input strings are decoded using the following procedure.
@@ -73,20 +73,21 @@
- If the token is a text string, match up with possible strings:
+ If the token is an alphabetic string, match up with possible strings:
- Do a binary-search table lookup for the token as a time zone
- abbreviation.
+ See if the token matches any known time zone abbreviation.
+ These abbreviations are supplied by the configuration file
+ described in .
- If not found, do a similar binary-search table lookup to match
+ If not found, search an internal table to match
the token as either a special string (e.g., today ),
day (e.g., Thursday ),
month (e.g., January ),
@@ -176,6 +177,83 @@
+
+ Handling of Invalid or Ambiguous Timestamps
+
+
+ Ordinarily, if a date/time string is syntactically valid but contains
+ out-of-range field values, an error will be thrown. For example, input
+ specifying the 31st of February will be rejected.
+
+
+
+ During a daylight-savings-time transition, it is possible for a
+ seemingly valid timestamp string to represent a nonexistent or ambiguous
+ timestamp. Such cases are not rejected; the ambiguity is resolved by
+ determining which UTC offset to apply. For example, supposing that the
+ parameter is set
+ to America/New_York , consider
+
+=> SELECT '2018-03-11 02:30'::timestamptz;
+ timestamptz
+------------------------
+ 2018-03-11 03:30:00-04
+(1 row)
+
+ Because that day was a spring-forward transition date in that time zone,
+ there was no civil time instant 2:30AM; clocks jumped forward from 2AM
+ EST to 3AM EDT. PostgreSQL interprets the
+ given time as if it were standard time (UTC-5), which then renders as
+ 3:30AM EDT (UTC-4).
+
+
+
+ Conversely, consider the behavior during a fall-back transition:
+
+=> SELECT '2018-11-04 02:30'::timestamptz;
+ timestamptz
+------------------------
+ 2018-11-04 02:30:00-05
+(1 row)
+
+ On that date, there were two possible interpretations of 2:30AM; there
+ was 2:30AM EDT, and then an hour later after the reversion to standard
+ time, there was 2:30AM EST.
+ Again, PostgreSQL interprets the given time
+ as if it were standard time (UTC-5). We can force the matter by
+ specifying daylight-savings time:
+
+=> SELECT '2018-11-04 02:30 EDT'::timestamptz;
+ timestamptz
+------------------------
+ 2018-11-04 01:30:00-05
+(1 row)
+
+ This timestamp could validly be rendered as either 2:30 UTC-4 or
+ 1:30 UTC-5; the timestamp output code chooses the latter.
+
+
+
+ The precise rule that is applied in such cases is that an invalid
+ timestamp that appears to fall within a jump-forward daylight savings
+ transition is assigned the UTC offset that prevailed in the time zone
+ just before the transition, while an ambiguous timestamp that could fall
+ on either side of a jump-back transition is assigned the UTC offset that
+ prevailed just after the transition. In most time zones this is
+ equivalent to saying that the standard-time interpretation is
+ preferred when in doubt
.
+
+
+
+ In all cases, the UTC offset associated with a timestamp can be
+ specified explicitly, using either a numeric UTC offset or a time zone
+ abbreviation that corresponds to a fixed UTC offset. The rule just
+ given applies only when it is necessary to infer a UTC offset for a time
+ zone in which the offset varies.
+
+
+
+
Date/Time Key Words
@@ -553,7 +631,7 @@
is now the USA) in 1752.
Thus 2 September 1752 was followed by 14 September 1752.
- This is why Unix systems have the cal program
+ This is why Unix systems that have the cal program
produce the following:
diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml
index 87e14ea093d..97dc3b81292 100644
--- a/doc/src/sgml/dblink.sgml
+++ b/doc/src/sgml/dblink.sgml
@@ -1165,11 +1165,25 @@ dblink_error_message(text connname) returns text
Return Value
- Returns last error message, or an empty string if there has been
+ Returns the last error message, or OK if there has been
no error in this connection.
+
+ Notes
+
+
+ When asynchronous queries are initiated by
+ dblink_send_query , the error message associated with
+ the connection might not get updated until the server's response message
+ is consumed. This typically means that dblink_is_busy
+ or dblink_get_result should be called prior to
+ dblink_error_message , so that any error generated by
+ the asynchronous query will be visible.
+
+
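+
+ A sketch, using a hypothetical connection named myconn :
+
+SELECT dblink_send_query('myconn', 'SELECT pg_sleep(0)');
+SELECT dblink_is_busy('myconn');         -- consumes the server's response
+SELECT dblink_error_message('myconn');   -- now reflects any error
+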
+
Examples
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index feb2ab77920..d7158c1b034 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -233,6 +233,124 @@ CREATE TABLE products (
+
+ Generated Columns
+
+
+ generated column
+
+
+
+ A generated column is a special column that is always computed from other
+ columns. Thus, it is for columns what a view is for tables. There are two
+ kinds of generated columns: stored and virtual. A stored generated column
+ is computed when it is written (inserted or updated) and occupies storage
+ as if it were a normal column. A virtual generated column occupies no
+ storage and is computed when it is read. Thus, a virtual generated column
+ is similar to a view and a stored generated column is similar to a
+ materialized view (except that it is always updated automatically).
+ PostgreSQL currently implements only stored generated columns.
+
+
+
+ To create a generated column, use the GENERATED ALWAYS
+ AS clause in CREATE TABLE , for example:
+
+CREATE TABLE people (
+ ...,
+ height_cm numeric,
+ height_in numeric GENERATED ALWAYS AS (height_cm / 2.54) STORED
+);
+
+ The keyword STORED must be specified to choose the
+ stored kind of generated column. See for
+ more details.
+
+
+
+ A generated column cannot be written to directly. In
+ INSERT or UPDATE commands, a value
+ cannot be specified for a generated column, but the keyword
+ DEFAULT may be specified.
+
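+
+ For example, using the people table above (a sketch;
+ the output is abbreviated), the stored value is computed automatically
+ on insert:
+
+INSERT INTO people (height_cm) VALUES (180);
+SELECT height_in FROM people;   -- about 70.87
+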
+
+
+ Consider the differences between a column with a default and a generated
+ column. The column default is evaluated once when the row is first
+ inserted if no other value was provided; a generated column is updated
+ whenever the row changes and cannot be overridden. A column default may
+ not refer to other columns of the table; a generation expression would
+ normally do so. A column default can use volatile functions, for example
+ random() or functions referring to the current time;
+ this is not allowed for generated columns.
+
+
+
+ Several restrictions apply to the definition of generated columns and
+ tables involving generated columns:
+
+
+
+
+ The generation expression can only use immutable functions and cannot
+ use subqueries or reference anything other than the current row in any
+ way.
+
+
+
+
+ A generation expression cannot reference another generated column.
+
+
+
+
+ A generation expression cannot reference a system column, except
+ tableoid .
+
+
+
+
+ A generated column cannot have a column default or an identity definition.
+
+
+
+
+ A generated column cannot be part of a partition key.
+
+
+
+
+ Foreign tables can have generated columns. See for details.
+
+
+
+
+
+
+ Additional considerations apply to the use of generated columns.
+
+
+
+ Generated columns maintain access privileges separately from their
+ underlying base columns. It is therefore possible to arrange things so
+ that a particular role can read from a generated column but not from
+ the underlying base columns.
+
+
+
+
+ Generated columns are, conceptually, updated after
+ BEFORE triggers have run. Therefore, changes made to
+ base columns in a BEFORE trigger will be reflected in
+ generated columns. But conversely, it is not allowed to access
+ generated columns in BEFORE triggers.
+
+
+
+
+
+
Constraints
@@ -403,6 +521,59 @@ CREATE TABLE products (
ensure that a column does not contain null values, the not-null
constraint described in the next section can be used.
+
+
+
+ PostgreSQL does not support
+ CHECK constraints that reference table data other than
+ the new or updated row being checked. While a CHECK
+ constraint that violates this rule may appear to work in simple
+ tests, it cannot guarantee that the database will not reach a state
+ in which the constraint condition is false (due to subsequent changes
+ of the other row(s) involved). This would cause a database dump and
+ reload to fail. The reload could fail even when the complete
+ database state is consistent with the constraint, due to rows not
+ being loaded in an order that will satisfy the constraint. If
+ possible, use UNIQUE , EXCLUDE ,
+ or FOREIGN KEY constraints to express
+ cross-row and cross-table restrictions.
+
+
+
+ If what you desire is a one-time check against other rows at row
+ insertion, rather than a continuously-maintained consistency
+ guarantee, a custom trigger can be used
+ to implement that. (This approach avoids the dump/reload problem because
+ pg_dump does not reinstall triggers until after
+ reloading data, so that the check will not be enforced during a
+ dump/reload.)
+
+
+
+
+
+ PostgreSQL assumes that
+ CHECK constraints' conditions are immutable, that
+ is, they will always give the same result for the same input row.
+ This assumption is what justifies examining CHECK
+ constraints only when rows are inserted or updated, and not at other
+ times. (The warning above about not referencing other table data is
+ really a special case of this restriction.)
+
+
+
+ An example of a common way to break this assumption is to reference a
+ user-defined function in a CHECK expression, and
+ then change the behavior of that
+ function. PostgreSQL does not disallow
+ that, but it will not notice if there are rows in the table that now
+ violate the CHECK constraint. That would cause a
+ subsequent database dump and reload to fail.
+ The recommended way to handle such a change is to drop the constraint
+ (using ALTER TABLE ), adjust the function definition,
+ and re-add the constraint, thereby rechecking it against all table rows.
+
+
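+ A minimal sketch of that recommended sequence, using hypothetical
+ constraint and function names (not taken from the surrounding text):
+
+ALTER TABLE products DROP CONSTRAINT products_price_check;
+-- adjust the function's definition
+CREATE OR REPLACE FUNCTION my_price_ok(numeric) RETURNS boolean
+    IMMUTABLE LANGUAGE sql AS 'SELECT $1 > 0';
+-- re-adding the constraint rechecks it against all existing rows
+ALTER TABLE products ADD CONSTRAINT products_price_check
+    CHECK (my_price_ok(price));
+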
@@ -938,24 +1109,6 @@ CREATE TABLE circles (
-
- oid
-
-
-
- OID
- column
-
- The object identifier (object ID) of a row. This column is only
- present if the table was created using WITH
- OIDS , or if the
- configuration variable was set at the time. This column is of type
- oid (same name as the column); see for more information about the type.
-
-
-
-
tableoid
@@ -1049,53 +1202,12 @@ CREATE TABLE circles (
ctid will change if it is
updated or moved by VACUUM FULL . Therefore
ctid is useless as a long-term row
- identifier. The OID, or even better a user-defined serial
- number, should be used to identify logical rows.
+ identifier. A primary key should be used to identify logical rows.
-
- OIDs are 32-bit quantities and are assigned from a single
- cluster-wide counter. In a large or long-lived database, it is
- possible for the counter to wrap around. Hence, it is bad
- practice to assume that OIDs are unique, unless you take steps to
- ensure that this is the case. If you need to identify the rows in
- a table, using a sequence generator is strongly recommended.
- However, OIDs can be used as well, provided that a few additional
- precautions are taken:
-
-
-
-
- A unique constraint should be created on the OID column of each
- table for which the OID will be used to identify rows. When such
- a unique constraint (or unique index) exists, the system takes
- care not to generate an OID matching an already-existing row.
- (Of course, this is only possible if the table contains fewer
- than 232 (4 billion) rows, and in practice the
- table size had better be much less than that, or performance
- might suffer.)
-
-
-
-
- OIDs should never be assumed to be unique across tables; use
- the combination of tableoid and row OID if you
- need a database-wide identifier.
-
-
-
-
- Of course, the tables in question must be created WITH
- OIDS . As of PostgreSQL 8.1,
- WITHOUT OIDS is the default.
-
-
-
-
-
Transaction identifiers are also 32-bit quantities. In a
long-lived database it is possible for transaction IDs to wrap
@@ -1189,6 +1301,29 @@ ALTER TABLE products ADD COLUMN description text;
value is given (null if you don't specify a DEFAULT clause).
+
+
+ From PostgreSQL 11, adding a column with
+ a constant default value no longer means that each row of the table
+ needs to be updated when the ALTER TABLE statement
+ is executed. Instead, the default value will be returned the next time
+ the row is accessed, and applied when the table is rewritten, making
+ the ALTER TABLE very fast even on large tables.
+
+
+
+ However, if the default value is volatile (e.g.,
+ clock_timestamp()),
+ each row will need to be updated with the value calculated at the time
+ ALTER TABLE is executed. To avoid a potentially
+ lengthy update operation, particularly if you intend to fill the column
+ with mostly nondefault values anyway, it may be preferable to add the
+ column with no default, insert the correct values using
+ UPDATE , and then add any desired default as described
+ below.
+
+
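+ For example (a sketch; the status and
+ added_at columns are hypothetical), the first of
+ these commands completes without rewriting existing rows, while the
+ second must compute and store a value in every row:
+
+ALTER TABLE products ADD COLUMN status text DEFAULT 'active';
+ALTER TABLE products ADD COLUMN added_at timestamptz DEFAULT clock_timestamp();
+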
+
You can also define constraints on the column at the same time,
using the usual syntax:
@@ -1203,17 +1338,6 @@ ALTER TABLE products ADD COLUMN description text CHECK (description <> '')
correctly.
-
-
- Adding a column with a default requires updating each row of the
- table (to store the new column value). However, if no default is
- specified, PostgreSQL is able to avoid
- the physical update. So if you intend to fill the column with
- mostly nondefault values, it's best to add the column with no default,
- insert the correct values using UPDATE , and then add any
- desired default as described below.
-
-
@@ -1427,6 +1551,10 @@ ALTER TABLE products RENAME TO items;
REVOKE
+
+ ACL
+
+
When an object is created, it is assigned an owner. The
owner is normally the role that executed the creation statement.
@@ -1444,11 +1572,9 @@ ALTER TABLE products RENAME TO items;
EXECUTE , and USAGE .
The privileges applicable to a particular
object vary depending on the object's type (table, function, etc).
- For complete information on the different types of privileges
- supported by PostgreSQL , refer to the
- reference
- page. The following sections and chapters will also show you how
- those privileges are used.
+ More detail about the meanings of these privileges appears below.
+ The following sections and chapters will also show you how
+ these privileges are used.
@@ -1458,15 +1584,17 @@ ALTER TABLE products RENAME TO items;
An object can be assigned to a new owner with an ALTER
- command of the appropriate kind for the object, e.g. ALTER TABLE. Superusers can always do
- this; ordinary roles can only do it if they are both the current owner
- of the object (or a member of the owning role) and a member of the new
- owning role.
+ command of the appropriate kind for the object, for example
+
+ALTER TABLE table_name OWNER TO new_owner;
+
+ Superusers can always do this; ordinary roles can only do it if they are
+ both the current owner of the object (or a member of the owning role) and
+ a member of the new owning role.
- To assign privileges, the GRANT command is
+ To assign privileges, the GRANT command is
used. For example, if joe is an existing role, and
accounts is an existing table, the privilege to
update the table can be granted with:
@@ -1487,7 +1615,7 @@ GRANT UPDATE ON accounts TO joe;
To revoke a privilege, use the fittingly named
- REVOKE command:
+ REVOKE command:
REVOKE ALL ON accounts FROM PUBLIC;
@@ -1509,6 +1637,507 @@ REVOKE ALL ON accounts FROM PUBLIC;
privilege. For details see the GRANT and
REVOKE reference pages.
+
+
+ The available privileges are:
+
+
+
+ SELECT
+
+
+ Allows SELECT from
+ any column, or specific column(s), of a table, view, materialized
+ view, or other table-like object.
+ Also allows use of COPY TO.
+ This privilege is also needed to reference existing column values in
+ UPDATE or DELETE.
+ For sequences, this privilege also allows use of the
+ currval function.
+ For large objects, this privilege allows the object to be read.
+
+
+
+
+
+ INSERT
+
+
+ Allows INSERT of a new row into a table, view,
+ etc. Can be granted on specific column(s), in which case
+ only those columns may be assigned to in the INSERT
+ command (other columns will therefore receive default values).
+ Also allows use of COPY FROM.
+
+
+
+
+
+ UPDATE
+
+
+ Allows UPDATE of any
+ column, or specific column(s), of a table, view, etc.
+ (In practice, any nontrivial UPDATE command will
+ require SELECT privilege as well, since it must
+ reference table columns to determine which rows to update, and/or to
+ compute new values for columns.)
+ SELECT ... FOR UPDATE
+ and SELECT ... FOR SHARE
+ also require this privilege on at least one column, in addition to the
+ SELECT privilege. For sequences, this
+ privilege allows use of the nextval and
+ setval functions.
+ For large objects, this privilege allows writing or truncating the
+ object.
+
+
+
+
+
+ DELETE
+
+
+ Allows DELETE of a row from a table, view, etc.
+ (In practice, any nontrivial DELETE command will
+ require SELECT privilege as well, since it must
+ reference table columns to determine which rows to delete.)
+
+
+
+
+
+ TRUNCATE
+
+
+ Allows TRUNCATE on a table, view, etc.
+
+
+
+
+
+ REFERENCES
+
+
+ Allows creation of a foreign key constraint referencing a
+ table, or specific column(s) of a table.
+
+
+
+
+
+ TRIGGER
+
+
+ Allows creation of a trigger on a table, view, etc.
+
+
+
+
+
+ CREATE
+
+
+ For databases, allows new schemas and publications to be created within
+ the database.
+
+
+ For schemas, allows new objects to be created within the schema.
+ To rename an existing object, you must own the
+ object and have this privilege for the containing
+ schema.
+
+
+ For tablespaces, allows tables, indexes, and temporary files to be
+ created within the tablespace, and allows databases to be created that
+ have the tablespace as their default tablespace. (Note that revoking
+ this privilege will not alter the placement of existing objects.)
+
+
+
+
+
+ CONNECT
+
+
+ Allows the grantee to connect to the database. This
+ privilege is checked at connection startup (in addition to checking
+ any restrictions imposed by pg_hba.conf ).
+
+
+
+
+
+ TEMPORARY
+
+
+ Allows temporary tables to be created while using the database.
+
+
+
+
+
+ EXECUTE
+
+
+ Allows calling a function or procedure, including use of
+ any operators that are implemented on top of the function. This is the
+ only type of privilege that is applicable to functions and procedures.
+
+
+
+
+
+ USAGE
+
+
+ For procedural languages, allows use of the language for
+ the creation of functions in that language. This is the only type
+ of privilege that is applicable to procedural languages.
+
+
+ For schemas, allows access to objects contained in the
+ schema (assuming that the objects' own privilege requirements are
+ also met). Essentially this allows the grantee to look up
+ objects within the schema. Without this permission, it is still
+ possible to see the object names, e.g. by querying system catalogs.
+ Also, after revoking this permission, existing sessions might have
+ statements that have previously performed this lookup, so this is not
+ a completely secure way to prevent object access.
+
+
+ For sequences, allows use of the
+ currval and nextval functions.
+
+
+ For types and domains, allows use of the type or domain in the
+ creation of tables, functions, and other schema objects. (Note that
+ this privilege does not control all usage of the
+ type, such as values of the type appearing in queries. It only
+ prevents objects from being created that depend on the type. The
+ main purpose of this privilege is controlling which users can create
+ dependencies on a type, which could prevent the owner from changing
+ the type later.)
+
+
+ For foreign-data wrappers, allows creation of new servers using the
+ foreign-data wrapper.
+
+
+ For foreign servers, allows creation of foreign tables using the
+ server. Grantees may also create, alter, or drop their own user
+ mappings associated with that server.
+
+
+
+
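+ For instance, several of these privileges can be granted on individual
+ columns. A brief sketch using the accounts table and
+ role joe from the examples above (the column names are
+ hypothetical):
+
+GRANT SELECT (account_id, balance) ON accounts TO joe;
+GRANT INSERT (account_id) ON accounts TO joe;
+
+ With these grants, joe can read and insert only the
+ named columns; the other columns of an inserted row receive their
+ default values.
+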
+
+ The privileges required by other commands are listed on the
+ reference page of the respective command.
+
+
+
+ PostgreSQL grants privileges on some types of objects to
+ PUBLIC by default when the objects are created.
+ No privileges are granted to PUBLIC by default on
+ tables,
+ table columns,
+ sequences,
+ foreign data wrappers,
+ foreign servers,
+ large objects,
+ schemas,
+ or tablespaces.
+ For other types of objects, the default privileges
+ granted to PUBLIC are as follows:
+ CONNECT and TEMPORARY (create
+ temporary tables) privileges for databases;
+ EXECUTE privilege for functions and procedures; and
+ USAGE privilege for languages and data types
+ (including domains).
+ The object owner can, of course, REVOKE
+ both default and expressly granted privileges. (For maximum
+ security, issue the REVOKE in the same transaction that
+ creates the object; then there is no window in which another user
+ can use the object.)
+ Also, these default privilege settings can be overridden using the
+ ALTER DEFAULT PRIVILEGES command.
+
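+ A sketch of that transactional pattern for a newly created function
+ (the function name is hypothetical):
+
+BEGIN;
+CREATE FUNCTION sensitive_fn() RETURNS integer
+    LANGUAGE sql AS 'SELECT 42';
+-- functions are executable by PUBLIC by default; revoking before the
+-- transaction commits leaves no window for other sessions
+REVOKE ALL ON FUNCTION sensitive_fn() FROM PUBLIC;
+COMMIT;
+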
+
+
+ The table below shows the one-letter
+ abbreviations that are used for these privilege types in
+ ACL (Access Control List) values.
+ You will see these letters in the output of the
+ psql commands listed below, or when looking at ACL columns of system catalogs.
+
+
+
+ ACL Privilege Abbreviations
+
+
+
+ Privilege
+ Abbreviation
+ Applicable Object Types
+
+
+
+
+ SELECT
+ r (read)
+
+ LARGE OBJECT ,
+ SEQUENCE ,
+ TABLE (and table-like objects),
+ table column
+
+
+
+ INSERT
+ a (append)
+ TABLE , table column
+
+
+ UPDATE
+ w (write)
+
+ LARGE OBJECT ,
+ SEQUENCE ,
+ TABLE ,
+ table column
+
+
+
+ DELETE
+ d
+ TABLE
+
+
+ TRUNCATE
+ D
+ TABLE
+
+
+ REFERENCES
+ x
+ TABLE , table column
+
+
+ TRIGGER
+ t
+ TABLE
+
+
+ CREATE
+ C
+
+ DATABASE ,
+ SCHEMA ,
+ TABLESPACE
+
+
+
+ CONNECT
+ c
+ DATABASE
+
+
+ TEMPORARY
+ T
+ DATABASE
+
+
+ EXECUTE
+ X
+ FUNCTION , PROCEDURE
+
+
+ USAGE
+ U
+
+ DOMAIN ,
+ FOREIGN DATA WRAPPER ,
+ FOREIGN SERVER ,
+ LANGUAGE ,
+ SCHEMA ,
+ SEQUENCE ,
+ TYPE
+
+
+
+
+
+
+
+ The table below summarizes the privileges
+ available for each type of SQL object, using the abbreviations shown
+ above.
+ It also shows the psql command
+ that can be used to examine privilege settings for each object type.
+
+
+
+ Summary of Access Privileges
+
+
+
+ Object Type
+ All Privileges
+ Default PUBLIC Privileges
+ psql Command
+
+
+
+
+ DATABASE
+ CTc
+ Tc
+ \l
+
+
+ DOMAIN
+ U
+ U
+ \dD+
+
+
+ FUNCTION or PROCEDURE
+ X
+ X
+ \df+
+
+
+ FOREIGN DATA WRAPPER
+ U
+ none
+ \dew+
+
+
+ FOREIGN SERVER
+ U
+ none
+ \des+
+
+
+ LANGUAGE
+ U
+ U
+ \dL+
+
+
+ LARGE OBJECT
+ rw
+ none
+
+
+
+ SCHEMA
+ UC
+ none
+ \dn+
+
+
+ SEQUENCE
+ rwU
+ none
+ \dp
+
+
+ TABLE (and table-like objects)
+ arwdDxt
+ none
+ \dp
+
+
+ Table column
+ arwx
+ none
+ \dp
+
+
+ TABLESPACE
+ C
+ none
+ \db+
+
+
+ TYPE
+ U
+ U
+ \dT+
+
+
+
+
+
+
+
+ aclitem
+
+ The privileges that have been granted for a particular object are
+ displayed as a list of aclitem entries, where each
+ aclitem describes the permissions of one grantee that
+ have been granted by a particular grantor. For example,
+ calvin=r*w/hobbes specifies that the role
+ calvin has the privilege
+ SELECT (r ) with grant option
+ (* ) as well as the non-grantable
+ privilege UPDATE (w ), both granted
+ by the role hobbes . If calvin
+ also has some privileges on the same object granted by a different
+ grantor, those would appear as a separate aclitem entry.
+ An empty grantee field in an aclitem stands
+ for PUBLIC .
+
+
+
+ As an example, suppose that user miriam creates
+ table mytable and does:
+
+GRANT SELECT ON mytable TO PUBLIC;
+GRANT SELECT, UPDATE, INSERT ON mytable TO admin;
+GRANT SELECT (col1), UPDATE (col1) ON mytable TO miriam_rw;
+
+ Then psql 's \dp command
+ would show:
+
+=> \dp mytable
+ Access privileges
+ Schema | Name | Type | Access privileges | Column privileges | Policies
+--------+---------+-------+-----------------------+-----------------------+----------
+ public | mytable | table | miriam=arwdDxt/miriam+| col1: +|
+ | | | =r/miriam +| miriam_rw=rw/miriam |
+ | | | admin=arw/miriam | |
+(1 row)
+
+
+
+
+ If the Access privileges column is empty for a given
+ object, it means the object has default privileges (that is, its
+ privileges entry in the relevant system catalog is null). Default
+ privileges always include all privileges for the owner, and can include
+ some privileges for PUBLIC depending on the object
+ type, as explained above. The first GRANT
+ or REVOKE on an object will instantiate the default
+ privileges (producing, for
+ example, miriam=arwdDxt/miriam ) and then modify them
+ per the specified request. Similarly, entries are shown in Column
+ privileges only for columns with nondefault privileges.
+ (Note: for this purpose, default privileges always means
+ the built-in default privileges for the object's type. An object whose
+ privileges have been affected by an ALTER DEFAULT
+ PRIVILEGES command will always be shown with an explicit
+ privilege entry that includes the effects of
+ the ALTER .)
+
+
+
+ Notice that the owner's implicit grant options are not marked in the
+ access privileges display. A * will appear only when
+ grant options have been explicitly granted to someone.
+
@@ -1623,10 +2252,21 @@ CREATE POLICY account_managers ON accounts TO managers
USING (manager = current_user);
+
+ The policy above implicitly provides a WITH CHECK
+ clause identical to its USING clause, so that the
+ constraint applies both to rows selected by a command (so a manager
+ cannot SELECT , UPDATE ,
+ or DELETE existing rows belonging to a different
+ manager) and to rows modified by a command (so rows belonging to a
+ different manager cannot be created via INSERT
+ or UPDATE ).
+
+
If no role is specified, or the special user name
PUBLIC is used, then the policy applies to all
- users on the system. To allow all users to access their own row in
+ users on the system. To allow all users to access only their own row in
a users table, a simple policy can be used:
@@ -1635,19 +2275,32 @@ CREATE POLICY user_policy ON users
USING (user_name = current_user);
+
+ This works similarly to the previous example.
+
+
To use a different policy for rows that are being added to the table
- compared to those rows that are visible, the WITH CHECK
- clause can be used. This policy would allow all users to view all rows
+ compared to those rows that are visible, multiple policies can be
+ combined. This pair of policies would allow all users to view all rows
in the users table, but only modify their own:
-CREATE POLICY user_policy ON users
- USING (true)
- WITH CHECK (user_name = current_user);
+CREATE POLICY user_sel_policy ON users
+ FOR SELECT
+ USING (true);
+CREATE POLICY user_mod_policy ON users
+ USING (user_name = current_user);
+
+ In a SELECT command, these two policies are combined
+ using OR , with the net effect being that all rows
+ can be selected. In other command types, only the second policy applies,
+ so that the effects are the same as before.
+
+
Row security can also be disabled with the ALTER TABLE
command. Disabling row security does not remove any policies that are
@@ -2380,9 +3033,12 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC;
using ALTER ROLE user SET
search_path = "$user" . Everyone retains the ability to
create objects in the public schema, but only qualified names will
- choose those objects. A user holding the CREATEROLE
- privilege can undo this setting and issue arbitrary queries under the
- identity of users relying on the setting. If you
+ choose those objects. While qualified table references are fine, calls
+ to functions in the public schema will be
+ unsafe or unreliable. Also, a user holding
+ the CREATEROLE privilege can undo this setting and
+ issue arbitrary queries under the identity of users relying on the
+ setting. If you create functions or extensions in the public schema or
grant CREATEROLE to users not warranting this
almost-superuser ability, use the first pattern instead.
@@ -2393,8 +3049,10 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC;
Remove the public schema from search_path in
postgresql.conf .
The ensuing user experience matches the previous pattern. In addition
- to that pattern's implications for CREATEROLE , this
- trusts database owners the same way. If you assign
+ to that pattern's implications for functions
+ and CREATEROLE , this trusts database owners
+ like CREATEROLE . If you create functions or
+ extensions in the public schema or assign
the CREATEROLE
privilege, CREATEDB privilege or individual database
ownership to users not warranting almost-superuser access, use the
@@ -2804,8 +3462,9 @@ VALUES ('Albany', NULL, NULL, 'NY');
- These deficiencies will probably be fixed in some future release,
- but in the meantime considerable care is needed in deciding whether
+ Some functionality not implemented for inheritance hierarchies is
+ implemented for declarative partitioning.
+ Considerable care is needed in deciding whether partitioning with legacy
inheritance is useful for your application.
@@ -2946,7 +3605,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
divide a table into pieces called partitions. The table that is divided
is referred to as a partitioned table . The
specification consists of the partitioning method
- and a list of columns or expressions to be used as the
+ and a list of columns or expressions to be used as the
partition key .
@@ -2979,15 +3638,16 @@ VALUES ('Albany', NULL, NULL, 'NY');
Individual partitions are linked to the partitioned table with inheritance
behind-the-scenes; however, it is not possible to use some of the
- inheritance features discussed in the previous section with partitioned
- tables and partitions. For example, a partition cannot have any parents
- other than the partitioned table it is a partition of, nor can a regular
- table inherit from a partitioned table making the latter its parent.
- That means partitioned tables and partitions do not participate in
- inheritance with regular tables. Since a partition hierarchy consisting
- of the partitioned table and its partitions is still an inheritance
- hierarchy, all the normal rules of inheritance apply as described in
- with some exceptions, most notably:
+ generic features of inheritance (discussed below) with declaratively
+ partitioned tables or their partitions. For example, a partition
+ cannot have any parents other than the partitioned table it is a
+ partition of, nor can a regular table inherit from a partitioned table
+ making the latter its parent. That means partitioned tables and their
+ partitions do not participate in inheritance with regular tables.
+ Since a partition hierarchy consisting of the partitioned table and its
+ partitions is still an inheritance hierarchy, all the normal rules of
+ inheritance apply as described in with
+ some exceptions, most notably:
@@ -3003,27 +3663,31 @@ VALUES ('Albany', NULL, NULL, 'NY');
Using ONLY to add or drop a constraint on only the
- partitioned table is supported when there are no partitions. Once
+ partitioned table is supported as long as there are no partitions. Once
partitions exist, using ONLY will result in an error
as adding or dropping constraints on only the partitioned table, when
- partitions exist, is not supported. Instead, constraints can be added
- or dropped, when they are not present in the parent table, directly on
- the partitions. As a partitioned table does not have any data
- directly, attempts to use TRUNCATE
- ONLY on a partitioned table will always return an
- error.
+ partitions exist, is not supported. Instead, constraints on the
+ partitions themselves can be added and (if they are not present in the
+ parent table) dropped.
+
+
+
+
+
+ As a partitioned table does not have any data directly, attempts to use
+ TRUNCATE ONLY on a partitioned
+ table will always return an error.
Partitions cannot have columns that are not present in the parent. It
- is neither possible to specify columns when creating partitions with
- CREATE TABLE nor is it possible to add columns to
+ is not possible to specify columns when creating partitions with
+ CREATE TABLE , nor is it possible to add columns to
partitions after-the-fact using ALTER TABLE . Tables may be
added as a partition with ALTER TABLE ... ATTACH PARTITION
- only if their columns exactly match the parent, including any
- oid column.
+ only if their columns exactly match the parent.
@@ -3044,7 +3708,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
Updating the partition key of a row might cause it to be moved into a
- different partition where this row satisfies its partition constraint.
+ different partition where this row satisfies the partition bounds.
@@ -3196,7 +3860,7 @@ CREATE INDEX ON measurement (logdate);
- Ensure that the constraint_exclusion
+ Ensure that the enable_partition_pruning
configuration parameter is not disabled in postgresql.conf .
If it is, queries will not be optimized as desired.
@@ -3292,10 +3956,49 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
the system will be able to skip the scan to validate the implicit
partition constraint. Without such a constraint, the table will be
scanned to validate the partition constraint while holding an
- ACCESS EXCLUSIVE lock on the parent table.
+ ACCESS EXCLUSIVE lock on that partition
+ and a SHARE UPDATE EXCLUSIVE lock on the parent table.
One may then drop the constraint after ATTACH PARTITION
is finished, because it is no longer necessary.
+
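+ A sketch of that sequence for the example above (the constraint name
+ is arbitrary):
+
+ALTER TABLE measurement_y2008m02 ADD CONSTRAINT y2008m02_check
+    CHECK (logdate >= DATE '2008-02-01' AND logdate < DATE '2008-03-01');
+ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
+    FOR VALUES FROM ('2008-02-01') TO ('2008-03-01');
+-- the matching CHECK constraint lets ATTACH PARTITION skip the
+-- validation scan; it can be dropped afterwards
+ALTER TABLE measurement_y2008m02 DROP CONSTRAINT y2008m02_check;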
+
+ As explained above, it is possible to create indexes on partitioned tables
+ and they are applied automatically to the entire hierarchy. This is very
+ convenient, as not only the existing partitions will become indexed, but
+ also any partitions that are created in the future will. One limitation is
+ that it's not possible to use the CONCURRENTLY
+ qualifier when creating such a partitioned index. To overcome long lock
+ times, it is possible to use CREATE INDEX ON ONLY
+ the partitioned table; such an index is marked invalid, and the partitions
+ do not get the index applied automatically. The indexes on partitions can
+ be created separately using CONCURRENTLY , and later
+ attached to the index on the parent using
+ ALTER INDEX ... ATTACH PARTITION. Once indexes for all
+ partitions are attached to the parent index, the parent index is marked
+ valid automatically. Example:
+
+CREATE INDEX measurement_usls_idx ON ONLY measurement (unitsales);
+
+CREATE INDEX measurement_usls_200602_idx
+ ON measurement_y2006m02 (unitsales);
+ALTER INDEX measurement_usls_idx
+ ATTACH PARTITION measurement_usls_200602_idx;
+...
+
+
+ This technique can be used with UNIQUE and
+ PRIMARY KEY constraints too; the indexes are created
+ implicitly when the constraint is created. Example:
+
+ALTER TABLE ONLY measurement ADD UNIQUE (city_id, logdate);
+
+ALTER TABLE measurement_y2006m02 ADD UNIQUE (city_id, logdate);
+ALTER INDEX measurement_city_id_logdate_key
+ ATTACH PARTITION measurement_y2006m02_city_id_logdate_key;
+...
+
+
@@ -3314,37 +4017,27 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
- While primary keys are supported on partitioned tables, foreign
- keys referencing partitioned tables are not supported, nor are foreign
- key references from a partitioned table to some other table.
+ Unique constraints on partitioned tables must include all the
+ partition key columns (see the sketch after this list). This
+ limitation exists because
+ PostgreSQL can only enforce
+ uniqueness in each partition individually.
- When an UPDATE causes a row to move from one
- partition to another, there is a chance that another concurrent
- UPDATE or DELETE misses this row.
- Suppose session 1 is performing an UPDATE on a
- partition key, and meanwhile a concurrent session 2 for which this row
- is visible performs an UPDATE or
- DELETE operation on this row. Session 2 can silently
- miss the row if the row is deleted from the partition due to session
- 1's activity. In such case, session 2's
- UPDATE or DELETE , being unaware of
- the row movement thinks that the row has just been deleted and concludes
- that there is nothing to be done for this row. In the usual case where
- the table is not partitioned, or where there is no row movement,
- session 2 would have identified the newly updated row and carried out
- the UPDATE /DELETE on this new row
- version.
+ BEFORE ROW triggers, if necessary, must be defined
+ on individual partitions, not the partitioned table.
- Row triggers, if necessary, must be defined on individual partitions,
- not the partitioned table.
+ Mixing temporary and permanent relations in the same partition tree is
+ not allowed. Hence, if the partitioned table is permanent, so must be
+ its partitions and likewise if the partitioned table is temporary. When
+ using temporary relations, all members of the partition tree have to be
+ from the same session.
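+
+ To illustrate the first limitation above, a sketch using a
+ hypothetical table:
+
+CREATE TABLE meter_readings (
+    meter_id integer,
+    read_date date,
+    reading numeric,
+    UNIQUE (meter_id, read_date)  -- OK: includes the partition key column
+    -- UNIQUE (meter_id) would be rejected, as it omits read_date
+) PARTITION BY RANGE (read_date);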
@@ -3358,15 +4051,15 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
While the built-in declarative partitioning is suitable for most
common use cases, there are some circumstances where a more flexible
approach may be useful. Partitioning can be implemented using table
- inheritance, which allows for several features which are not supported
+ inheritance, which allows for several features not supported
by declarative partitioning, such as:
- Partitioning enforces a rule that all partitions must have exactly
- the same set of columns as the parent, but table inheritance allows
- children to have extra columns not present in the parent.
+ For declarative partitioning, partitions must have exactly the same set
+ of columns as the partitioned table, whereas with table inheritance,
+ child tables may have extra columns not present in the parent.
@@ -3381,8 +4074,8 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
Declarative partitioning only supports range, list and hash
partitioning, whereas table inheritance allows data to be divided in a
manner of the user's choosing. (Note, however, that if constraint
- exclusion is unable to prune partitions effectively, query performance
- will be very poor.)
+ exclusion is unable to prune child tables effectively, query performance
+ might be poor.)
@@ -3404,18 +4097,18 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
We use the same measurement table we used
- above. To implement it as a partitioned table using inheritance, use
+ above. To implement partitioning using inheritance, use
the following steps:
Create the master table, from which all of the
- partitions will inherit. This table will contain no data. Do not
+ child tables will inherit. This table will contain no data. Do not
define any check constraints on this table, unless you intend them
- to be applied equally to all partitions. There is no point in
+ to be applied equally to all child tables. There is no point in
defining any indexes or unique constraints on it, either. For our
- example, master table is the measurement
+ example, the master table is the measurement
table as originally defined.
@@ -3425,7 +4118,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
Create several child tables that each inherit from
the master table. Normally, these tables will not add any columns
to the set inherited from the master. Just as with declarative
- partitioning, these partitions are in every way normal
+ partitioning, these tables are in every way normal
PostgreSQL tables (or foreign tables).
@@ -3443,8 +4136,8 @@ CREATE TABLE measurement_y2008m01 () INHERITS (measurement);
- Add non-overlapping table constraints to the partition tables to
- define the allowed key values in each partition.
+ Add non-overlapping table constraints to the child tables to
+ define the allowed key values in each.
@@ -3455,18 +4148,18 @@ CHECK ( county IN ( 'Oxfordshire', 'Buckinghamshire', 'Warwickshire' ))
CHECK ( outletID >= 100 AND outletID < 200 )
Ensure that the constraints guarantee that there is no overlap
- between the key values permitted in different partitions. A common
+ between the key values permitted in different child tables. A common
mistake is to set up range constraints like:
CHECK ( outletID BETWEEN 100 AND 200 )
CHECK ( outletID BETWEEN 200 AND 300 )
- This is wrong since it is not clear which partition the key value
- 200 belongs in.
+ This is wrong since it is not clear which child table the key
+ value 200 belongs in.
- It would be better to instead create partitions as follows:
+ It would be better to instead create child tables as follows:
CREATE TABLE measurement_y2006m02 (
@@ -3495,7 +4188,7 @@ CREATE TABLE measurement_y2008m01 (
- For each partition, create an index on the key column(s),
+ For each child table, create an index on the key column(s),
as well as any other indexes you might want.
CREATE INDEX measurement_y2006m02_logdate ON measurement_y2006m02 (logdate);
@@ -3511,9 +4204,9 @@ CREATE INDEX measurement_y2008m01_logdate ON measurement_y2008m01 (logdate);
We want our application to be able to say INSERT INTO
measurement ... and have the data be redirected into the
- appropriate partition table. We can arrange that by attaching
+ appropriate child table. We can arrange that by attaching
a suitable trigger function to the master table.
- If data will be added only to the latest partition, we can
+ If data will be added only to the latest child, we can
use a very simple trigger function:
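+
+ A minimal sketch of such a trigger function, routing every row to the
+ newest child table (the actual function used with the trigger below
+ may differ):
+
+CREATE OR REPLACE FUNCTION measurement_insert_trigger()
+RETURNS trigger AS $$
+BEGIN
+    INSERT INTO measurement_y2008m01 VALUES (NEW.*);
+    RETURN NULL;  -- suppress the insert into the master table itself
+END;
+$$
+LANGUAGE plpgsql;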
@@ -3535,17 +4228,17 @@ LANGUAGE plpgsql;
CREATE TRIGGER insert_measurement_trigger
BEFORE INSERT ON measurement
- FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger();
+ FOR EACH ROW EXECUTE FUNCTION measurement_insert_trigger();
We must redefine the trigger function each month so that it always
- points to the current partition. The trigger definition does
+ points to the current child table. The trigger definition does
not need to be updated, however.
We might want to insert data and have the server automatically
- locate the partition into which the row should be added. We
+ locate the child table into which the row should be added. We
could do this with a more complex trigger function, for example:
@@ -3573,7 +4266,7 @@ LANGUAGE plpgsql;
The trigger definition is the same as before.
Note that each IF test must exactly match the
- CHECK constraint for its partition.
+ CHECK constraint for its child table.
@@ -3584,8 +4277,8 @@ LANGUAGE plpgsql;
- In practice it might be best to check the newest partition first,
- if most inserts go into that partition. For simplicity we have
+ In practice, it might be best to check the newest child first,
+ if most inserts go into that child. For simplicity, we have
shown the trigger's tests in the same order as in other parts
of this example.
@@ -3593,7 +4286,7 @@ LANGUAGE plpgsql;
A different approach to redirecting inserts into the appropriate
- partition table is to set up rules, instead of a trigger, on the
+ child table is to set up rules, instead of a trigger, on the
master table. For example:
@@ -3619,7 +4312,7 @@ DO INSTEAD
Be aware that COPY ignores rules. If you want to
use COPY to insert data, you'll need to copy into the
- correct partition table rather than into the master. COPY
+ correct child table rather than directly into the master. COPY
does fire triggers, so you can use it normally if you use the trigger
approach.
@@ -3635,25 +4328,25 @@ DO INSTEAD
Ensure that the constraint_exclusion
configuration parameter is not disabled in
- postgresql.conf .
- If it is, queries will not be optimized as desired.
+ postgresql.conf ; otherwise
+ child tables may be accessed unnecessarily.
- As we can see, a complex partitioning scheme could require a
+ As we can see, a complex table hierarchy could require a
substantial amount of DDL. In the above example we would be creating
- a new partition each month, so it might be wise to write a script that
+ a new child table each month, so it might be wise to write a script that
generates the required DDL automatically.
- Partition Maintenance
+ Maintenance for Inheritance Partitioning
- To remove old data quickly, simply drop the partition that is no longer
+ To remove old data quickly, simply drop the child table that is no longer
necessary:
DROP TABLE measurement_y2006m02;
@@ -3661,7 +4354,7 @@ DROP TABLE measurement_y2006m02;
- To remove the partition from the partitioned table but retain access to
+ To remove the child table from the inheritance hierarchy but retain access to
it as a table in its own right:
@@ -3670,8 +4363,8 @@ ALTER TABLE measurement_y2006m02 NO INHERIT measurement;
- To add a new partition to handle new data, create an empty partition
- just as the original partitions were created above:
+ To add a new child table to handle new data, create an empty child table
+ just as the original children were created above:
CREATE TABLE measurement_y2008m02 (
@@ -3679,9 +4372,10 @@ CREATE TABLE measurement_y2008m02 (
) INHERITS (measurement);
- Alternatively, one may want to create the new table outside the partition
- structure, and make it a partition after the data is loaded, checked,
- and transformed.
+ Alternatively, one may want to create and populate the new child table
+ before adding it to the table hierarchy. This could allow data to be
+ loaded, checked, and transformed before being made visible to queries on
+ the parent table.
CREATE TABLE measurement_y2008m02
@@ -3699,7 +4393,7 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement;
Caveats
- The following caveats apply to partitioned tables implemented using
+ The following caveats apply to partitioning implemented using
inheritance:
@@ -3707,19 +4401,27 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement;
There is no automatic way to verify that all of the
CHECK constraints are mutually
exclusive. It is safer to create code that generates
- partitions and creates and/or modifies associated objects than
+ child tables and creates and/or modifies associated objects than
to write each by hand.
- The schemes shown here assume that the partition key column(s)
- of a row never change, or at least do not change enough to require
- it to move to another partition. An UPDATE that attempts
+ Indexes and foreign key constraints apply to single tables and not
+ to their inheritance children, hence they have some
+ caveats to be aware of.
+
+
+
+
+
+ The schemes shown here assume that the values of a row's key column(s)
+ never change, or at least do not change enough to require it to move to another partition.
+ An UPDATE that attempts
to do that will fail because of the CHECK constraints.
If you need to handle such cases, you can put suitable update triggers
- on the partition tables, but it makes management of the structure
+ on the child tables, but it makes management of the structure
much more complicated.
@@ -3728,7 +4430,7 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement;
If you are using manual VACUUM or
ANALYZE commands, don't forget that
- you need to run them on each partition individually. A command like:
+ you need to run them on each child table individually. A command like:
ANALYZE measurement;
@@ -3748,7 +4450,7 @@ ANALYZE measurement;
Triggers or rules will be needed to route rows to the desired
- partition, unless the application is explicitly aware of the
+ child table, unless the application is explicitly aware of the
partitioning scheme. Triggers may be complicated to write, and will
be much slower than the tuple routing performed internally by
declarative partitioning.
@@ -3759,112 +4461,210 @@ ANALYZE measurement;
-
- Partitioning and Constraint Exclusion
+
+ Partition Pruning
- constraint exclusion
+ partition pruning
- Constraint exclusion is a query optimization technique
- that improves performance for partitioned tables defined in the
- fashion described above (both declaratively partitioned tables and those
- implemented using inheritance). As an example:
+ Partition pruning is a query optimization technique
+ that improves performance for declaratively partitioned tables.
+ As an example:
-SET constraint_exclusion = on;
+SET enable_partition_pruning = on; -- the default
SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';
- Without constraint exclusion, the above query would scan each of
- the partitions of the measurement table. With constraint
- exclusion enabled, the planner will examine the constraints of each
- partition and try to prove that the partition need not
+ Without partition pruning, the above query would scan each of the
+ partitions of the measurement table. With
+ partition pruning enabled, the planner will examine the definition
+ of each partition and prove that the partition need not
be scanned because it could not contain any rows meeting the query's
WHERE clause. When the planner can prove this, it
- excludes the partition from the query plan.
+ excludes (prunes ) the partition from the query
+ plan.
- You can use the EXPLAIN command to show the difference
- between a plan with constraint_exclusion on and a plan
- with it off. A typical unoptimized plan for this type of table setup is:
-
+ By using the EXPLAIN command and the enable_partition_pruning configuration parameter, it's
+ possible to show the difference between a plan for which partitions have
+ been pruned and one for which they have not. A typical unoptimized
+ plan for this type of table setup is:
-SET constraint_exclusion = off;
+SET enable_partition_pruning = off;
EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';
-
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Aggregate (cost=158.66..158.68 rows=1 width=0)
- -> Append (cost=0.00..151.88 rows=2715 width=0)
- -> Seq Scan on measurement (cost=0.00..30.38 rows=543 width=0)
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Aggregate (cost=188.76..188.77 rows=1 width=8)
+ -> Append (cost=0.00..181.05 rows=3085 width=0)
+ -> Seq Scan on measurement_y2006m02 (cost=0.00..33.12 rows=617 width=0)
Filter: (logdate >= '2008-01-01'::date)
- -> Seq Scan on measurement_y2006m02 measurement (cost=0.00..30.38 rows=543 width=0)
- Filter: (logdate >= '2008-01-01'::date)
- -> Seq Scan on measurement_y2006m03 measurement (cost=0.00..30.38 rows=543 width=0)
+ -> Seq Scan on measurement_y2006m03 (cost=0.00..33.12 rows=617 width=0)
Filter: (logdate >= '2008-01-01'::date)
...
- -> Seq Scan on measurement_y2007m12 measurement (cost=0.00..30.38 rows=543 width=0)
+ -> Seq Scan on measurement_y2007m11 (cost=0.00..33.12 rows=617 width=0)
+ Filter: (logdate >= '2008-01-01'::date)
+ -> Seq Scan on measurement_y2007m12 (cost=0.00..33.12 rows=617 width=0)
Filter: (logdate >= '2008-01-01'::date)
- -> Seq Scan on measurement_y2008m01 measurement (cost=0.00..30.38 rows=543 width=0)
+ -> Seq Scan on measurement_y2008m01 (cost=0.00..33.12 rows=617 width=0)
Filter: (logdate >= '2008-01-01'::date)
Some or all of the partitions might use index scans instead of
full-table sequential scans, but the point here is that there
is no need to scan the older partitions at all to answer this query.
- When we enable constraint exclusion, we get a significantly
+ When we enable partition pruning, we get a significantly
cheaper plan that will deliver the same answer:
-
-SET constraint_exclusion = on;
+SET enable_partition_pruning = on;
EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Aggregate (cost=63.47..63.48 rows=1 width=0)
- -> Append (cost=0.00..60.75 rows=1086 width=0)
- -> Seq Scan on measurement (cost=0.00..30.38 rows=543 width=0)
- Filter: (logdate >= '2008-01-01'::date)
- -> Seq Scan on measurement_y2008m01 measurement (cost=0.00..30.38 rows=543 width=0)
- Filter: (logdate >= '2008-01-01'::date)
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Aggregate (cost=37.75..37.76 rows=1 width=8)
+ -> Seq Scan on measurement_y2008m01 (cost=0.00..33.12 rows=617 width=0)
+ Filter: (logdate >= '2008-01-01'::date)
- Note that constraint exclusion is driven only by CHECK
- constraints, not by the presence of indexes. Therefore it isn't
- necessary to define indexes on the key columns. Whether an index
- needs to be created for a given partition depends on whether you
- expect that queries that scan the partition will generally scan
- a large part of the partition or just a small part. An index will
- be helpful in the latter case but not the former.
+ Note that partition pruning is driven only by the constraints defined
+ implicitly by the partition keys, not by the presence of indexes.
+ Therefore it isn't necessary to define indexes on the key columns.
+ Whether an index needs to be created for a given partition depends on
+ whether you expect that queries that scan the partition will
+ generally scan a large part of the partition or just a small part.
+ An index will be helpful in the latter case but not the former.
+
+
+
+ Partition pruning can be performed not only during the planning of a
+ given query, but also during its execution. This is useful as it can
+ allow more partitions to be pruned when clauses contain expressions
+ whose values are not known at query planning time, for example,
+ parameters defined in a PREPARE statement, using a
+ value obtained from a subquery, or using a parameterized value on the
+ inner side of a nested loop join. Partition pruning during execution
+ can be performed at any of the following times:
+
+
+
+
+ During initialization of the query plan. Partition pruning can be
+ performed here for parameter values which are known during the
+ initialization phase of execution. Partitions which are pruned
+ during this stage will not show up in the query's
+ EXPLAIN or EXPLAIN ANALYZE .
+ It is possible to determine the number of partitions which were
+ removed during this phase by observing the
+ Subplans Removed property in the
+ EXPLAIN output (see the sketch after this list).
+
+
+
+
+
+ During actual execution of the query plan. Partition pruning may
+ also be performed here to remove partitions using values which are
+ only known during actual query execution. This includes values
+ from subqueries and values from execution-time parameters such as
+ those from parameterized nested loop joins. Since the value of
+ these parameters may change many times during the execution of the
+ query, partition pruning is performed whenever one of the
+ execution parameters being used by partition pruning changes.
+ Determining if partitions were pruned during this phase requires
+ careful inspection of the loops property in
+ the EXPLAIN ANALYZE output. Subplans
+ corresponding to different partitions may have different values
+ for it depending on how many times each of them was pruned during
+ execution. Some may be shown as (never executed)
+ if they were pruned every time.
+
+
+
+
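+ For example (a sketch using the measurement table
+ from the examples above), pruning on a parameter value unknown at
+ planning time can be observed like this:
+
+PREPARE count_since (date) AS
+    SELECT count(*) FROM measurement WHERE logdate >= $1;
+EXPLAIN (ANALYZE) EXECUTE count_since('2008-01-01');
+-- once a generic plan is chosen, a "Subplans Removed: N" line on the
+-- Append node reports partitions pruned at executor startup
+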
+
+
+ Partition pruning can be disabled using the
+ enable_partition_pruning setting.
+
+
+
+
+ Execution-time partition pruning currently only occurs for the
+ Append and MergeAppend node types.
+ It is not yet implemented for the ModifyTable node
+ type, but that is likely to be changed in a future release of
+ PostgreSQL .
+
+
+
+
+
+ Partitioning and Constraint Exclusion
+
+
+ constraint exclusion
+
+
+
+ Constraint exclusion is a query optimization
+ technique similar to partition pruning. While it is primarily used
+ for partitioning implemented using the legacy inheritance method, it can be
+ used for other purposes, including with declarative partitioning.
+
+
+
+ Constraint exclusion works in a very similar way to partition
+ pruning, except that it uses each table's CHECK
+ constraints — which gives it its name — whereas partition
+ pruning uses the table's partition bounds, which exist only in the
+ case of declarative partitioning. Another difference is that
+ constraint exclusion is only applied at plan time; there is no attempt
+ to remove partitions at execution time.
+
+
+
+ The fact that constraint exclusion uses CHECK
+ constraints, which makes it slow compared to partition pruning, can
+ sometimes be used as an advantage: because constraints can be defined
+ even on declaratively-partitioned tables, in addition to their internal
+ partition bounds, constraint exclusion may be able
+ to elide additional partitions from the query plan.
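+
+ For instance (a sketch; the constraint is hypothetical), an extra
+ CHECK constraint on one partition can let the planner
+ skip it for queries that the partition bounds alone could not exclude:
+
+ALTER TABLE measurement_y2006m02 ADD CHECK (peaktemp < 50);
+-- with constraint exclusion enabled, a query such as
+--   SELECT count(*) FROM measurement WHERE peaktemp >= 50;
+-- no longer needs to scan measurement_y2006m02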
The default (and recommended) setting of constraint_exclusion
- is actually neither
+ is neither
on nor off , but an intermediate setting
called partition , which causes the technique to be
- applied only to queries that are likely to be working on partitioned
+ applied only to queries that are likely to be working on inheritance partitioned
tables. The on setting causes the planner to examine
CHECK constraints in all queries, even simple ones that
are unlikely to benefit.
- The following caveats apply to constraint exclusion, which is used by
- both inheritance and partitioned tables:
+ The following caveats apply to constraint exclusion:
+
+
+ Constraint exclusion is only applied during query planning, unlike
+ partition pruning, which can also be applied during query execution.
+
+
+
Constraint exclusion only works when the query's WHERE
clause contains constants (or externally supplied parameters).
For example, a comparison against a non-immutable function such as
CURRENT_TIMESTAMP cannot be optimized, since the
- planner cannot know which partition the function value might fall
+ planner cannot know which child table the function's value might fall
into at run time.
@@ -3872,32 +4672,110 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';
Keep the partitioning constraints simple, else the planner may not be
- able to prove that partitions don't need to be visited. Use simple
+ able to prove that child tables might not need to be visited. Use simple
equality conditions for list partitioning, or simple
range tests for range partitioning, as illustrated in the preceding
examples. A good rule of thumb is that partitioning constraints should
contain only comparisons of the partitioning column(s) to constants
- using B-tree-indexable operators, which applies even to partitioned
- tables, because only B-tree-indexable column(s) are allowed in the
- partition key. (This is not a problem when using declarative
- partitioning, since the automatically generated constraints are simple
- enough to be understood by the planner.)
+ using B-tree-indexable operators, because only B-tree-indexable
+ column(s) are allowed in the partition key.
- All constraints on all partitions of the master table are examined
- during constraint exclusion, so large numbers of partitions are likely
- to increase query planning time considerably. Partitioning using
- these techniques will work well with up to perhaps a hundred partitions;
- don't try to use many thousands of partitions.
+ All constraints on all children of the parent table are examined
+ during constraint exclusion, so large numbers of children are likely
+ to increase query planning time considerably. So the legacy
+ inheritance based partitioning will work well with up to perhaps a
+ hundred child tables; don't try to use many thousands of children.
+
+
+ Declarative Partitioning Best Practices
+
+
+ The choice of how to partition a table should be made carefully as the
+ performance of query planning and execution can be negatively affected by
+ poor design.
+
+
+
+ One of the most critical design decisions will be the column or columns
+ by which you partition your data. Often the best choice will be to
+ partition by the column or set of columns which most commonly appear in
+ WHERE clauses of queries being executed on the
+ partitioned table. WHERE clause items that match and
+ are compatible with the partition key can be used to prune unneeded
+ partitions. However, you may be forced into making other decisions by
+ requirements for the PRIMARY KEY or a
+ UNIQUE constraint. Removal of unwanted data is also a
+ factor to consider when planning your partitioning strategy. An entire
+ partition can be detached fairly quickly, so it may be beneficial to
+ design the partition strategy in such a way that all data to be removed
+ at once is located in a single partition.
+
+
+
+ Choosing the target number of partitions that the table should be divided
+ into is also a critical decision to make. Not having enough partitions
+ may mean that indexes remain too large and that data locality remains poor,
+ which could result in low cache hit ratios. However, dividing the table
+ into too many partitions can also cause issues. Too many partitions can
+ mean longer query planning times and higher memory consumption during both
+ query planning and execution. When choosing how to partition your table,
+ it's also important to consider what changes may occur in the future. For
+ example, if you choose to have one partition per customer and you
+ currently have a small number of large customers, consider the
+ implications if in several years you instead find yourself with a large
+ number of small customers. In this case, it may be better to choose to
+ partition by HASH and choose a reasonable number of
+ partitions rather than trying to partition by LIST and
+ hoping that the number of customers does not increase beyond what it is
+ practical to partition the data by.
+
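+ A sketch of that HASH approach with a hypothetical
+ table:
+
+CREATE TABLE orders (
+    customer_id integer,
+    ordered_at timestamptz
+) PARTITION BY HASH (customer_id);
+CREATE TABLE orders_p0 PARTITION OF orders FOR VALUES WITH (MODULUS 4, REMAINDER 0);
+CREATE TABLE orders_p1 PARTITION OF orders FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+CREATE TABLE orders_p2 PARTITION OF orders FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+CREATE TABLE orders_p3 PARTITION OF orders FOR VALUES WITH (MODULUS 4, REMAINDER 3);
+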
+
+
+ Sub-partitioning can be useful to further divide partitions that are
+ expected to become larger than other partitions, although excessive
+ sub-partitioning can easily lead to large numbers of partitions and can
+ cause the same problems mentioned in the preceding paragraph.
+
+
+
+ It is also important to consider the overhead of partitioning during
+ query planning and execution. The query planner is generally able to
+ handle partition hierarchies with up to a few thousand partitions fairly
+ well, provided that typical queries allow the query planner to prune all
+ but a small number of partitions. Planning times become longer and memory
+ consumption becomes higher when more partitions remain after the planner
+ performs partition pruning. This is particularly true for the
+ UPDATE and DELETE commands. Another
+ reason to be concerned about having a large number of partitions is that
+ the server's memory consumption may grow significantly over a period of
+ time, especially if many sessions touch large numbers of partitions.
+ That's because each partition requires its metadata to be loaded into the
+ local memory of each session that touches it.
+
+
+
+ With data warehouse type workloads, it can make sense to use a larger
+ number of partitions than with an OLTP type workload.
+ Generally, in data warehouses, query planning time is less of a concern as
+ the majority of processing time is spent during query execution. With
+ either of these two types of workload, it is important to make the right
+ decisions early, as re-partitioning large quantities of data can be
+ painfully slow. Simulations of the intended workload are often beneficial
+ for optimizing the partitioning strategy. Never assume that more
+ partitions are better than fewer partitions and vice-versa.
+
+
+
diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml
index dfefa9e686c..a87e47a104d 100644
--- a/doc/src/sgml/dfunc.sgml
+++ b/doc/src/sgml/dfunc.sgml
@@ -1,7 +1,7 @@
- Compiling and Linking Dynamically-loaded Functions
+ Compiling and Linking Dynamically-Loaded Functions
Before you are able to use your
diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml
index 3708e5f3d8b..75467582e48 100644
--- a/doc/src/sgml/diskusage.sgml
+++ b/doc/src/sgml/diskusage.sgml
@@ -87,9 +87,9 @@ WHERE c.relname = 'customer' AND
c2.oid = i.indexrelid
ORDER BY c2.relname;
- relname | relpages
-----------------------+----------
- customer_id_indexdex | 26
+ relname | relpages
+-------------------+----------
+ customer_id_index | 26
diff --git a/doc/src/sgml/dml.sgml b/doc/src/sgml/dml.sgml
index 1e05c84fd17..97a77309554 100644
--- a/doc/src/sgml/dml.sgml
+++ b/doc/src/sgml/dml.sgml
@@ -112,7 +112,7 @@ INSERT INTO products (product_no, name, price)
- When inserting a lot of data at the same time, considering using
+ When inserting a lot of data at the same time, consider using
the command.
It is not as flexible as the
command, but is more efficient. Refer
diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml
index b4338e0e73d..c99198f5e5c 100644
--- a/doc/src/sgml/docguide.sgml
+++ b/doc/src/sgml/docguide.sgml
@@ -57,13 +57,13 @@
structure and content of a technical document without worrying
about presentation details. A document style defines how that
content is rendered into one of several final forms. DocBook is
- maintained by the
- OASIS group . The
+ maintained by the
+ OASIS group . The
official DocBook site has good introductory and reference documentation and
a complete O'Reilly book for your online reading pleasure. The
NewbieDoc Docbook Guide is very helpful for beginners.
- The
+ The
FreeBSD Documentation Project also uses DocBook and has some good
information, including a number of style guidelines that might be
worth considering.
@@ -80,11 +80,11 @@
- DocBook DTD
+ DocBook DTD
This is the definition of DocBook itself. We currently use version
- 4.2; you cannot use later or earlier versions. You need
+ 4.5; you cannot use later or earlier versions. You need
the XML variant of the DocBook DTD, not
the SGML variant.
@@ -187,12 +187,6 @@ pkg install docbook-xml docbook-xsl fop libxslt
directory you'll need to use gmake , because the
makefile provided is not suitable for FreeBSD's make .
-
-
- More information about the FreeBSD documentation tools can be
- found in the
- FreeBSD Documentation Project's instructions .
-
@@ -220,7 +214,7 @@ apt-get install docbook-xml docbook-xsl fop libxml2-utils xsltproc
If you use MacPorts, the following will get you set up:
-sudo port install docbook-xml-4.2 docbook-xsl fop
+sudo port install docbook-xml-4.5 docbook-xsl fop
If you use Homebrew, use this:
@@ -240,7 +234,7 @@ brew install docbook docbook-xsl fop
like this:
checking for xmllint... xmllint
-checking for DocBook XML V4.2... yes
+checking for DocBook XML V4.5... yes
checking for dbtoepub... dbtoepub
checking for xsltproc... xsltproc
checking for fop... fop
@@ -253,7 +247,7 @@ checking for fop... fop
- Building The Documentation
+ Building the Documentation
Once you have everything set up, change to the directory
@@ -276,7 +270,7 @@ checking for fop... fop
To produce HTML documentation with the stylesheet used on postgresql.org instead of the
+ url="https://www.postgresql.org/docs/current/">postgresql.org instead of the
default simple style use:
doc/src/sgml$ make STYLE=website html
@@ -364,7 +358,9 @@ ADDITIONAL_FLAGS='-Xmx1500m'
corresponds to , with some minor
changes to account for the different context. To recreate the
file, change to the directory doc/src/sgml
- and enter make INSTALL .
+ and enter make INSTALL . Building text output
+ requires Pandoc version 1.13 or newer as an
+ additional build tool.
@@ -393,112 +389,36 @@ ADDITIONAL_FLAGS='-Xmx1500m'
Documentation Authoring
- SGML and DocBook do
- not suffer from an oversupply of open-source authoring tools. The
- most common tool set is the
- Emacs /XEmacs
- editor with appropriate editing mode. On some systems
- these tools are provided in a typical full installation.
+ The documentation sources are most conveniently modified with an editor
+ that has a mode for editing XML, and even more so if it has some awareness
+ of XML schema languages so that it can know about
+ DocBook syntax specifically.
-
- Emacs/PSGML
-
-
- PSGML is the most common and most
- powerful mode for editing SGML documents.
- When properly configured, it will allow you to use
- Emacs to insert tags and check markup
- consistency. You could use it for HTML as
- well. Check the
- PSGML web site for downloads, installation instructions, and
- detailed documentation.
-
-
-
- There is one important thing to note with
- PSGML : its author assumed that your
- main SGML DTD directory
- would be /usr/local/lib/sgml . If, as in the
- examples in this chapter, you use
- /usr/local/share/sgml , you have to
- compensate for this, either by setting
- SGML_CATALOG_FILES environment variable, or you
- can customize your PSGML installation
- (its manual tells you how).
-
-
-
- Put the following in your ~/.emacs
- environment file (adjusting the path names to be appropriate for
- your system):
-
-
-; ********** for SGML mode (psgml)
-
-(setq sgml-omittag t)
-(setq sgml-shorttag t)
-(setq sgml-minimize-attributes nil)
-(setq sgml-always-quote-attributes t)
-(setq sgml-indent-step 1)
-(setq sgml-indent-data t)
-(setq sgml-parent-document nil)
-(setq sgml-exposed-tags nil)
-(setq sgml-catalog-files '("/usr/local/share/sgml/catalog"))
-
-(autoload 'sgml-mode "psgml" "Major mode to edit SGML files." t )
-
-
- and in the same file add an entry for SGML
- into the (existing) definition for
- auto-mode-alist :
-
-(setq
- auto-mode-alist
- '(("\\.sgml$" . sgml-mode)
- ))
-
-
-
-
- You might find that when using PSGML , a
- comfortable way of working with these separate files of book
- parts is to insert a proper DOCTYPE
- declaration while you're editing them. If you are working on
- this source, for instance, it is an appendix chapter, so you
-    would specify the document as an appendix instance
-    of a DocBook document by making the first line look like this:
-
-
-<!DOCTYPE appendix PUBLIC "-//OASIS//DTD DocBook V4.2//EN">
-
-
- This means that anything and everything that reads
- SGML will get it right, and I can verify the
- document with nsgmls -s docguide.sgml . (But
- you need to take out that line before building the entire
- documentation set.)
-
-
+
+ Note that for historical reasons the documentation source files are named
+ with an extension .sgml even though they are now XML
+ files. So you might need to adjust your editor configuration to set the
+ correct mode.
+
- Other Emacs Modes
+ Emacs
- GNU Emacs ships with a different
- SGML mode, which is not quite as powerful as
- PSGML , but it's less confusing and
- lighter weight. Also, it offers syntax highlighting (font lock),
- which can be very helpful.
- src/tools/editors/emacs.samples contains
- sample settings for this mode.
+ nXML Mode , which ships with
+ Emacs , is the most common mode for editing
+ XML documents with Emacs .
+ It will allow you to use Emacs to insert tags
+ and check markup consistency, and it supports
+ DocBook out of the box. Check the
+ nXML manual for detailed documentation.
- Norm Walsh offers a
- major mode
- specifically for DocBook which also has font-lock and a number of features to
- reduce typing.
+ src/tools/editors/emacs.samples contains
+ recommended settings for this mode.
diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml
index 1f3ea6aa6e2..670fc9955f7 100644
--- a/doc/src/sgml/earthdistance.sgml
+++ b/doc/src/sgml/earthdistance.sgml
@@ -24,7 +24,7 @@
- Cube-based Earth Distances
+ Cube-Based Earth Distances
Data is stored in cubes that are points (both corners are the same) using 3
@@ -60,7 +60,7 @@
- Cube-based Earthdistance Functions
+ Cube-Based Earthdistance Functions
@@ -137,7 +137,7 @@
- Point-based Earth Distances
+ Point-Based Earth Distances
The second part of the module relies on representing Earth locations as
@@ -154,7 +154,7 @@
- Point-based Earthdistance Operators
+ Point-Based Earthdistance Operators
diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index 98b68405207..d225eb3c20d 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -177,6 +177,19 @@ EXEC SQL CONNECT TO target AS
SQL string literal, or a reference to a character variable.
+
+ If the connection target includes any options ,
+ those consist of
+ keyword =value
+ specifications separated by ampersands (& ).
+ The allowed key words are the same ones recognized
+ by libpq (see
+ ). Spaces are ignored before
+ any keyword or value ,
+ though not within or after one. Note that there is no way to
+ write & within a value .
+
+
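[Editor's note: as an illustration, a connection target carrying two libpq
options might look like this; the database name testdb and connection name
con1 are placeholders:

EXEC SQL CONNECT TO tcp:postgresql://localhost/testdb?connect_timeout=10&application_name=demo AS con1;
]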
The connection-name is used to handle
multiple connections in one program. It can be omitted if a
@@ -191,7 +204,7 @@ EXEC SQL CONNECT TO target AS
secure schema usage pattern,
begin each session by removing publicly-writable schemas
from search_path . For example,
- add options=-csearch_path=
+ add options=-c search_path=
to options , or
issue EXEC SQL SELECT pg_catalog.set_config('search_path', '',
false); after connecting. This consideration is not specific to
@@ -547,7 +560,7 @@ EXEC SQL COMMIT;
-
+
EXEC SQL SET AUTOCOMMIT TO ON
@@ -651,7 +664,7 @@ EXEC SQL DEALLOCATE PREPARE name ;
not really useful in real applications. This section explains in
detail how you can pass data between your C program and the
embedded SQL statements using a simple mechanism called
- host variables . In an embedded SQL program we
+ host variables . In an embedded SQL program we
consider the SQL statements to be guests in the C
program code which is the host language . Therefore
the variables of the C program are called host
@@ -906,7 +919,7 @@ do
character(n ) , varchar(n ) , text
- char[n +1] , VARCHAR[n +1] declared in ecpglib.h
+ char[n +1] , VARCHAR[n +1]
@@ -936,7 +949,7 @@ do
bytea
- char *
+ char * , bytea[n ]
@@ -1193,6 +1206,36 @@ EXEC SQL END DECLARE SECTION;
+
+
+ bytea
+
+
+ The handling of the bytea type is similar to
+ that of VARCHAR . The definition on an array of type
+      that of VARCHAR . The definition of an array of type
+ variable. A declaration like:
+
+bytea var[180];
+
+ is converted into:
+
+struct bytea_var { int len; char arr[180]; } var;
+
+      The member arr holds binary-format
+      data. Unlike VARCHAR , it can contain
+      '\0' bytes as part of the data.
+ The data is converted from/to hex format and sent/received by
+ ecpglib.
+
+
+
+
+       A bytea variable can be used only when
+ is set to hex .
+
+
+
@@ -1666,7 +1709,7 @@ while (1)
- User-defined Base Types
+ User-Defined Base Types
New user-defined base types are not directly supported by ECPG.
@@ -1678,7 +1721,7 @@ while (1)
Here is an example using the data type complex from
the example in . The external string
- representation of that type is (%lf,%lf) ,
+ representation of that type is (%f,%f) ,
which is defined in the
functions complex_in()
and complex_out() functions
@@ -1951,11 +1994,23 @@ EXEC SQL SELECT started, duration INTO :ts1, :iv1 FROM datetbl WHERE d=:date1;
PGTYPEStimestamp_add_interval(&ts1, &iv1, &tsout);
out = PGTYPEStimestamp_to_asc(&tsout);
printf("Started + duration: %s\n", out);
-free(out);
+PGTYPESchar_free(out);
]]>
+
+ Character Strings
+
+ Some functions such as PGTYPESnumeric_to_asc return
+ a pointer to a freshly allocated character string. These results should be
+ freed with PGTYPESchar_free instead of
+ free . (This is important only on Windows, where
+ memory allocation and release sometimes need to be done by the same
+ library.)
+
+
+
The numeric Type
@@ -2029,6 +2084,7 @@ char *PGTYPESnumeric_to_asc(numeric *num, int dscale);
The numeric value will be printed with dscale decimal
digits, with rounding applied if necessary.
+ The result must be freed with PGTYPESchar_free() .
@@ -2419,6 +2475,7 @@ char *PGTYPESdate_to_asc(date dDate);
The function receives the date dDate as its only parameter.
It will output the date in the form 1999-01-18 , i.e., in the
YYYY-MM-DD format.
+ The result must be freed with PGTYPESchar_free() .
@@ -2841,6 +2898,7 @@ char *PGTYPEStimestamp_to_asc(timestamp tstamp);
The function receives the timestamp tstamp as
its only argument and returns an allocated string that contains the
textual representation of the timestamp.
+ The result must be freed with PGTYPESchar_free() .
@@ -3349,6 +3407,7 @@ char *PGTYPESinterval_to_asc(interval *span);
The function converts the interval variable that span
points to into a C char*. The output looks like this example:
@ 1 day 12 hours 59 mins 10 secs .
+ The result must be freed with PGTYPESchar_free() .
@@ -4788,7 +4847,7 @@ EXEC SQL WHENEVER condition action
Execute the C statement continue . This should
- only be used in loops statements. if executed, will cause the flow
+        only be used in loop statements. If executed, it will cause the flow
of control to return to the top of the loop.
@@ -4954,7 +5013,7 @@ struct
The fields sqlcaid ,
- sqlcabc ,
+ sqlabc ,
sqlerrp , and the remaining elements of
sqlerrd and
sqlwarn currently contain no useful
@@ -5858,7 +5917,7 @@ ECPG = ecpg
ECPGtransactionStatus(const char *connection_name )
returns the current transaction status of the given connection identified by connection_name .
- See and libpq's PQtransactionStatus() for details about the returned status codes.
+ See and libpq's for details about the returned status codes.
@@ -6442,7 +6501,7 @@ DATABASE connection_target
- connection_object
+ connection_name
An optional identifier for the connection, so that it can be
@@ -8182,7 +8241,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
sqlformat
- Reserved in Informix, value of PQfformat() for the field.
+ Reserved in Informix, value of for the field.
@@ -8211,7 +8270,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0)
sqlxid
- Extended type of the field, result of PQftype() .
+ Extended type of the field, result of .
diff --git a/doc/src/sgml/event-trigger.sgml b/doc/src/sgml/event-trigger.sgml
index 0a8860490ad..18628c498ba 100644
--- a/doc/src/sgml/event-trigger.sgml
+++ b/doc/src/sgml/event-trigger.sgml
@@ -175,6 +175,14 @@
-
+
+ ALTER DEFAULT PRIVILEGES
+ X
+ X
+ -
+ -
+
+
ALTER EXTENSION
X
@@ -215,6 +223,22 @@
-
+
+ ALTER LARGE OBJECT
+ X
+ X
+ -
+ -
+
+
+
+ ALTER MATERIALIZED VIEW
+ X
+ X
+ -
+ -
+
+
ALTER OPERATOR
X
@@ -247,6 +271,22 @@
-
+
+ ALTER PROCEDURE
+ X
+ X
+ -
+ -
+
+
+
+ ALTER PUBLICATION
+ X
+ X
+ -
+ -
+
+
ALTER SCHEMA
X
@@ -271,6 +311,22 @@
-
+
+ ALTER STATISTICS
+ X
+ X
+ -
+ -
+
+
+
+ ALTER SUBSCRIPTION
+ X
+ X
+ -
+ -
+
+
ALTER TABLE
X
@@ -344,7 +400,15 @@
- CREATE AGGREGATE
+ COMMENT
+ X
+ X
+ -
+ -
+ Only for local objects
+
+
+ CREATE ACCESS METHOD
X
X
-
@@ -352,12 +416,12 @@
- COMMENT
+ CREATE AGGREGATE
X
X
-
-
- Only for local objects
+
CREATE CAST
@@ -439,6 +503,14 @@
-
+
+ CREATE MATERIALIZED VIEW
+ X
+ X
+ -
+ -
+
+
CREATE OPERATOR
X
@@ -471,6 +543,22 @@
-
+
+ CREATE PROCEDURE
+ X
+ X
+ -
+ -
+
+
+
+ CREATE PUBLICATION
+ X
+ X
+ -
+ -
+
+
CREATE RULE
X
@@ -511,6 +599,14 @@
-
+
+ CREATE SUBSCRIPTION
+ X
+ X
+ -
+ -
+
+
CREATE TABLE
X
@@ -591,6 +687,14 @@
-
+
+ DROP ACCESS METHOD
+ X
+ X
+ X
+ -
+
+
DROP AGGREGATE
X
@@ -679,6 +783,14 @@
-
+
+ DROP MATERIALIZED VIEW
+ X
+ X
+ X
+ -
+
+
DROP OPERATOR
X
@@ -719,6 +831,22 @@
-
+
+ DROP PROCEDURE
+ X
+ X
+ X
+ -
+
+
+
+ DROP PUBLICATION
+ X
+ X
+ X
+ -
+
+
DROP RULE
X
@@ -759,6 +887,14 @@
-
+
+ DROP SUBSCRIPTION
+ X
+ X
+ X
+ -
+
+
DROP TABLE
X
@@ -847,6 +983,14 @@
-
+
+ REFRESH MATERIALIZED VIEW
+ X
+ X
+ -
+ -
+
+
REVOKE
X
@@ -1044,7 +1188,7 @@ CREATE FUNCTION noddl() RETURNS event_trigger
AS 'noddl' LANGUAGE C;
CREATE EVENT TRIGGER noddl ON ddl_command_start
- EXECUTE PROCEDURE noddl();
+ EXECUTE FUNCTION noddl();
@@ -1053,9 +1197,9 @@ CREATE EVENT TRIGGER noddl ON ddl_command_start
=# \dy
List of event triggers
- Name | Event | Owner | Enabled | Procedure | Tags
--------+-------------------+-------+---------+-----------+------
- noddl | ddl_command_start | dim | enabled | noddl |
+ Name | Event | Owner | Enabled | Function | Tags
+-------+-------------------+-------+---------+----------+------
+ noddl | ddl_command_start | dim | enabled | noddl |
(1 row)
=# CREATE TABLE foo(id serial);
@@ -1129,7 +1273,7 @@ $$;
CREATE EVENT TRIGGER no_rewrite_allowed
ON table_rewrite
- EXECUTE PROCEDURE no_rewrite();
+ EXECUTE FUNCTION no_rewrite();
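[Editor's note: a minimal sketch of the new EXECUTE FUNCTION spelling, using
a hypothetical notice-raising trigger; the names log_ddl and the notice text
are illustrative only:

CREATE FUNCTION log_ddl() RETURNS event_trigger
    LANGUAGE plpgsql AS $$
BEGIN
    -- tg_tag carries the command tag of the statement that fired the trigger
    RAISE NOTICE 'caught command: %', tg_tag;
END;
$$;

CREATE EVENT TRIGGER log_ddl ON ddl_command_end
    EXECUTE FUNCTION log_ddl();
]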
diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml
index 348ae71423f..8dc2b893f7e 100644
--- a/doc/src/sgml/extend.sgml
+++ b/doc/src/sgml/extend.sgml
@@ -953,7 +953,7 @@ SELECT * FROM pg_extension_update_paths('extension_name
- Installing Extensions using Update Scripts
+ Installing Extensions Using Update Scripts
An extension that has been around for awhile will probably exist in
@@ -1015,7 +1015,7 @@ CREATE TYPE pair AS ( k text, v text );
CREATE OR REPLACE FUNCTION pair(text, text)
RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::@extschema@.pair;';
-CREATE OPERATOR ~> (LEFTARG = text, RIGHTARG = text, PROCEDURE = pair);
+CREATE OPERATOR ~> (LEFTARG = text, RIGHTARG = text, FUNCTION = pair);
-- "SET search_path" is easy to get right, but qualified names perform better.
CREATE OR REPLACE FUNCTION lower(pair)
@@ -1100,13 +1100,15 @@ include $(PGXS)
and include the global PGXS makefile.
Here is an example that builds an extension module named
isbn_issn , consisting of a shared library containing
- some C code, an extension control file, a SQL script, and a documentation
- text file:
+ some C code, an extension control file, a SQL script, an include file
+ (only needed if other modules might need to access the extension functions
+ without going via SQL), and a documentation text file:
MODULES = isbn_issn
EXTENSION = isbn_issn
DATA = isbn_issn--1.0.sql
DOCS = README.isbn_issn
+HEADERS_isbn_issn = isbn_issn.h
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
@@ -1220,6 +1222,48 @@ include $(PGXS)
+
+ HEADERS
+ HEADERS_built
+
+
+ Files to (optionally build and) install under
+ prefix /include/server/$MODULEDIR/$MODULE_big .
+
+
+ Unlike DATA_built , files in HEADERS_built
+ are not removed by the clean target; if you want them removed,
+ also add them to EXTRA_CLEAN or add your own rules to do it.
+
+
+
+
+
+ HEADERS_$MODULE
+ HEADERS_built_$MODULE
+
+
+ Files to install (after building if specified) under
+ prefix /include/server/$MODULEDIR/$MODULE ,
+ where $MODULE must be a module name used
+ in MODULES or MODULE_big .
+
+
+ Unlike DATA_built , files in HEADERS_built_$MODULE
+ are not removed by the clean target; if you want them removed,
+ also add them to EXTRA_CLEAN or add your own rules to do it.
+
+
+ It is legal to use both variables for the same module, or any
+ combination, unless you have two module names in the
+ MODULES list that differ only by the presence of a
+ prefix built_ , which would cause ambiguity. In
+ that (hopefully unlikely) case, you should use only the
+ HEADERS_built_$MODULE variables.
+
+
+
+
SCRIPTS
@@ -1259,6 +1303,34 @@ include $(PGXS)
+
+ ISOLATION
+
+
+ list of isolation test cases, see below for more details
+
+
+
+
+
+ ISOLATION_OPTS
+
+
+ additional switches to pass to
+ pg_isolation_regress
+
+
+
+
+
+ TAP_TESTS
+
+
+    switch defining whether TAP tests need to be run, see below
+
+
+
+
NO_INSTALLCHECK
@@ -1281,7 +1353,34 @@ include $(PGXS)
PG_CPPFLAGS
- will be added to CPPFLAGS
+ will be prepended to CPPFLAGS
+
+
+
+
+
+ PG_CFLAGS
+
+
+ will be appended to CFLAGS
+
+
+
+
+
+ PG_CXXFLAGS
+
+
+ will be appended to CXXFLAGS
+
+
+
+
+
+ PG_LDFLAGS
+
+
+ will be prepended to LDFLAGS
@@ -1379,13 +1478,42 @@ make VPATH=/path/to/extension/source/tree install
have all expected files.
+
+ The scripts listed in the ISOLATION variable are used
+  for tests stressing the behavior of concurrent sessions with your module, which
+ can be invoked by make installcheck after doing
+ make install . For this to work you must have a
+ running PostgreSQL server. The script files
+ listed in ISOLATION must appear in a subdirectory
+ named specs/ in your extension's directory. These files
+ must have extension .spec , which must not be included
+ in the ISOLATION list in the makefile. For each test
+ there should also be a file containing the expected output in a
+ subdirectory named expected/ , with the same stem and
+ extension .out . make installcheck
+ executes each test script, and compares the resulting output to the
+ matching expected file. Any differences will be written to the file
+ output_iso/regression.diffs in
+ diff -c format. Note that trying to run a test that is
+  missing its expected file will be reported as trouble , so
+ make sure you have all expected files.
+
+
+
+ TAP_TESTS enables the use of TAP tests. Data from each
+ run is present in a subdirectory named tmp_check/ .
+ See also for more details.
+
+
The easiest way to create the expected files is to create empty files,
then do a test run (which will of course report differences). Inspect
the actual result files found in the results/
- directory, then copy them to expected/ if they match
- what you expect from the test.
+ directory (for tests in REGRESS ), or
+ output_iso/results/ directory (for tests in
+ ISOLATION ), then copy them to
+ expected/ if they match what you expect from the test.
diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml
index 89147817ec1..f94e450ef9e 100644
--- a/doc/src/sgml/external-projects.sgml
+++ b/doc/src/sgml/external-projects.sgml
@@ -65,7 +65,7 @@
DBD::Pg
Perl
Perl DBI driver
-
+
@@ -78,7 +78,7 @@
libpqxx
C++
- New-style C++ interface
+ C++ interface
@@ -107,7 +107,7 @@
pgtclng
Tcl
-
+
@@ -146,7 +146,7 @@
There are several administration tools available for
PostgreSQL . The most popular is
- pgAdmin III ,
+ pgAdmin ,
and there are several commercially available ones as well.
@@ -205,7 +205,7 @@
PL/R
R
-
+
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index 7b758bdf09b..6587678af2b 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -1,7 +1,7 @@
- Writing A Foreign Data Wrapper
+ Writing a Foreign Data Wrapper
foreign data wrapper
@@ -96,7 +96,7 @@
- FDW Routines For Scanning Foreign Tables
+ FDW Routines for Scanning Foreign Tables
@@ -286,7 +286,7 @@ EndForeignScan(ForeignScanState *node);
- FDW Routines For Scanning Foreign Joins
+ FDW Routines for Scanning Foreign Joins
If an FDW supports performing foreign joins remotely (rather than
@@ -309,7 +309,9 @@ GetForeignJoinPaths(PlannerInfo *root,
function is called during query planning. As
with GetForeignPaths , this function should
generate ForeignPath path(s) for the
- supplied joinrel , and call add_path to add these
+ supplied joinrel
+ (use create_foreign_join_path to build them),
+ and call add_path to add these
paths to the set of paths considered for the join. But unlike
GetForeignPaths , it is not necessary that this function
succeed in creating at least one path, since paths involving local
@@ -346,7 +348,7 @@ GetForeignJoinPaths(PlannerInfo *root,
- FDW Routines For Planning Post-Scan/Join Processing
+ FDW Routines for Planning Post-Scan/Join Processing
If an FDW supports performing remote post-scan/join processing, such as
@@ -369,7 +371,9 @@ GetForeignUpperPaths(PlannerInfo *root,
called only if all base relation(s) involved in the query belong to the
same FDW. This function should generate ForeignPath
path(s) for any post-scan/join processing that the FDW knows how to
- perform remotely, and call add_path to add these paths to
+ perform remotely
+ (use create_foreign_upper_path to build them),
+ and call add_path to add these paths to
the indicated upper relation. As with GetForeignJoinPaths ,
it is not necessary that this function succeed in creating any paths,
since paths involving local processing are always possible.
@@ -383,7 +387,9 @@ GetForeignUpperPaths(PlannerInfo *root,
step. The extra parameter provides additional details,
currently, it is set only for UPPERREL_PARTIAL_GROUP_AGG
or UPPERREL_GROUP_AGG , in which case it points to a
- GroupPathExtraData structure.
+ GroupPathExtraData structure;
+ or for UPPERREL_FINAL , in which case it points to a
+ FinalPathExtraData structure.
(Note that ForeignPath paths added
to output_rel would typically not have any direct dependency
on paths of the input_rel , since their processing is expected
@@ -398,7 +404,7 @@ GetForeignUpperPaths(PlannerInfo *root,
- FDW Routines For Updating Foreign Tables
+ FDW Routines for Updating Foreign Tables
If an FDW supports writable foreign tables, it should provide
@@ -573,12 +579,14 @@ ExecForeignInsert(EState *estate,
The data in the returned slot is used only if the INSERT
- query has a RETURNING clause or the foreign table has
- an AFTER ROW trigger. Triggers require all columns, but the
- FDW could choose to optimize away returning some or all columns depending
- on the contents of the RETURNING clause. Regardless, some
- slot must be returned to indicate success, or the query's reported row
- count will be wrong.
+ statement has a RETURNING clause or involves a view
+ WITH CHECK OPTION ; or if the foreign table has
+ an AFTER ROW trigger. Triggers require all columns,
+ but the FDW could choose to optimize away returning some or all columns
+ depending on the contents of the RETURNING clause or
+ WITH CHECK OPTION constraints. Regardless, some slot
+ must be returned to indicate success, or the query's reported row count
+ will be wrong.
@@ -587,6 +595,14 @@ ExecForeignInsert(EState *estate,
with an error message.
+
+ Note that this function is also called when inserting routed tuples into
+ a foreign-table partition or executing COPY FROM on
+ a foreign table, in which case it is called in a different way than it
+ is in the INSERT case. See the callback functions
+ described below that allow the FDW to support that.
+
+
TupleTableSlot *
@@ -619,12 +635,14 @@ ExecForeignUpdate(EState *estate,
The data in the returned slot is used only if the UPDATE
- query has a RETURNING clause or the foreign table has
- an AFTER ROW trigger. Triggers require all columns, but the
- FDW could choose to optimize away returning some or all columns depending
- on the contents of the RETURNING clause. Regardless, some
- slot must be returned to indicate success, or the query's reported row
- count will be wrong.
+ statement has a RETURNING clause or involves a view
+ WITH CHECK OPTION ; or if the foreign table has
+ an AFTER ROW trigger. Triggers require all columns,
+ but the FDW could choose to optimize away returning some or all columns
+ depending on the contents of the RETURNING clause or
+ WITH CHECK OPTION constraints. Regardless, some slot
+ must be returned to indicate success, or the query's reported row count
+ will be wrong.
@@ -743,6 +761,13 @@ BeginForeignInsert(ModifyTableState *mtstate,
NULL , no action is taken for the initialization.
+
+ Note that if the FDW does not support routable foreign-table partitions
+ and/or executing COPY FROM on foreign tables, this
+   function or the subsequently-called ExecForeignInsert
+   must throw an error as needed.
+
+
void
@@ -795,9 +820,11 @@ IsForeignRelUpdatable(Relation rel);
row-by-row approach is necessary, but it can be inefficient. If it is
possible for the foreign server to determine which rows should be
modified without actually retrieving them, and if there are no local
- triggers which would affect the operation, then it is possible to
- arrange things so that the entire operation is performed on the remote
- server. The interfaces described below make this possible.
+ structures which would affect the operation (row-level local triggers,
+ stored generated columns, or WITH CHECK OPTION
+ constraints from parent views), then it is possible to arrange things
+ so that the entire operation is performed on the remote server. The
+ interfaces described below make this possible.
@@ -942,7 +969,7 @@ EndDirectModify(ForeignScanState *node);
- FDW Routines For Row Locking
+ FDW Routines for Row Locking
If an FDW wishes to support late row locking (as described
@@ -984,29 +1011,31 @@ GetForeignRowMarkType(RangeTblEntry *rte,
-HeapTuple
+void
RefetchForeignRow(EState *estate,
ExecRowMark *erm,
Datum rowid,
+ TupleTableSlot *slot,
bool *updated);
- Re-fetch one tuple from the foreign table, after locking it if required.
+ Re-fetch one tuple slot from the foreign table, after locking it if required.
estate is global execution state for the query.
erm is the ExecRowMark struct describing
the target foreign table and the row lock type (if any) to acquire.
rowid identifies the tuple to be fetched.
- updated is an output parameter.
+ slot contains nothing useful upon call, but can be used to
+ hold the returned tuple. updated is an output parameter.
- This function should return a palloc'ed copy of the fetched tuple,
- or NULL if the row lock couldn't be obtained. The row lock
- type to acquire is defined by erm->markType , which is the
- value previously returned by GetForeignRowMarkType .
- (ROW_MARK_REFERENCE means to just re-fetch the tuple without
- acquiring any lock, and ROW_MARK_COPY will never be seen by
- this routine.)
+ This function should store the tuple into the provided slot, or clear it if
+ the row lock couldn't be obtained. The row lock type to acquire is
+ defined by erm->markType , which is the value
+ previously returned by GetForeignRowMarkType .
+ (ROW_MARK_REFERENCE means to just re-fetch the tuple
+ without acquiring any lock, and ROW_MARK_COPY will
+ never be seen by this routine.)
@@ -1018,7 +1047,7 @@ RefetchForeignRow(EState *estate,
Note that by default, failure to acquire a row lock should result in
- raising an error; a NULL return is only appropriate if
+ raising an error; returning with an empty slot is only appropriate if
the SKIP LOCKED option is specified
by erm->waitPolicy .
@@ -1204,7 +1233,7 @@ AcquireSampleRowsFunc(Relation relation,
- FDW Routines For IMPORT FOREIGN SCHEMA
+ FDW Routines for IMPORT FOREIGN SCHEMA
@@ -1367,7 +1396,7 @@ ShutdownForeignScan(ForeignScanState *node);
- FDW Routines For reparameterization of paths
+ FDW Routines for Reparameterization of Paths
@@ -1404,6 +1433,23 @@ ReparameterizeForeignPathByChild(PlannerInfo *root, List *fdw_private,
ForeignDataWrapper *
+GetForeignDataWrapperExtended(Oid fdwid, bits16 flags);
+
+
+ This function returns a ForeignDataWrapper
+ object for the foreign-data wrapper with the given OID. A
+ ForeignDataWrapper object contains properties
+ of the FDW (see foreign/foreign.h for details).
+ flags is a bitwise-or'd bit mask indicating
+ an extra set of options. It can take the value
+ FDW_MISSING_OK , in which case a NULL
+ result is returned to the caller instead of an error for an undefined
+ object.
+
+
+
+
+ForeignDataWrapper *
GetForeignDataWrapper(Oid fdwid);
@@ -1416,6 +1462,23 @@ GetForeignDataWrapper(Oid fdwid);
ForeignServer *
+GetForeignServerExtended(Oid serverid, bits16 flags);
+
+
+ This function returns a ForeignServer object
+ for the foreign server with the given OID. A
+ ForeignServer object contains properties
+ of the server (see foreign/foreign.h for details).
+ flags is a bitwise-or'd bit mask indicating
+ an extra set of options. It can take the value
+ FSV_MISSING_OK , in which case a NULL
+ result is returned to the caller instead of an error for an undefined
+ object.
+
+
+
+
+ForeignServer *
GetForeignServer(Oid serverid);
diff --git a/doc/src/sgml/features.sgml b/doc/src/sgml/features.sgml
index 6c22d698673..f767bee46e5 100644
--- a/doc/src/sgml/features.sgml
+++ b/doc/src/sgml/features.sgml
@@ -14,9 +14,10 @@
The formal name of the SQL standard is ISO/IEC 9075 Database
Language SQL
. A revised version of the standard is released
- from time to time; the most recent update appearing in 2011.
- The 2011 version is referred to as ISO/IEC 9075:2011, or simply as SQL:2011.
- The versions prior to that were SQL:2008, SQL:2003, SQL:1999, and SQL-92. Each version
+  from time to time; the most recent update appeared in 2016.
+ The 2016 version is referred to as ISO/IEC 9075:2016, or simply as SQL:2016.
+ The versions prior to that were SQL:2011, SQL:2008, SQL:2006, SQL:2003,
+ SQL:1999, and SQL-92. Each version
replaces the previous one, so claims of conformance to earlier
versions have no official merit.
PostgreSQL development aims for
@@ -78,18 +79,18 @@
- PostgreSQL supports most of the major features of SQL:2011. Out of
+ PostgreSQL supports most of the major features of SQL:2016. Out of
179 mandatory features required for full Core conformance,
PostgreSQL conforms to at least 160. In addition, there is a long
list of supported optional features. It might be worth noting that at
the time of writing, no current version of any database management
- system claims full conformance to Core SQL:2011.
+ system claims full conformance to Core SQL:2016.
In the following two sections, we provide a list of those features
that PostgreSQL supports, followed by a
- list of the features defined in SQL:2011 which
+ list of the features defined in SQL:2016 which
are not yet supported in PostgreSQL .
Both of these lists are approximate: There might be minor details that
are nonconforming for a feature that is listed as supported, and
@@ -132,7 +133,7 @@
Unsupported Features
- The following features defined in SQL:2011 are not
+ The following features defined in SQL:2016 are not
implemented in this release of
PostgreSQL . In a few cases, equivalent
functionality is available.
@@ -155,4 +156,335 @@
+
+ XML Limits and Conformance to SQL/XML
+
+
+ SQL/XML
+ limits and conformance
+
+
+
+ Significant revisions to the XML-related specifications in ISO/IEC 9075-14
+ (SQL/XML) were introduced with SQL:2006.
+ PostgreSQL 's implementation of the XML data
+ type and related functions largely follows the earlier 2003 edition,
+ with some borrowing from later editions. In particular:
+
+
+
+ Where the current standard provides a family of XML data types
+ to hold document
or content
in
+ untyped or XML Schema-typed variants, and a type
+ XML(SEQUENCE) to hold arbitrary pieces of XML content,
+ PostgreSQL provides the single
+ xml type, which can hold document
or
+ content
. There is no equivalent of the
+ standard's sequence
type.
+
+
+
+
+
+ PostgreSQL provides two functions
+ introduced in SQL:2006, but in variants that use the XPath 1.0
+ language, rather than XML Query as specified for them in the
+ standard.
+
+
+
+
+
+
+ This section presents some of the resulting differences you may encounter.
+
+
+
+ Queries Are Restricted to XPath 1.0
+
+
+ The PostgreSQL -specific functions
+ xpath() and xpath_exists()
+ query XML documents using the XPath language.
+ PostgreSQL also provides XPath-only variants
+ of the standard functions XMLEXISTS and
+ XMLTABLE , which officially use
+ the XQuery language. For all of these functions,
+ PostgreSQL relies on the
+ libxml2 library, which provides only XPath 1.0.
+
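[Editor's note: for instance, a simple node-set query through the XPath 1.0
engine; the document literal here is illustrative only:

SELECT xpath('/book/title/text()', '<book><title>Manual</title></book>'::xml);
-- {Manual}
]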
+
+
+ There is a strong connection between the XQuery language and XPath
+ versions 2.0 and later: any expression that is syntactically valid and
+ executes successfully in both produces the same result (with a minor
+ exception for expressions containing numeric character references or
+ predefined entity references, which XQuery replaces with the
+ corresponding character while XPath leaves them alone). But there is
+ no such connection between these languages and XPath 1.0; it was an
+ earlier language and differs in many respects.
+
+
+
+ There are two categories of limitation to keep in mind: the restriction
+ from XQuery to XPath for the functions specified in the SQL standard, and
+ the restriction of XPath to version 1.0 for both the standard and the
+ PostgreSQL -specific functions.
+
+
+
+ Restriction of XQuery to XPath
+
+
+ Features of XQuery beyond those of XPath include:
+
+
+
+
+ XQuery expressions can construct and return new XML nodes, in
+ addition to all possible XPath values. XPath can create and return
+ values of the atomic types (numbers, strings, and so on) but can
+ only return XML nodes that were already present in documents
+ supplied as input to the expression.
+
+
+
+
+
+ XQuery has control constructs for iteration, sorting, and grouping.
+
+
+
+
+
+ XQuery allows declaration and use of local functions.
+
+
+
+
+
+
+ Recent XPath versions begin to offer capabilities overlapping with
+ these (such as functional-style for-each and
+ sort , anonymous functions, and
+ parse-xml to create a node from a string),
+ but such features were not available before XPath 3.0.
+
+
+
+
+ Restriction of XPath to 1.0
+
+
+ For developers familiar with XQuery and XPath 2.0 or later, XPath 1.0
+ presents a number of differences to contend with:
+
+
+
+
+ The fundamental type of an XQuery/XPath expression, the
+ sequence , which can contain XML nodes, atomic values,
+ or both, does not exist in XPath 1.0. A 1.0 expression can only
+ produce a node-set (containing zero or more XML nodes), or a single
+ atomic value.
+
+
+
+
+
+ Unlike an XQuery/XPath sequence, which can contain any desired
+ items in any desired order, an XPath 1.0 node-set has no
+ guaranteed order and, like any set, does not allow multiple
+ appearances of the same item.
+
+
+ The libxml2 library does seem to
+ always return node-sets to PostgreSQL
+ with their members in the same relative order they had in the
+ input document. Its documentation does not commit to this
+ behavior, and an XPath 1.0 expression cannot control it.
+
+
+
+
+
+
+
+ While XQuery/XPath provides all of the types defined in XML Schema
+ and many operators and functions over those types, XPath 1.0 has only
+ node-sets and the three atomic types boolean ,
+ double , and string .
+
+
+
+
+
+ XPath 1.0 has no conditional operator. An XQuery/XPath expression
+ such as if ( hat ) then hat/@size else "no hat"
+ has no XPath 1.0 equivalent.
+
+
+
+
+
+ XPath 1.0 has no ordering comparison operator for strings. Both
+ "cat" < "dog" and
+ "cat" > "dog" are false, because each is a
+ numeric comparison of two NaN s. In contrast,
+ = and != do compare the strings
+ as strings.
+
+
+
+
+
+ XPath 1.0 blurs the distinction between
+ value comparisons and
+ general comparisons as XQuery/XPath define
+ them. Both sale/@hatsize = 7 and
+ sale/@customer = "alice" are existentially
+ quantified comparisons, true if there is
+ any sale with the given value for the
+ attribute, but sale/@taxable = false() is a
+ value comparison to the
+ effective boolean value of a whole node-set.
+ It is true only if no sale has
+ a taxable attribute at all.
+
+
+
+
+
+ In the XQuery/XPath data model, a document
+ node can have either document form (i.e., exactly one
+ top-level element, with only comments and processing instructions
+ outside of it) or content form (with those constraints
+ relaxed). Its equivalent in XPath 1.0, the
+ root node , can only be in document form.
+ This is part of the reason an xml value passed as the
+ context item to any PostgreSQL
+ XPath-based function must be in document form.
+
+
+
+
+
+
+ The differences highlighted here are not all of them. In XQuery and
+ the 2.0 and later versions of XPath, there is an XPath 1.0 compatibility
+ mode, and the W3C lists of
+ function library changes
+ and
+ language changes
+ applied in that mode offer a more complete (but still not exhaustive)
+ account of the differences. The compatibility mode cannot make the
+ later languages exactly equivalent to XPath 1.0.
+
+
+
+
+ Mappings between SQL and XML Data Types and Values
+
+
+ In SQL:2006 and later, both directions of conversion between standard SQL
+ data types and the XML Schema types are specified precisely. However, the
+ rules are expressed using the types and semantics of XQuery/XPath, and
+ have no direct application to the different data model of XPath 1.0.
+
+
+
+ When PostgreSQL maps SQL data values to XML
+ (as in xmlelement ), or XML to SQL (as in the output
+ columns of xmltable ), except for a few cases
+ treated specially, PostgreSQL simply assumes
+ that the XML data type's XPath 1.0 string form will be valid as the
+ text-input form of the SQL datatype, and conversely. This rule has the
+ virtue of simplicity while producing, for many data types, results similar
+ to the mappings specified in the standard.
+
+
+
+ Where interoperability with other systems is a concern, for some data
+ types, it may be necessary to use data type formatting functions (such
+ as those in ) explicitly to
+ produce the standard mappings.
+
+
+
+
+
+
+  Incidental Limits of the Implementation
+
+
+
+ This section concerns limits that are not inherent in the
+ libxml2 library, but apply to the current
+ implementation in PostgreSQL .
+
+
+
+ Only BY VALUE Passing Mechanism Is Supported
+
+
+ The SQL standard defines two passing mechanisms
+ that apply when passing an XML argument from SQL to an XML function or
+ receiving a result: BY REF , in which a particular XML
+ value retains its node identity, and BY VALUE , in which
+ the content of the XML is passed but node identity is not preserved. A
+ mechanism can be specified before a list of parameters, as the default
+ mechanism for all of them, or after any parameter, to override the
+ default.
+
+
+
+ To illustrate the difference, if
+ x is an XML value, these two queries in
+ an SQL:2006 environment would produce true and false, respectively:
+
+
+SELECT XMLQUERY('$a is $b' PASSING BY REF x AS a, x AS b NULL ON EMPTY);
+SELECT XMLQUERY('$a is $b' PASSING BY VALUE x AS a, x AS b NULL ON EMPTY);
+
+
+
+
+ PostgreSQL will accept
+ BY VALUE or BY REF in an
+ XMLEXISTS or XMLTABLE
+ construct, but it ignores them. The xml data type holds
+ a character-string serialized representation, so there is no node
+ identity to preserve, and passing is always effectively BY
+ VALUE .
+
+
+
+
+ Cannot Pass Named Parameters to Queries
+
+
+ The XPath-based functions support passing one parameter to serve as the
+ XPath expression's context item, but do not support passing additional
+ values to be available to the expression as named parameters.
+
+
+
+
+ No XML(SEQUENCE) Type
+
+
+ The PostgreSQL xml data type
+ can only hold a value in DOCUMENT
+ or CONTENT form. An XQuery/XPath expression
+ context item must be a single XML node or atomic value, but XPath 1.0
+ further restricts it to be only an XML node, and has no node type
+ allowing CONTENT . The upshot is that a
+ well-formed DOCUMENT is the only form of XML value
+ that PostgreSQL can supply as an XPath
+ context item.
+
+
+
+
+
diff --git a/doc/src/sgml/file-fdw.sgml b/doc/src/sgml/file-fdw.sgml
index 955a13ab7d9..4c34ad9cc92 100644
--- a/doc/src/sgml/file-fdw.sgml
+++ b/doc/src/sgml/file-fdw.sgml
@@ -174,9 +174,8 @@
- COPY 's OIDS and
- FORCE_QUOTE options are currently not supported by
- file_fdw .
+ COPY 's FORCE_QUOTE option is
+ currently not supported by file_fdw .
@@ -188,7 +187,7 @@
Changing table-level options requires being a superuser or having the privileges
of the default role pg_read_server_files (to use a filename) or
- the default role pg_execute_server_programs (to use a program),
+ the default role pg_execute_server_program (to use a program),
for security reasons: only certain users should be able to control which file is
read or which program is run. In principle regular users could be allowed to
change the other options, but that's not supported at present.
diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml
index 56b8da04488..3da2365ea97 100644
--- a/doc/src/sgml/filelist.sgml
+++ b/doc/src/sgml/filelist.sgml
@@ -42,7 +42,6 @@
-
@@ -90,6 +89,7 @@
+
@@ -166,28 +166,16 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 53a40ddeecf..0aa399dc2f0 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -896,6 +896,19 @@
2
+
+
+
+ log10
+
+ log10(dp or numeric )
+
+ (same as input)
+ base 10 logarithm
+ log10(100.0)
+ 2
+
+
log(b numeric ,
x numeric )
@@ -1136,15 +1149,19 @@
- The characteristics of the values returned by
- random() depend
- on the system implementation. It is not suitable for cryptographic
- applications; see module for an alternative.
-
+ The random() function uses a simple linear
+ congruential algorithm. It is fast but not suitable for cryptographic
+ applications; see the module for a more
+ secure alternative.
+ If setseed() is called, the results of
+ subsequent random() calls in the current session are
+ repeatable by re-issuing setseed() with the same
+ argument.
+
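[Editor's note: for example, the following pair of statements yields a
repeatable result, since the seed fixes the sequence for the session:

SELECT setseed(0.42);
SELECT random();   -- re-issuing setseed(0.42) reproduces this same sequence
]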
- Finally, shows the
- available trigonometric functions. All trigonometric functions
+ shows the
+ available trigonometric functions. All these functions
take arguments and return values of type double
precision . Each of the trigonometric functions comes in
two variants, one that measures angles in radians and one that
@@ -1307,6 +1324,96 @@
+
+ shows the
+ available hyperbolic functions. All these functions
+ take arguments and return values of type double
+ precision .
+
+
+
+ Hyperbolic Functions
+
+
+
+
+ Function
+ Description
+ Example
+ Result
+
+
+
+
+
+
+ sinh
+
+ sinh(x )
+
+ hyperbolic sine
+ sinh(0)
+ 0
+
+
+
+
+ cosh
+
+ cosh(x )
+
+ hyperbolic cosine
+ cosh(0)
+ 1
+
+
+
+
+ tanh
+
+ tanh(x )
+
+ hyperbolic tangent
+ tanh(0)
+ 0
+
+
+
+
+ asinh
+
+ asinh(x )
+
+ inverse hyperbolic sine
+ asinh(0)
+ 0
+
+
+
+
+ acosh
+
+ acosh(x )
+
+ inverse hyperbolic cosine
+ acosh(1)
+ 0
+
+
+
+
+ atanh
+
+ atanh(x )
+
+ inverse hyperbolic tangent
+ atanh(0)
+ 0
+
+
+
+
+
@@ -1776,7 +1883,7 @@
octal sequences (\ nnn ) and
doubles backslashes.
- encode(E'123\\000\\001', 'base64')
+ encode('123\000\001', 'base64')
MTIzAAE=
@@ -2100,7 +2207,7 @@
the delimiter. See for more
information.
- regexp_split_to_array('hello world', E'\\s+')
+ regexp_split_to_array('hello world', '\s+')
{hello,world}
@@ -2117,7 +2224,7 @@
the delimiter. See for more
information.
- regexp_split_to_table('hello world', E'\\s+')
+ regexp_split_to_table('hello world', '\s+')
hello world (2 rows)
@@ -2389,18 +2496,6 @@
-
- ascii_to_mic
- SQL_ASCII
- MULE_INTERNAL
-
-
-
- ascii_to_utf8
- SQL_ASCII
- UTF8
-
-
big5_to_euc_tw
BIG5
@@ -2671,12 +2766,6 @@
UTF8
-
- mic_to_ascii
- MULE_INTERNAL
- SQL_ASCII
-
-
mic_to_big5
MULE_INTERNAL
@@ -2786,7 +2875,7 @@
- tcvn_to_utf8
+ windows_1258_to_utf8
WIN1258
UTF8
@@ -2797,12 +2886,6 @@
UTF8
-
- utf8_to_ascii
- UTF8
- SQL_ASCII
-
-
utf8_to_big5
UTF8
@@ -2954,7 +3037,7 @@
- utf8_to_tcvn
+ utf8_to_windows_1258
UTF8
WIN1258
@@ -3301,8 +3384,8 @@ SELECT format('Testing %s, %s, %s, %%', 'one', 'two', 'three');
SELECT format('INSERT INTO %I VALUES(%L)', 'Foo bar', E'O\'Reilly');
Result: INSERT INTO "Foo bar" VALUES('O''Reilly')
-SELECT format('INSERT INTO %I VALUES(%L)', 'locations', E'C:\\Program Files');
-Result: INSERT INTO locations VALUES(E'C:\\Program Files')
+SELECT format('INSERT INTO %I VALUES(%L)', 'locations', 'C:\Program Files');
+Result: INSERT INTO locations VALUES('C:\Program Files')
@@ -3429,7 +3512,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
concatenation
- E'\\\\Post'::bytea || E'\\047gres\\000'::bytea
+ '\\Post'::bytea || '\047gres\000'::bytea
\\Post'gres\000
@@ -3442,7 +3525,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
int
Number of bytes in binary string
- octet_length(E'jo\\000se'::bytea)
+ octet_length('jo\000se'::bytea)
5
@@ -3457,7 +3540,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Replace substring
- overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 2 for 3)
+ overlay('Th\000omas'::bytea placing '\002\003'::bytea from 2 for 3)
T\\002\\003mas
@@ -3470,7 +3553,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
int
Location of specified substring
- position(E'\\000om'::bytea in E'Th\\000omas'::bytea)
+ position('\000om'::bytea in 'Th\000omas'::bytea)
3
@@ -3485,7 +3568,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Extract substring
- substring(E'Th\\000omas'::bytea from 2 for 3)
+ substring('Th\000omas'::bytea from 2 for 3)
h\000o
@@ -3504,7 +3587,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
bytes from the start
and end of string
- trim(E'\\000\\001'::bytea from E'\\000Tom\\001'::bytea)
+ trim('\000\001'::bytea from '\000Tom\001'::bytea)
Tom
@@ -3547,7 +3630,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
bytes from the start and end of
string
- btrim(E'\\000trim\\001'::bytea, E'\\000\\001'::bytea)
+ btrim('\000trim\001'::bytea, '\000\001'::bytea)
trim
@@ -3564,7 +3647,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Decode binary data from textual representation in string .
Options for format are same as in encode .
- decode(E'123\\000456', 'escape')
+ decode('123\000456', 'escape')
123\000456
@@ -3584,7 +3667,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
octal sequences (\ nnn ) and
doubles backslashes.
- encode(E'123\\000456'::bytea, 'escape')
+ encode('123\000456'::bytea, 'escape')
123\000456
@@ -3599,7 +3682,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Extract bit from string
- get_bit(E'Th\\000omas'::bytea, 45)
+ get_bit('Th\000omas'::bytea, 45)
1
@@ -3614,7 +3697,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Extract byte from string
- get_byte(E'Th\\000omas'::bytea, 4)
+ get_byte('Th\000omas'::bytea, 4)
109
@@ -3638,7 +3721,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
binary strings, length
- length(E'jo\\000se'::bytea)
+ length('jo\000se'::bytea)
5
@@ -3654,7 +3737,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Calculates the MD5 hash of string ,
returning the result in hexadecimal
- md5(E'Th\\000omas'::bytea)
+ md5('Th\000omas'::bytea)
8ab2d3c9689aaf18b4958c334c82d8b1
@@ -3670,7 +3753,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Set bit in string
- set_bit(E'Th\\000omas'::bytea, 45, 0)
+ set_bit('Th\000omas'::bytea, 45, 0)
Th\000omAs
@@ -3686,7 +3769,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three');
Set byte in string
- set_byte(E'Th\\000omas'::bytea, 4, 64)
+ set_byte('Th\000omas'::bytea, 4, 64)
Th\000o@as
@@ -3958,6 +4041,12 @@ cast(-44 as bit(12)) 111111010100
+
+ The pattern matching operators of all three kinds do not support
+ nondeterministic collations. If required, apply a different collation to
+ the expression to work around this limitation.
+
+
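[Editor's note: a minimal sketch of the suggested workaround, redirecting the
comparison to a deterministic collation; the table and column names are
hypothetical:

SELECT * FROM products WHERE name COLLATE "C" LIKE 'abc%';
]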
LIKE
@@ -4032,6 +4121,14 @@ cast(-44 as bit(12)) 111111010100
special meaning of underscore and percent signs in the pattern.
+
+ According to the SQL standard, omitting ESCAPE
+ means there is no escape character (rather than defaulting to a
+ backslash), and a zero-length ESCAPE value is
+ disallowed. PostgreSQL 's behavior in
+ this regard is therefore slightly nonstandard.
+
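[Editor's note: to illustrate the escape behavior (results shown as comments):

SELECT 'a_b' LIKE 'a\_b';              -- true, backslash escapes the underscore
SELECT 'a_b' LIKE 'a#_b' ESCAPE '#';   -- true, # is the escape character here
SELECT 'axb' LIKE 'a#_b' ESCAPE '#';   -- false, the underscore is now literal
]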
+
The key word ILIKE can be used instead of
LIKE to make the match case-insensitive according
@@ -4050,9 +4147,9 @@ cast(-44 as bit(12)) 111111010100
- There is also the prefix operator ^@ and corresponding
- starts_with function which covers cases when only
- searching by beginning of the string is needed.
+ Also see the prefix operator ^@ and corresponding
+ starts_with function, which are useful in cases
+ where simply matching the beginning of a string is needed.
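[Editor's note: for instance:

SELECT 'alphabet' ^@ 'alph';              -- true
SELECT starts_with('alphabet', 'alph');   -- true
]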
@@ -4083,7 +4180,7 @@ cast(-44 as bit(12)) 111111010100
It is similar to LIKE , except that it
interprets the pattern using the SQL standard's definition of a
regular expression. SQL regular expressions are a curious cross
- between LIKE notation and common regular
+ between LIKE notation and common (POSIX) regular
expression notation.
@@ -4167,35 +4264,81 @@ cast(-44 as bit(12)) 111111010100
- As with LIKE , a backslash disables the special meaning
- of any of these metacharacters; or a different escape character can
- be specified with ESCAPE .
+ As with LIKE , a backslash disables the special
+ meaning of any of these metacharacters. A different escape character
+ can be specified with ESCAPE , or the escape
+ capability can be disabled by writing ESCAPE '' .
+
+
+
+ According to the SQL standard, omitting ESCAPE
+ means there is no escape character (rather than defaulting to a
+ backslash), and a zero-length ESCAPE value is
+ disallowed. PostgreSQL 's behavior in
+ this regard is therefore slightly nonstandard.
+
+
+
+ Another nonstandard extension is that following the escape character
+ with a letter or digit provides access to the escape sequences
+ defined for POSIX regular expressions; see
+ ,
+ , and
+ below.
Some examples:
-'abc' SIMILAR TO 'abc' true
-'abc' SIMILAR TO 'a' false
-'abc' SIMILAR TO '%(b|d)%' true
-'abc' SIMILAR TO '(b|c)%' false
+'abc' SIMILAR TO 'abc' true
+'abc' SIMILAR TO 'a' false
+'abc' SIMILAR TO '%(b|d)%' true
+'abc' SIMILAR TO '(b|c)%' false
+'-abc-' SIMILAR TO '%\mabc\M%' true
+'xabcy' SIMILAR TO '%\mabc\M%' false
- The substring function with three parameters,
- substring(string from
- pattern for
- escape-character ) , provides
- extraction of a substring that matches an SQL
- regular expression pattern. As with SIMILAR TO , the
+ The substring function with three parameters
+ provides extraction of a substring that matches an SQL
+ regular expression pattern. The function can be written according
+ to SQL99 syntax:
+
+substring(string from pattern for escape-character )
+
+ or as a plain three-argument function:
+
+substring(string , pattern , escape-character )
+
+ As with SIMILAR TO , the
specified pattern must match the entire data string, or else the
function fails and returns null. To indicate the part of the
- pattern that should be returned on success, the pattern must contain
+ pattern for which the matching data sub-string is of interest,
+ the pattern should contain
two occurrences of the escape character followed by a double quote
(" ).
The text matching the portion of the pattern
- between these markers is returned.
+ between these separators is returned when the match is successful.
+
+
+
+ The escape-double-quote separators actually
+ divide substring 's pattern into three independent
+ regular expressions; for example, a vertical bar (| )
+ in any of the three sections affects only that section. Also, the first
+ and third of these regular expressions are defined to match the smallest
+ possible amount of text, not the largest, when there is any ambiguity
+ about how much of the data string matches which pattern. (In POSIX
+ parlance, the first and third regular expressions are forced to be
+ non-greedy.)
+
+
+
+ As an extension to the SQL standard, PostgreSQL
+ allows there to be just one escape-double-quote separator, in which case
+ the third regular expression is taken as empty; or no separators, in which
+ case the first and third regular expressions are taken as empty.
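[Editor's note: some examples, with results shown as comments; the second
returns null because the pattern must match the entire string, so the middle
section would have to match at the very start:

SELECT substring('foobar' from '%#"o_b#"%' for '#');   -- oob
SELECT substring('foobar' from '#"o_b#"%' for '#');    -- NULL
SELECT substring('foobar', '%#"o_b#"%', '#');          -- oob, plain three-argument form
]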
@@ -4377,7 +4520,7 @@ regexp_replace('foobarbaz', 'b..', 'X')
fooXbaz
regexp_replace('foobarbaz', 'b..', 'X', 'g')
fooXX
-regexp_replace('foobarbaz', 'b(..)', E'X\\1Y', 'g')
+regexp_replace('foobarbaz', 'b(..)', 'X\1Y', 'g')
fooXarYXazY
@@ -4513,7 +4656,7 @@ SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab;
Some examples:
-SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', E'\\s+') AS foo;
+SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '\s+') AS foo;
foo
-------
the
@@ -4527,13 +4670,13 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy d
dog
(9 rows)
-SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', E'\\s+');
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '\s+');
regexp_split_to_array
-----------------------------------------------
{the,quick,brown,fox,jumps,over,the,lazy,dog}
(1 row)
-SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
+SELECT foo FROM regexp_split_to_table('the quick brown fox', '\s*') AS foo;
foo
-----
t
@@ -4965,18 +5108,37 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
Within a bracket expression, the name of a character class
enclosed in [: and :] stands
- for the list of all characters belonging to that class. Standard
- character class names are: alnum ,
- alpha , blank ,
- cntrl , digit ,
- graph , lower ,
- print , punct ,
- space , upper ,
- xdigit . These stand for the character classes
- defined in
- ctype 3 .
- A locale can provide others. A character class cannot be used as
- an endpoint of a range.
+ for the list of all characters belonging to that class. A character
+ class cannot be used as an endpoint of a range.
+ The POSIX standard defines these character class
+ names:
+ alnum (letters and numeric digits),
+ alpha (letters),
+ blank (space and tab),
+ cntrl (control characters),
+ digit (numeric digits),
+ graph (printable characters except space),
+ lower (lower-case letters),
+ print (printable characters including space),
+ punct (punctuation),
+ space (any white space),
+ upper (upper-case letters),
+ and xdigit (hexadecimal digits).
+ The behavior of these standard character classes is generally
+ consistent across platforms for characters in the 7-bit ASCII set.
+ Whether a given non-ASCII character is considered to belong to one
+ of these classes depends on the collation
+ that is used for the regular-expression function or operator
+ (see ), or by default on the
+ database's LC_CTYPE locale setting (see
+ ). The classification of non-ASCII
+ characters can vary across platforms even in similarly-named
+ locales. (But the C locale never considers any
+ non-ASCII characters to belong to any of these classes.)
+ In addition to these standard character
+ classes, PostgreSQL defines
+ the ascii character class, which contains exactly
+ the 7-bit ASCII set.
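[Editor's note: a few examples; the ascii class in the last one is the
PostgreSQL extension mentioned above:

SELECT 'abc' ~ '^[[:lower:]]+$';    -- true
SELECT 'abc1' ~ '^[[:lower:]]+$';   -- false, 1 is not a lower-case letter
SELECT 'abc1' ~ '^[[:alnum:]]+$';   -- true
SELECT 'héllo' ~ '^[[:ascii:]]+$';  -- false, é is outside the 7-bit ASCII set
]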
@@ -4987,8 +5149,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
and end of a word respectively. A word is defined as a sequence
of word characters that is neither preceded nor followed by word
characters. A word character is an alnum character (as
- defined by
- ctype 3 )
+ defined by the POSIX character class described above)
or an underscore. This is an extension, compatible with but not
specified by POSIX 1003.2, and should be used with
caution in software intended to be portable to other systems.
@@ -5045,7 +5206,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
- Regular Expression Character-entry Escapes
+ Regular Expression Character-Entry Escapes
@@ -5186,7 +5347,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
- Regular Expression Class-shorthand Escapes
+ Regular Expression Class-Shorthand Escapes
@@ -5380,7 +5541,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
- ARE Embedded-option Letters
+ ARE Embedded-Option Letters
@@ -5809,6 +5970,145 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}');
+
+ Differences From XQuery (LIKE_REGEX )
+
+
+ LIKE_REGEX
+
+
+
+ XQuery regular expressions
+
+
+
+ Since SQL:2008, the SQL standard includes
+ a LIKE_REGEX operator that performs pattern
+ matching according to the XQuery regular expression
+ standard. PostgreSQL does not yet
+ implement this operator, but you can get very similar behavior using
+ the regexp_match() function, since XQuery
+ regular expressions are quite close to the ARE syntax described above.
+
+
+
+ Notable differences between the existing POSIX-based
+ regular-expression feature and XQuery regular expressions include:
+
+
+
+
+ XQuery character class subtraction is not supported. An example of
+ this feature is using the following to match only English
+ consonants: [a-z-[aeiou]] .
+
+
+
+
+ XQuery character class shorthands \c ,
+ \C , \i ,
+ and \I are not supported.
+
+
+
+
+ XQuery character class elements
+ using \p{UnicodeProperty} or the
+ inverse \P{UnicodeProperty} are not supported.
+
+
+
+
+ POSIX interprets character classes such as \w
+ (see )
+ according to the prevailing locale (which you can control by
+ attaching a COLLATE clause to the operator or
+ function). XQuery specifies these classes by reference to Unicode
+ character properties, so equivalent behavior is obtained only with
+ a locale that follows the Unicode rules.
+
+
+
+
+ The SQL standard (not XQuery itself) attempts to cater for more
+      variants of newline than POSIX does. The
+ newline-sensitive matching options described above consider only
+ ASCII NL (\n ) to be a newline, but SQL would have
+ us treat CR (\r ), CRLF (\r\n )
+ (a Windows-style newline), and some Unicode-only characters like
+ LINE SEPARATOR (U+2028) as newlines as well.
+ Notably, . and \s should
+ count \r\n as one character not two according to
+ SQL.
+
+
+
+
+ Of the character-entry escapes described in
+ ,
+ XQuery supports only \n , \r ,
+ and \t .
+
+
+
+
+ XQuery does not support
+ the [:name :] syntax
+ for character classes within bracket expressions.
+
+
+
+
+ XQuery does not have lookahead or lookbehind constraints,
+ nor any of the constraint escapes described in
+ .
+
+
+
+
+ The metasyntax forms described in
+ do not exist in XQuery.
+
+
+
+
+ The regular expression flag letters defined by XQuery are
+ related to but not the same as the option letters for POSIX
+ ( ). While the
+ i and q options behave the
+ same, others do not:
+
+
+
+ XQuery's s (allow dot to match newline)
+ and m (allow ^
+ and $ to match at newlines) flags provide
+ access to the same behaviors as
+ POSIX's n , p
+ and w flags, but they
+ do not match the behavior of
+ POSIX's s and m flags.
+ Note in particular that dot-matches-newline is the default
+ behavior in POSIX but not XQuery.
+
+
+
+
+ XQuery's x (ignore whitespace in pattern) flag
+ is noticeably different from POSIX's expanded-mode flag.
+ POSIX's x flag also
+ allows # to begin a comment in the pattern,
+ and POSIX will not ignore a whitespace character after a
+ backslash.
+
+
+
+
+
+
+
+
+
@@ -5990,7 +6290,31 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}');
microsecond (000000-999999)
- SSSS
+ FF1
+ tenth of second (0-9)
+
+
+ FF2
+ hundredth of second (00-99)
+
+
+ FF3
+ millisecond (000-999)
+
+
+ FF4
+ tenth of a millisecond (0000-9999)
+
+
+ FF5
+ hundredth of a millisecond (00000-99999)
+
+
+ FF6
+ microsecond (000000-999999)
+
+
+ SSSS , SSSSS
seconds past midnight (0-86399)
@@ -6262,16 +6586,57 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}');
to_timestamp and to_date
- skip multiple blank spaces in the input string unless the
- FX option is used. For example,
- to_timestamp('2000 JUN', 'YYYY MON') works, but
+ skip multiple blank spaces at the beginning of the input string and
+ around date and time values unless the FX option is used. For example,
+ to_timestamp(' 2000 JUN', 'YYYY MON') and
+ to_timestamp('2000 - JUN', 'YYYY-MON') work, but
to_timestamp('2000 JUN', 'FXYYYY MON') returns an error
- because to_timestamp expects one space only.
+ because to_timestamp expects only a single space.
FX must be specified as the first item in
the template.
+
+
+ A separator (a space or non-letter/non-digit character) in the template string of
+ to_timestamp and to_date
+ matches any single separator in the input string or is skipped,
+ unless the FX option is used.
+ For example, to_timestamp('2000JUN', 'YYYY///MON') and
+ to_timestamp('2000/JUN', 'YYYY MON') work, but
+ to_timestamp('2000//JUN', 'YYYY/MON')
+ returns an error because the number of separators in the input string
+ exceeds the number of separators in the template.
+
+
+ If FX is specified, a separator in the template string
+ matches exactly one character in the input string. But note that the
+ input string character is not required to be the same as the separator from the template string.
+ For example, to_timestamp('2000/JUN', 'FXYYYY MON')
+ works, but to_timestamp('2000/JUN', 'FXYYYY  MON')
+ returns an error because the second space in the template string consumes
+ the letter J from the input string.
+
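+ A short session illustrating these separator rules (output shown
+ assuming the TimeZone setting is UTC ):
+
+SELECT to_timestamp('2000-JUN', 'YYYY MON');
+Result: 2000-06-01 00:00:00+00
+
+SELECT to_timestamp('2000 - JUN', 'YYYY-MON');
+Result: 2000-06-01 00:00:00+00
+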
+
+
+
+
+ A TZH template pattern can match a signed number.
+ Without the FX option, a minus sign may be ambiguous,
+ since it could be interpreted either as part of the value or as a separator.
+ This ambiguity is resolved as follows: If the number of separators before
+ TZH in the template string is less than the number of
+ separators before the minus sign in the input string, the minus sign
+ is interpreted as part of TZH .
+ Otherwise, the minus sign is considered to be a separator between values.
+ For example, to_timestamp('2000 -10', 'YYYY TZH') matches
+ -10 to TZH , but
+ to_timestamp('2000 -10', 'YYYY  TZH')
+ matches 10 to TZH .
+
+
+
Ordinary text is allowed in to_char
@@ -6287,6 +6652,19 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}');
string; for example "XX" skips two input characters
(whether or not they are XX ).
+
+
+ Prior to PostgreSQL 12, it was possible to
+ skip arbitrary text in the input string using non-letter or non-digit
+ characters. For example,
+ to_timestamp('2000y6m1d', 'yyyy-MM-DD') used to
+ work. Now you can only use letter characters for this purpose. For example,
+ to_timestamp('2000y6m1d', 'yyyytMMtDDt') and
+ to_timestamp('2000y6m1d', 'yyyy"y"MM"m"DD"d"')
+ skip y , m , and
+ d .
+
+
@@ -7125,16 +7503,25 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}');
date_trunc(text , timestamp )
timestamp
- Truncate to specified precision; see also
+ Truncate to specified precision; see
date_trunc('hour', timestamp '2001-02-16 20:38:40')
2001-02-16 20:00:00
+
+ date_trunc(text , timestamp with time zone , text )
+ timestamp with time zone
+ Truncate to specified precision in the specified time zone; see
+
+ date_trunc('day', timestamptz '2001-02-16 20:38:40+00', 'Australia/Sydney')
+ 2001-02-16 13:00:00+00
+
+
date_trunc(text , interval )
interval
- Truncate to specified precision; see also
+ Truncate to specified precision; see
date_trunc('hour', interval '2 days 3 hours 40 minutes')
2 days 03:00:00
@@ -8024,17 +8411,19 @@ SELECT date_part('hour', INTERVAL '4 hours 3 minutes');
-date_trunc('field ', source )
+date_trunc(field , source [, time_zone ])
source is a value expression of type
- timestamp or interval .
+ timestamp , timestamp with time zone ,
+ or interval .
(Values of type date and
time are cast automatically to timestamp or
interval , respectively.)
field selects to which precision to
- truncate the input value. The return value is of type
- timestamp or interval
- with all fields that are less significant than the
+ truncate the input value. The return value is likewise of type
+ timestamp , timestamp with time zone ,
+ or interval ,
+ and it has all fields that are less significant than the
selected one set to zero (or one, for day and month).
@@ -8058,13 +8447,39 @@ date_trunc('field ', source
- Examples:
+ When the input value is of type timestamp with time zone ,
+ the truncation is performed with respect to a particular time zone;
+ for example, truncation to day produces a value that
+ is midnight in that zone. By default, truncation is done with respect
+ to the current setting, but the
+ optional time_zone argument can be provided
+ to specify a different time zone. The time zone name can be specified
+ in any of the ways described in .
+
+
+
+ A time zone cannot be specified when processing timestamp without
+ time zone or interval inputs. These are always
+ taken at face value.
+
+
+
+ Examples (assuming the local time zone is America/New_York ):
SELECT date_trunc('hour', TIMESTAMP '2001-02-16 20:38:40');
Result: 2001-02-16 20:00:00
SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40');
Result: 2001-01-01 00:00:00
+
+SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00');
+Result: 2001-02-16 00:00:00-05
+
+SELECT date_trunc('day', TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40+00', 'Australia/Sydney');
+Result: 2001-02-16 08:00:00-05
+
+SELECT date_trunc('hour', INTERVAL '3 days 02:47:33');
+Result: 3 days 02:00:00
@@ -8082,10 +8497,11 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40');
- The AT TIME ZONE construct allows conversions
- of time stamps to different time zones. shows its
- variants.
+ The AT TIME ZONE construct converts a time
+ stamp without time zone to/from a
+ time stamp with time zone , and converts
+ time values to different time zones. shows its variants.
@@ -8130,24 +8546,33 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40');
In these expressions, the desired time zone zone can be
- specified either as a text string (e.g., 'PST' )
+ specified either as a text string (e.g., 'America/Los_Angeles' )
or as an interval (e.g., INTERVAL '-08:00' ).
In the text case, a time zone name can be specified in any of the ways
described in .
- Examples (assuming the local time zone is PST8PDT ):
+ Examples (assuming the local time zone is America/Los_Angeles ):
-SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'MST';
+SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver';
Result: 2001-02-16 19:38:40-08
-SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'MST';
+SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'America/Denver';
Result: 2001-02-16 18:38:40
+
+SELECT TIMESTAMP '2001-02-16 20:38:40-05' AT TIME ZONE 'Asia/Tokyo' AT TIME ZONE 'America/Chicago';
+Result: 2001-02-16 05:38:40
- The first example takes a time stamp without time zone and interprets it as MST time
- (UTC-7), which is then converted to PST (UTC-8) for display. The second example takes
- a time stamp specified in EST (UTC-5) and converts it to local time in MST (UTC-7).
+ The first example adds a time zone to a value that lacks it, and
+ displays the value using the current TimeZone
+ setting. The second example shifts the time stamp with time zone value
+ to the specified time zone, and returns the value without a time zone.
+ This allows storage and display of values different from the current
+ TimeZone setting. The third example converts
+ Tokyo time to Chicago time. Converting time
+ values to other time zones uses the currently active time zone rules
+ since no date is supplied.
@@ -9737,10 +10162,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
reduce each value in the document, specified by filter to a tsvector ,
and then concatenate those in document order to produce a single tsvector .
- filter is a jsonb array, that enumerates what kind of elements need to be included
+ filter is a jsonb array that enumerates what kinds of elements need to be included
into the resulting tsvector . Possible values for filter are
"string" (to include all string values), "numeric" (to include all numeric values in the string format),
- "boolean" (to include all boolean values in the string format "true"/"false"),
+ "boolean" (to include all Boolean values in the string format "true" /"false" ),
"key" (to include all keys) or "all" (to include all above). These values
can be combined together to include, e.g. all string and numeric values.
@@ -10033,18 +10458,53 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
+
+ UUID Functions
+
+
+ UUID
+ generating
+
+
+
+ gen_random_uuid
+
+
+
+ PostgreSQL includes one function to generate a UUID:
+
+gen_random_uuid() returns uuid
+
+ This function returns a version 4 (random) UUID. This is the most commonly
+ used type of UUID and is appropriate for most applications.
+
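+ For example (the result is random, so the value shown here is merely
+ illustrative):
+
+SELECT gen_random_uuid();
+Result: 5b6b9eae-2b7c-4a3e-9c84-7bd37bb2f3a0
+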
+
+
+ The module provides additional functions that
+ implement other standard algorithms for generating UUIDs.
+
+
+
XML Functions
+
+ XML Functions
+
+
The functions and function-like expressions described in this
- section operate on values of type xml . Check for information about the xml
+ section operate on values of type xml . See for information about the xml
type. The function-like expressions xmlparse
and xmlserialize for converting to and from
- type xml are not repeated here. Use of most of these
- functions requires the installation to have been built
+ type xml are documented there, not in this section.
+
+
+
+ Use of most of these functions
+ requires PostgreSQL to have been built
with configure --with-libxml .
@@ -10239,8 +10699,8 @@ SELECT xmlelement(name foo, xmlattributes('xyz' as bar),
encoding, depending on the setting of the configuration parameter
. The particular behavior for
individual data types is expected to evolve in order to align the
- SQL and PostgreSQL data types with the XML Schema specification,
- at which point a more precise description will appear.
+ PostgreSQL mappings with those specified in SQL:2006 and later,
+ as discussed in .
@@ -10478,20 +10938,23 @@ SELECT xmlagg(x) FROM (SELECT * FROM test ORDER BY y DESC) AS tab;
-XMLEXISTS (text PASSING BY REF xml BY REF )
+XMLEXISTS (text PASSING BY { REF | VALUE } xml BY { REF | VALUE } )
- The function xmlexists returns true if the
- XPath expression in the first argument returns any nodes, and
- false otherwise. (If either argument is null, the result is
- null.)
+ The function xmlexists evaluates an XPath 1.0
+ expression (the first argument), with the passed XML value as its context
+ item. The function returns false if the result of that evaluation
+ yields an empty node-set, true if it yields any other value. The
+ function returns null if any argument is null. A nonnull value
+ passed as the context item must be an XML document, not a content
+ fragment or any non-XML value.
Example:
Toronto Ottawa ');
+SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY VALUE 'Toronto Ottawa ');
xmlexists
------------
@@ -10501,14 +10964,14 @@ SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Tor
- The BY REF clauses have no effect in
- PostgreSQL, but are allowed for SQL conformance and compatibility
- with other implementations. Per SQL standard, the
- first BY REF is required, the second is
- optional. Also note that the SQL standard specifies
- the xmlexists construct to take an XQuery
- expression as first argument, but PostgreSQL currently only
- supports XPath, which is a subset of XQuery.
+ The BY REF and BY VALUE clauses
+ are accepted in PostgreSQL , but are ignored,
+ as discussed in .
+ In the SQL standard, the xmlexists function
+ evaluates an expression in the XML Query language,
+ but PostgreSQL allows only an XPath 1.0
+ expression, as discussed in
+ .
@@ -10614,12 +11077,12 @@ SELECT xml_is_well_formed_document('test
The function xpath_exists is a specialized form
of the xpath function. Instead of returning the
- individual XML values that satisfy the XPath, this function returns a
- Boolean indicating whether the query was satisfied or not. This
- function is equivalent to the standard XMLEXISTS predicate,
+ individual XML values that satisfy the XPath 1.0 expression, this function
+ returns a Boolean indicating whether the query was satisfied or not
+ (specifically, whether it produced any value other than an empty node-set).
+ This function is equivalent to the XMLEXISTS predicate,
except that it also offers support for a namespace mapping argument.
@@ -10715,7 +11179,7 @@ SELECT xpath_exists('/my:a/text()', 'test
xmltable ( XMLNAMESPACES(namespace uri AS namespace name , ... ),
- row_expression PASSING BY REF document_expression BY REF
+ row_expression PASSING BY { REF | VALUE } document_expression BY { REF | VALUE }
COLUMNS name { type PATH column_expression DEFAULT default_expression NOT NULL | NULL
| FOR ORDINALITY }
, ...
@@ -10724,8 +11188,8 @@ SELECT xpath_exists('/my:a/text()', 'test
The xmltable function produces a table based
- on the given XML value, an XPath filter to extract rows, and an
- optional set of column definitions.
+ on the given XML value, an XPath filter to extract rows, and a
+ set of column definitions.
@@ -10736,30 +11200,34 @@ SELECT xpath_exists('/my:a/text()', 'test
- The required row_expression argument is an XPath
- expression that is evaluated against the supplied XML document to
- obtain an ordered sequence of XML nodes. This sequence is what
- xmltable transforms into output rows.
+ The required row_expression argument is
+ an XPath 1.0 expression that is evaluated, passing the
+ document_expression as its context item, to
+ obtain a set of XML nodes. These nodes are what
+ xmltable transforms into output rows. No rows
+ will be produced if the document_expression
+ is null, nor if the row_expression produces
+ an empty node-set or any value other than a node-set.
- document_expression provides the XML document to
- operate on.
- The BY REF clauses have no effect in PostgreSQL,
- but are allowed for SQL conformance and compatibility with other
- implementations.
- The argument must be a well-formed XML document; fragments/forests
- are not accepted.
+ document_expression provides the context
+ item for the row_expression . It must be a
+ well-formed XML document; fragments/forests are not accepted.
+ The BY REF and BY VALUE clauses
+ are accepted but ignored, as discussed in
+ .
+ In the SQL standard, the xmltable function
+ evaluates expressions in the XML Query language,
+ but PostgreSQL allows only XPath 1.0
+ expressions, as discussed in
+ .
The mandatory COLUMNS clause specifies the list
of columns in the output table.
- If the COLUMNS clause is omitted, the rows in the result
- set contain a single column of type xml containing the
- data matched by row_expression .
- If COLUMNS is specified, each entry describes a
- single column.
+ Each entry describes a single column.
See the syntax summary above for the format.
The column name and type are required; the path, default and
nullability clauses are optional.
@@ -10767,48 +11235,92 @@ SELECT xpath_exists('/my:a/text()', 'test
A column marked FOR ORDINALITY will be populated
- with row numbers matching the order in which the
- output rows appeared in the original input XML document.
+ with row numbers, starting with 1, in the order of nodes retrieved from
+ the row_expression 's result node-set.
At most one column may be marked FOR ORDINALITY .
+
+
+ XPath 1.0 does not specify an order for nodes in a node-set, so code
+ that relies on a particular order of the results will be
+ implementation-dependent. Details can be found in
+ .
+
+
+
+
+ The column_expression for a column is an
+ XPath 1.0 expression that is evaluated for each row, with the current
+ node from the row_expression result as its
+ context item, to find the value of the column. If
+ no column_expression is given, then the
+ column name is used as an implicit path.
+
+
- The column_expression for a column is an XPath expression
- that is evaluated for each row, relative to the result of the
- row_expression , to find the value of the column.
- If no column_expression is given, then the column name
- is used as an implicit path.
+ If a column's XPath expression returns a non-XML value (limited to
+ string, boolean, or double in XPath 1.0) and the column has a
+ PostgreSQL type other than xml , the column will be set
+ as if by assigning the value's string representation to the PostgreSQL
+ type. (If the value is a boolean, its string representation is taken
+ to be 1 or 0 if the output
+ column's type category is numeric, otherwise true or
+ false .)
- If a column's XPath expression returns multiple elements, an error
- is raised.
- If the expression matches an empty tag, the result is an
- empty string (not NULL ).
- Any xsi:nil attributes are ignored.
+ If a column's XPath expression returns a non-empty set of XML nodes
+ and the column's PostgreSQL type is xml , the column will
+ be assigned the expression result exactly, if it is of document or
+ content form.
+
+
+ A result containing more than one element node at the top level, or
+ non-whitespace text outside of an element, is an example of content form.
+ An XPath result can be of neither form, for example if it returns an
+ attribute node selected from the element that contains it. Such a result
+ will be put into content form with each such disallowed node replaced by
+ its string value, as defined for the XPath 1.0
+ string function.
+
+
- The text body of the XML matched by the column_expression
- is used as the column value. Multiple text() nodes
- within an element are concatenated in order. Any child elements,
- processing instructions, and comments are ignored, but the text contents
- of child elements are concatenated to the result.
+ A non-XML result assigned to an xml output column produces
+ content, a single text node with the string value of the result.
+ An XML result assigned to a column of any other type may not have more than
+ one node, or an error is raised. If there is exactly one node, the column
+ will be set as if by assigning the node's string
+ value (as defined for the XPath 1.0 string function)
+ to the PostgreSQL type.
+
+
+
+ The string value of an XML element is the concatenation, in document order,
+ of all text nodes contained in that element and its descendants. The string
+ value of an element with no descendant text nodes is an
+ empty string (not NULL ).
+ Any xsi:nil attributes are ignored.
Note that the whitespace-only text() node between two non-text
elements is preserved, and that leading whitespace on a text()
node is not flattened.
+ The XPath 1.0 string function may be consulted for the
+ rules defining the string value of other XML node types and non-XML values.
+
+
+
+ The conversion rules presented here are not exactly those of the SQL
+ standard, as discussed in .
- If the path expression does not match for a given row but
- default_expression is specified, the value resulting
- from evaluating that expression is used.
- If no DEFAULT clause is given for the column,
- the field will be set to NULL .
- It is possible for a default_expression to reference
- the value of output columns that appear prior to it in the column list,
- so the default of one column may be based on the value of another
- column.
+ If the path expression returns an empty node-set
+ (typically, when it does not match)
+ for a given row, the column will be set to NULL , unless
+ a default_expression is specified; then the
+ value resulting from evaluating that expression is used.
@@ -10820,20 +11332,14 @@ SELECT xpath_exists('/my:a/text()', 'test
- Unlike regular PostgreSQL functions, column_expression
- and default_expression are not evaluated to a simple
- value before calling the function.
- column_expression is normally evaluated
- exactly once per input row, and default_expression
- is evaluated each time a default is needed for a field.
- If the expression qualifies as stable or immutable the repeat
+ A default_expression , rather than being
+ evaluated immediately when xmltable is called,
+ is evaluated each time a default is needed for the column.
+ If the expression qualifies as stable or immutable, the repeat
evaluation may be skipped.
- Effectively xmltable behaves more like a subquery than a
- function call.
This means that you can usefully use volatile functions like
- nextval in default_expression , and
- column_expression may depend on other parts of the
- XML document.
+ nextval in
+ default_expression .
@@ -10894,16 +11400,16 @@ $$ AS data;
SELECT xmltable.*
FROM xmlelements, XMLTABLE('/root' PASSING data COLUMNS element text);
- element
-----------------------
- Hello2a2 bbbCC
+ element
+-------------------------
+ Hello2a2 bbbxxxCC
]]>
The following example illustrates how
the XMLNAMESPACES clause can be used to specify
- the default namespace, and a list of additional namespaces
+ a list of namespaces
used in the XML document as well as in the XPath expressions:
JSON Functions and Operators
- JSON
- functions and operators
+ JSON
+ functions and operators
-
- shows the operators that
- are available for use with the two JSON data types (see ).
-
+
+ This section describes:
-
+
+
+
+ functions and operators for processing and creating JSON data
+
+
+
+
+ the SQL/JSON path language
+
+
+
+
+
+
+ To learn more about the SQL/JSON standard, see
+ . For details on JSON types
+ supported in PostgreSQL ,
+ see .
+
+
+
+ Processing and Creating JSON Data
+
+
+ shows the operators that
+ are available for use with JSON data types (see ).
+
+
+
json and jsonb Operators
-
+
Operator
Right Operand Type
+ Return type
Description
Example
Example Result
@@ -11213,6 +11747,7 @@ table2-mapping
->
int
+ json or jsonb
Get JSON array element (indexed from zero, negative
integers count from the end)
'[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json->2
@@ -11221,6 +11756,7 @@ table2-mapping
->
text
+ json or jsonb
Get JSON object field by key
'{"a": {"b":"foo"}}'::json->'a'
{"b":"foo"}
@@ -11228,6 +11764,7 @@ table2-mapping
->>
int
+ text
Get JSON array element as text
'[1,2,3]'::json->>2
3
@@ -11235,6 +11772,7 @@ table2-mapping
->>
text
+ text
Get JSON object field as text
'{"a":1,"b":2}'::json->>'b'
2
@@ -11242,14 +11780,16 @@ table2-mapping
#>
text[]
- Get JSON object at specified path
+ json or jsonb
+ Get JSON object at the specified path
'{"a": {"b":{"c": "foo"}}}'::json#>'{a,b}'
{"c": "foo"}
#>>
text[]
- Get JSON object at specified path as text
+ text
+ Get JSON object at the specified path as text
'{"a":[1,2,3],"b":[4,5,6]}'::json#>>'{a,2}'
3
@@ -11374,6 +11914,20 @@ table2-mapping
JSON arrays, negative integers count from the end)
'["a", {"b":1}]'::jsonb #- '{1,b}'
+
+ @?
+ jsonpath
+ Does JSON path return any item for the specified JSON value?
+ '{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? (@ > 2)'
+
+
+ @@
+ jsonpath
+ Returns the result of a JSON path predicate check for the specified JSON value.
+ Only the first item of the result is taken into account. If the
+ result is not Boolean, then null is returned.
+ '{"a":[1,2,3,4,5]}'::jsonb @@ '$.a[*] > 2'
+
@@ -11387,6 +11941,16 @@ table2-mapping
+
+
+ The @? and @@ operators suppress
+ the following errors: a missing object field or array element, an
+ unexpected JSON item type, and datetime and numeric errors.
+ This behavior might be helpful when searching over JSON document
+ collections of varying structure.
+
+
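+ For example, these two queries test the same condition with each
+ operator:
+
+SELECT '{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? (@ > 2)';
+Result: t
+
+SELECT '{"a":[1,2,3,4,5]}'::jsonb @@ '$.a[*] > 2';
+Result: t
+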
+
shows the functions that are
available for creating json and jsonb values.
@@ -11647,6 +12211,36 @@ table2-mapping
jsonb_pretty
+
+ jsonb_path_exists
+
+
+ jsonb_path_exists_tz
+
+
+ jsonb_path_match
+
+
+ jsonb_path_match_tz
+
+
+ jsonb_path_query
+
+
+ jsonb_path_query_tz
+
+
+ jsonb_path_query_array
+
+
+ jsonb_path_query_array_tz
+
+
+ jsonb_path_query_first
+
+
+ jsonb_path_query_first_tz
+
JSON Processing Functions
@@ -11903,7 +12497,7 @@ table2-mapping
[{"f1":1},2,null,3]
- jsonb_set(target jsonb, path text[], new_value jsonb, create_missing boolean )
+ jsonb_set(target jsonb, path text[], new_value jsonb , create_missing boolean )
jsonb
@@ -11911,10 +12505,10 @@ table2-mapping
with the section designated by path
replaced by new_value , or with
new_value added if
- create_missing is true ( default is
+ create_missing is true (default is
true ) and the item
designated by path does not exist.
- As with the path orientated operators, negative integers that
+ As with the path oriented operators, negative integers that
appear in path count from the end
of JSON arrays.
@@ -11928,7 +12522,7 @@ table2-mapping
- jsonb_insert(target jsonb, path text[], new_value jsonb, insert_after boolean )
+ jsonb_insert(target jsonb, path text[], new_value jsonb , insert_after boolean )
jsonb
@@ -11943,7 +12537,7 @@ table2-mapping
designated by path is in JSONB object,
new_value will be inserted only if
target does not exist. As with the path
- orientated operators, negative integers that appear in
+ oriented operators, negative integers that appear in
path count from the end of JSON arrays.
@@ -11981,6 +12575,130 @@ table2-mapping
+
+
+
+ jsonb_path_exists(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb_path_exists_tz(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ boolean
+
+ Checks whether JSON path returns any item for the specified JSON
+ value.
+
+
+
+ jsonb_path_exists('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}')
+
+
+
+ true
+
+
+
+
+
+ jsonb_path_match(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb_path_match_tz(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ boolean
+
+ Returns the result of a JSON path predicate check for the specified JSON value.
+ Only the first item of the result is taken into account. If the
+ result is not Boolean, then null is returned.
+
+
+
+ jsonb_path_match('{"a":[1,2,3,4,5]}', 'exists($.a[*] ? (@ >= $min && @ <= $max))', '{"min":2,"max":4}')
+
+
+
+ true
+
+
+
+
+
+ jsonb_path_query(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb_path_query_tz(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ setof jsonb
+
+ Gets all JSON items returned by JSON path for the specified JSON
+ value.
+
+
+
+ select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}');
+
+
+
+
+
+ jsonb_path_query
+------------------
+ 2
+ 3
+ 4
+
+
+
+
+
+
+
+ jsonb_path_query_array(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb_path_query_array_tz(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb
+
+ Gets all JSON items returned by JSON path for the specified JSON
+ value and wraps the result into an array.
+
+
+
+ jsonb_path_query_array('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}')
+
+
+
+ [2, 3, 4]
+
+
+
+
+
+ jsonb_path_query_first(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb_path_query_first_tz(target jsonb, path jsonpath , vars jsonb , silent bool )
+
+
+ jsonb
+
+ Gets the first JSON item returned by JSON path for the specified JSON
+ value. Returns NULL if there are no results.
+
+
+
+ jsonb_path_query_first('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}')
+
+
+
+ 2
+
+
@@ -11996,15 +12714,72 @@ table2-mapping
-
- In json_populate_record , json_populate_recordset ,
- json_to_record and json_to_recordset ,
- type coercion from the JSON is best effort
and may not result
- in desired values for some types. JSON keys are matched to
- identical column names in the target row type. JSON fields that do not
- appear in the target row type will be omitted from the output, and
- target columns that do not match any JSON field will simply be NULL.
-
+
+ The functions
+ json[b]_populate_record ,
+ json[b]_populate_recordset ,
+ json[b]_to_record and
+ json[b]_to_recordset
+ operate on a JSON object, or array of objects, and extract the values
+ associated with keys whose names match column names of the output row
+ type.
+ Object fields that do not correspond to any output column name are
+ ignored, and output columns that do not match any object field will be
+ filled with nulls.
+ To convert a JSON value to the SQL type of an output column, the
+ following rules are applied in sequence:
+
+
+
+ A JSON null value is converted to a SQL null in all cases.
+
+
+
+
+ If the output column is of type json
+ or jsonb , the JSON value is just reproduced exactly.
+
+
+
+
+ If the output column is a composite (row) type, and the JSON value is
+ a JSON object, the fields of the object are converted to columns of
+ the output row type by recursive application of these rules.
+
+
+
+
+ Likewise, if the output column is an array type and the JSON value is
+ a JSON array, the elements of the JSON array are converted to elements
+ of the output array by recursive application of these rules.
+
+
+
+
+ Otherwise, if the JSON value is a string literal, the contents of the
+ string are fed to the input conversion function for the column's data
+ type.
+
+
+
+
+ Otherwise, the ordinary text representation of the JSON value is fed
+ to the input conversion function for the column's data type.
+
+
+
+
+
+
+ While the examples for these functions use constants, the typical use
+ would be to reference a table in the FROM clause
+ and use one of its json or jsonb columns
+ as an argument to the function. Extracted key values can then be
+ referenced in other parts of the query, like WHERE
+ clauses and target lists. Extracting multiple values in this
+ way can improve performance over extracting them separately with
+ per-key operators.
+
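+ A minimal sketch of these conversion rules, using a hypothetical
+ composite type myrow :
+
+CREATE TYPE myrow AS (a int, b text);
+SELECT * FROM json_populate_record(null::myrow, '{"a": 1, "b": 2, "c": 3}');
+ a | b
+---+---
+ 1 | 2
+(1 row)
+
+ Here the key c is ignored, and the numeric
+ value for b is fed to the text
+ input conversion, per the last rule above.
+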
@@ -12049,6 +12824,37 @@ table2-mapping
+
+
+ The jsonb_path_* functions have optional
+ vars and silent arguments.
+
+
+ If the vars argument is specified, it provides an
+ object containing named variables to be substituted into a
+ jsonpath expression.
+
+
+ If the silent argument is specified and is
+ true , these functions suppress the same errors
+ as the @? and @@ operators.
+
+
+
+
+
+ Some of the jsonb_path_* functions have the
+ _tz suffix. These functions have been implemented to
+ support comparison of date/time values that involves implicit
+ timezone-aware casts. Since operations with time zones are not immutable,
+ these functions are qualified as stable. Their counterparts without the
+ suffix do not support such casts, so they are immutable and can be used for
+ such use cases as expression indexes
+ (see ). There is no difference
+ between these functions for other jsonpath operations.
+
+
+
See also for the aggregate
function json_agg which aggregates record
@@ -12057,7 +12863,688 @@ table2-mapping
into a JSON object, and their jsonb equivalents,
jsonb_agg and jsonb_object_agg .
+
+
+
+ The SQL/JSON Path Language
+
+
+ SQL/JSON path language
+
+
+
+ SQL/JSON path expressions specify the items to be retrieved
+ from the JSON data, similar to XPath expressions used
+ for SQL access to XML. In PostgreSQL ,
+ path expressions are implemented as the jsonpath
+ data type and can use any elements described in
+ .
+
+
+ JSON query functions and operators
+ pass the provided path expression to the path engine
+ for evaluation. If the expression matches the queried JSON data,
+ the corresponding SQL/JSON item is returned.
+ Path expressions are written in the SQL/JSON path language
+ and can also include arithmetic expressions and functions.
+ Query functions treat the provided expression as a
+ text string, so it must be enclosed in single quotes.
+
+
+
+ A path expression consists of a sequence of elements allowed
+ by the jsonpath data type.
+ The path expression is evaluated from left to right, but
+ you can use parentheses to change the order of operations.
+ If the evaluation is successful, a sequence of SQL/JSON items
+ (SQL/JSON sequence ) is produced,
+ and the evaluation result is returned to the JSON query function
+ that completes the specified computation.
+
+
+
+ To refer to the JSON data to be queried (the
+ context item ), use the $ sign
+ in the path expression. It can be followed by one or more
+ accessor operators,
+ which go down the JSON structure level by level to retrieve the
+ content of context item. Each operator that follows deals with the
+ result of the previous evaluation step.
+
+
+
+ For example, suppose you have some JSON data from a GPS tracker that you
+ would like to parse, such as:
+
+{
+ "track": {
+ "segments": [
+ {
+ "location": [ 47.763, 13.4034 ],
+ "start time": "2018-10-14 10:05:14",
+ "HR": 73
+ },
+ {
+ "location": [ 47.706, 13.2635 ],
+ "start time": "2018-10-14 10:39:21",
+ "HR": 135
+ }
+ ]
+ }
+}
+
+
+
+
+ To retrieve the available track segments, you need to use the
+ .key accessor
+ operator for all the preceding JSON objects:
+
+'$.track.segments'
+
+
+
+
+ If the item to retrieve is an element of an array, you have
+ to unnest this array using the [*] operator. For example,
+ the following path will return location coordinates for all
+ the available track segments:
+
+'$.track.segments[*].location'
+
+
+
+
+ To return the coordinates of the first segment only, you can
+ specify the corresponding subscript in the []
+ accessor operator. Note that the SQL/JSON arrays are 0-relative:
+
+'$.track.segments[0].location'
+
+
+
+
+ The result of each path evaluation step can be processed
+ by one or more jsonpath operators and methods
+ listed in .
+ Each method name must be preceded by a dot. For example,
+ you can get an array size:
+
+'$.track.segments.size()'
+
+ For more examples of using jsonpath operators
+ and methods within path expressions, see
+ .
+
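+ For example, assuming the GPS data shown above is stored in a
+ jsonb column named data of a
+ hypothetical table trips , the last path could
+ be evaluated like this:
+
+SELECT jsonb_path_query(data, '$.track.segments.size()') FROM trips;
+Result: 2
+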
+
+
+ When defining the path, you can also use one or more
+ filter expressions that work similar to the
+ WHERE clause in SQL. A filter expression begins with
+ a question mark and provides a condition in parentheses:
+
+
+? (condition )
+
+
+
+
+ Filter expressions must be specified right after the path evaluation step
+ to which they are applied. The result of this step is filtered to include
+ only those items that satisfy the provided condition. SQL/JSON defines
+ three-valued logic, so the condition can be true , false ,
+ or unknown . The unknown value
+ plays the same role as SQL NULL and can be tested
+ for with the is unknown predicate. Further path
+ evaluation steps use only those items for which filter expressions
+ return true .
+
+
+
+ Functions and operators that can be used in filter expressions are listed
+ in . The path
+ evaluation result to be filtered is denoted by the @
+ variable. To refer to a JSON element stored at a lower nesting level,
+ add one or more accessor operators after @ .
+
+
+
+ Suppose you would like to retrieve all heart rate values higher
+ than 130. You can achieve this using the following expression:
+
+'$.track.segments[*].HR ? (@ > 130)'
+
+
+
+
+ To get the start time of segments with such values instead, you have to
+ filter out irrelevant segments before returning the start time, so the
+ filter expression is applied to the previous step, and the path used
+ in the condition is different:
+
+'$.track.segments[*] ? (@.HR > 130)."start time"'
+
+
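+ Evaluated with jsonb_path_query against the same
+ hypothetical trips table, the two preceding paths
+ return:
+
+SELECT jsonb_path_query(data, '$.track.segments[*].HR ? (@ > 130)')
+FROM trips;
+Result: 135
+
+SELECT jsonb_path_query(data,
+    '$.track.segments[*] ? (@.HR > 130)."start time"')
+FROM trips;
+Result: "2018-10-14 10:39:21"
+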
+
+
+ You can use several filter expressions on the same nesting level, if
+ required. For example, the following expression selects all segments
+ that contain locations with relevant coordinates and high heart rate values:
+
+'$.track.segments[*] ? (@.location[1] < 13.4) ? (@.HR > 130)."start time"'
+
+
+
+
+ Using filter expressions at different nesting levels is also allowed.
+ The following example first filters all segments by location, and then
+ returns high heart rate values for these segments, if available:
+
+'$.track.segments[*] ? (@.location[1] < 13.4).HR ? (@ > 130)'
+
+
+
+
+ You can also nest filter expressions within each other:
+
+'$.track ? (exists(@.segments[*] ? (@.HR > 130))).segments.size()'
+
+ This expression returns the size of the track if it contains any
+ segments with high heart rate values, or an empty sequence otherwise.
+
+
+
+ PostgreSQL 's implementation of the SQL/JSON
+ path language has the following deviations from the SQL/JSON standard:
+
+
+
+
+
+ A path expression can be a Boolean predicate, although the SQL/JSON
+ standard allows predicates only in filters. This is necessary for
+ implementation of the @@ operator. For example,
+ the following jsonpath expression is valid in
+ PostgreSQL :
+
+'$.track.segments[*].HR < 70'
+
+
+
+
+
+
+ There are minor differences in the interpretation of regular
+ expression patterns used in like_regex filters, as
+ described in .
+
+
+
+
+
+ Strict and Lax Modes
+
+ When you query JSON data, the path expression may not match the
+ actual JSON data structure. An attempt to access a non-existent
+ member of an object or element of an array results in a
+ structural error. SQL/JSON path expressions have two modes
+ of handling structural errors:
+
+
+
+
+
+ lax (default) — the path engine implicitly adapts
+ the queried data to the specified path.
+ Any remaining structural errors are suppressed and converted
+ to empty SQL/JSON sequences.
+
+
+
+
+ strict — if a structural error occurs, an error is raised.
+
+
+
+
+
+ The lax mode facilitates matching of a JSON document structure and path
+ expression if the JSON data does not conform to the expected schema.
+ If an operand does not match the requirements of a particular operation,
+ it can be automatically wrapped as an SQL/JSON array or unwrapped by
+ converting its elements into an SQL/JSON sequence before performing
+ this operation. In addition, comparison operators automatically unwrap
+ their operands in lax mode, so you can compare SQL/JSON arrays
+ out-of-the-box. An array of size 1 is considered equal to its sole element.
+ The only cases in which automatic unwrapping is not performed are the following:
+
+
+
+ The path expression contains type() or
+ size() methods that return the type
+ and the number of elements in the array, respectively.
+
+
+
+
+ The queried JSON data contain nested arrays. In this case, only
+ the outermost array is unwrapped, while all the inner arrays
+ remain unchanged. Thus, implicit unwrapping can only go one
+ level down within each path evaluation step.
+
+
+
+
+
+
+ For example, when querying the GPS data listed above, you can
+ abstract from the fact that it stores an array of segments
+ when using the lax mode:
+
+'lax $.track.segments.location'
+
+
+
+
+ In the strict mode, the specified path must exactly match the structure of
+ the queried JSON document to return an SQL/JSON item, so using this
+ path expression will cause an error. To get the same result as in
+ the lax mode, you have to explicitly unwrap the
+ segments array:
+
+'strict $.track.segments[*].location'
+
+
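+ A short illustration of the two modes (the error text shown is
+ approximate):
+
+SELECT jsonb_path_query('{"x": 1}', 'lax $.x[0]');
+Result: 1
+
+SELECT jsonb_path_query('{"x": 1}', 'strict $.x[0]');
+ERROR:  jsonpath array accessor can only be applied to an array
+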
+
+
+
+
+ Regular Expressions
+
+
+ LIKE_REGEX
+ in SQL/JSON
+
+
+
+ SQL/JSON path expressions allow matching text to a regular expression
+ with the like_regex filter. For example, the
+ following SQL/JSON path query would case-insensitively match all
+ strings in an array that start with an English vowel:
+
+'$[*] ? (@ like_regex "^[aeiou]" flag "i")'
+
+
+
+
+ The optional flag string may include one or more of
+ the characters
+ i for case-insensitive match,
+ m to allow ^
+ and $ to match at newlines,
+ s to allow . to match a newline,
+ and q to quote the whole pattern (reducing the
+ behavior to a simple substring match).
+
+
+
+ The SQL/JSON standard borrows its definition for regular expressions
+ from the LIKE_REGEX operator, which in turn uses the
+ XQuery standard. PostgreSQL does not currently support the
+ LIKE_REGEX operator. Therefore,
+ the like_regex filter is implemented using the
+ POSIX regular expression engine described in
+ . This leads to various minor
+ discrepancies from standard SQL/JSON behavior, which are cataloged in
+ .
+ Note, however, that the flag-letter incompatibilities described there
+ do not apply to SQL/JSON, as it translates the XQuery flag letters to
+ match what the POSIX engine expects.
+
+
+
+ Keep in mind that the pattern argument of like_regex
+ is a JSON path string literal, written according to the rules given in
+ . This means in particular that any
+ backslashes you want to use in the regular expression must be doubled.
+ For example, to match strings that contain only digits:
+
+'$ ? (@ like_regex "^\\d+$")'
+
+
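+ Written out as a complete query, this might look like:
+
+SELECT jsonb_path_exists('["42", "no"]', '$[*] ? (@ like_regex "^\\d+$")');
+Result: t
+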
+
+
+
+
+ SQL/JSON Path Operators and Methods
+
+
+ shows the operators and
+ methods available in jsonpath . shows the available filter
+ expression elements.
+
+
+
+ jsonpath Operators and Methods
+
+
+
+ Operator/Method
+ Description
+ Example JSON
+ Example Query
+ Result
+
+
+
+
+ + (unary)
+ Plus operator that iterates over the SQL/JSON sequence
+ {"x": [2.85, -14.7, -9.4]}
+ + $.x.floor()
+ 2, -15, -10
+
+
+ - (unary)
+ Minus operator that iterates over the SQL/JSON sequence
+ {"x": [2.85, -14.7, -9.4]}
+ - $.x.floor()
+ -2, 15, 10
+
+
+ + (binary)
+ Addition
+ [2]
+ 2 + $[0]
+ 4
+
+
+ - (binary)
+ Subtraction
+ [2]
+ 4 - $[0]
+ 2
+
+
+ *
+ Multiplication
+ [4]
+ 2 * $[0]
+ 8
+
+
+ /
+ Division
+ [8]
+ $[0] / 2
+ 4
+
+
+ %
+ Modulus
+ [32]
+ $[0] % 10
+ 2
+
+
+ type()
+ Type of the SQL/JSON item
+ [1, "2", {}]
+ $[*].type()
+ "number", "string", "object"
+
+
+ size()
+ Size of the SQL/JSON item
+ {"m": [11, 15]}
+ $.m.size()
+ 2
+
+
+ double()
+ Approximate floating-point number converted from an SQL/JSON number or a string
+ {"len": "1.9"}
+ $.len.double() * 2
+ 3.8
+
+
+ ceiling()
+ Nearest integer greater than or equal to the SQL/JSON number
+ {"h": 1.3}
+ $.h.ceiling()
+ 2
+
+
+ floor()
+ Nearest integer less than or equal to the SQL/JSON number
+ {"h": 1.3}
+ $.h.floor()
+ 1
+
+
+ abs()
+ Absolute value of the SQL/JSON number
+ {"z": -0.3}
+ $.z.abs()
+ 0.3
+
+
+ datetime()
+ Date/time value converted from a string
+ ["2015-8-1", "2015-08-12"]
+ $[*] ? (@.datetime() < "2015-08-2".datetime())
+ 2015-8-1
+
+
+ datetime(template )
+ Date/time value converted from a string using the specified template
+ ["12:30", "18:40"]
+ $[*].datetime("HH24:MI")
+ "12:30:00", "18:40:00"
+
+
+ keyvalue()
+
+ Sequence of object's key-value pairs represented as array of items
+ containing three fields ("key" ,
+ "value" , and "id" ).
+ "id" is a unique identifier of the object
+ key-value pair belongs to.
+
+ {"x": "20", "y": 32}
+ $.keyvalue()
+ {"key": "x", "value": "20", "id": 0}, {"key": "y", "value": 32, "id": 0}
+
+
+
+
+
+
+
+ The result type of datetime() and
+ datetime(template )
+ methods can be date , timetz , time ,
+ timestamptz , or timestamp .
+ Both methods determine the result type dynamically.
+
+
+ The datetime() method sequentially tries ISO formats
+ for date , timetz , time ,
+ timestamptz , and timestamp . It stops at
+ the first matching format, which determines the result data type.
+
+
+ The datetime(template )
+ method determines the result type by the provided template string.
+
+
+ The datetime() and
+ datetime(template ) methods
+ use the same parsing rules as the to_timestamp SQL
+ function does (see ), with three
+ exceptions. First, these methods don't allow unmatched template
+ patterns. Second, only the following separators are allowed in the
+ template string: minus sign, period, solidus, comma, apostrophe,
+ semicolon, colon, and space. Third, separators in the template string
+ must exactly match the input string.
+
+
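+ For example, the template form of the method can be exercised
+ directly (consistent with the table above):
+
+SELECT jsonb_path_query('"12:30"', '$.datetime("HH24:MI")');
+Result: "12:30:00"
+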
+
+
+ jsonpath Filter Expression Elements
+
+
+
+ Value/Predicate
+ Description
+ Example JSON
+ Example Query
+ Result
+
+
+
+
+ ==
+ Equality operator
+ [1, 2, 1, 3]
+ $[*] ? (@ == 1)
+ 1, 1
+
+
+ !=
+ Non-equality operator
+ [1, 2, 1, 3]
+ $[*] ? (@ != 1)
+ 2, 3
+
+
+ <>
+ Non-equality operator (same as != )
+ [1, 2, 1, 3]
+ $[*] ? (@ <> 1)
+ 2, 3
+
+
+ <
+ Less-than operator
+ [1, 2, 3]
+ $[*] ? (@ < 2)
+ 1
+
+
+ <=
+ Less-than-or-equal-to operator
+ [1, 2, 3]
+ $[*] ? (@ <= 2)
+ 1, 2
+
+
+ >
+ Greater-than operator
+ [1, 2, 3]
+ $[*] ? (@ > 2)
+ 3
+
+
+ >=
+ Greater-than-or-equal-to operator
+ [1, 2, 3]
+ $[*] ? (@ >= 2)
+ 2, 3
+
+
+ true
+ Value used to perform comparison with JSON true literal
+ [{"name": "John", "parent": false},
+ {"name": "Chris", "parent": true}]
+ $[*] ? (@.parent == true)
+ {"name": "Chris", "parent": true}
+
+
+ false
+ Value used to perform comparison with JSON false literal
+ [{"name": "John", "parent": false},
+ {"name": "Chris", "parent": true}]
+ $[*] ? (@.parent == false)
+ {"name": "John", "parent": false}
+
+
+ null
+ Value used to perform comparison with JSON null value
+ [{"name": "Mary", "job": null},
+ {"name": "Michael", "job": "driver"}]
+ $[*] ? (@.job == null) .name
+ "Mary"
+
+
+ &&
+ Boolean AND
+ [1, 3, 7]
+ $[*] ? (@ > 1 && @ < 5)
+ 3
+
+
+ ||
+ Boolean OR
+ [1, 3, 7]
+ $[*] ? (@ < 1 || @ > 5)
+ 7
+
+
+ !
+ Boolean NOT
+ [1, 3, 7]
+ $[*] ? (!(@ < 5))
+ 7
+
+
+ like_regex
+
+ Tests whether the first operand matches the regular expression
+ given by the second operand, optionally with modifications
+ described by a string of flag characters (see
+ )
+
+ ["abc", "abd", "aBdC", "abdacb", "babc"]
+ $[*] ? (@ like_regex "^ab.*c" flag "i")
+ "abc", "aBdC", "abdacb"
+
+
+ starts with
+ Tests whether the second operand is an initial substring of the first operand
+ ["John Smith", "Mary Stone", "Bob Johnson"]
+ $[*] ? (@ starts with "John")
+ "John Smith"
+
+
+ exists
+ Tests whether a path expression matches at least one SQL/JSON item
+ {"x": [1, 2], "y": [2, 4]}
+ strict $.* ? (exists (@ ? (@[*] > 2)))
+ 2, 4
+
+
+ is unknown
+ Tests whether a Boolean condition is unknown
+ [-1, 2, 7, "infinity"]
+ $[*] ? ((@ > 0) is unknown)
+ "infinity"
+
+
+
+
+
+
+ When different date/time values are compared, an implicit cast is
+ applied. A date value can be cast to timestamp
+ or timestamptz , a timestamp can be cast to
+ timestamptz , and a time to timetz .
+
+
+
+
@@ -12346,11 +13833,20 @@ SELECT setval('foo', 42, false); Next nextval
If your needs go beyond the capabilities of these conditional
- expressions, you might want to consider writing a stored procedure
+ expressions, you might want to consider writing a server-side function
in a more expressive programming language.
+
+
+ Although COALESCE , GREATEST , and
+ LEAST are syntactically similar to functions, they are
+ not ordinary functions, and thus cannot be used with explicit
+ VARIADIC array arguments.
+
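+ For example, the following is rejected with a syntax error, because
+ GREATEST is parsed as a SQL construct rather than
+ as a function call:
+
+SELECT greatest(VARIADIC ARRAY[1, 2, 3]);
+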
+
+
CASE
@@ -12646,14 +14142,14 @@ SELECT NULLIF(value, '(none)') ...
@>
contains
- ARRAY[1,4,3] @> ARRAY[3,1]
+ ARRAY[1,4,3] @> ARRAY[3,1,3]
t
<@
is contained by
- ARRAY[2,7] <@ ARRAY[1,7,4,2,6]
+ ARRAY[2,2,7] <@ ARRAY[1,7,4,2,6]
t
@@ -12696,8 +14192,10 @@ SELECT NULLIF(value, '(none)') ...
- Array comparisons compare the array contents element-by-element,
- using the default B-tree comparison function for the element data type.
+ The array ordering operators (< ,
+ >= , etc) compare the array contents
+ element-by-element, using the default B-tree comparison function for
+ the element data type, and sort based on the first difference.
In multidimensional arrays the elements are visited in row-major order
(last subscript varies most rapidly).
If the contents of two arrays are equal but the dimensionality is
@@ -12708,6 +14206,15 @@ SELECT NULLIF(value, '(none)') ...
number of dimensions or subscript ranges were different.)
+
+ The array containment operators (<@
+ and @> ) consider one array to be contained in
+ another one if each of its elements appears in the other one.
+ Duplicates are not treated specially, thus ARRAY[1]
+ and ARRAY[1,1] are each considered to contain the
+ other.
+
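+ For example:
+
+SELECT ARRAY[1] <@ ARRAY[1,1] AS a, ARRAY[1,1] <@ ARRAY[1] AS b;
+ a | b
+---+---
+ t | t
+(1 row)
+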
+
See for more details about array operator
behavior. See for more details about
@@ -13446,7 +14953,7 @@ NULL baz(3 rows)
No
input arrays concatenated into array of one higher dimension
(inputs must all have same dimensionality,
- and cannot be empty or NULL)
+ and cannot be empty or null)
@@ -13470,7 +14977,7 @@ NULL baz(3 rows)
otherwise the same as the argument data type
Yes
- the average (arithmetic mean) of all input values
+ the average (arithmetic mean) of all non-null input values
@@ -13598,7 +15105,7 @@ NULL baz(3 rows)
json
No
- aggregates values as a JSON array
+ aggregates values, including nulls, as a JSON array
@@ -13615,7 +15122,7 @@ NULL baz(3 rows)
jsonb
No
- aggregates values as a JSON array
+ aggregates values, including nulls, as a JSON array
@@ -13632,7 +15139,8 @@ NULL baz(3 rows)
json
No
- aggregates name/value pairs as a JSON object
+ aggregates name/value pairs as a JSON object; values can be
+ null, but not names
@@ -13649,7 +15157,8 @@ NULL baz(3 rows)
jsonb
No
- aggregates name/value pairs as a JSON object
+ aggregates name/value pairs as a JSON object; values can be
+ null, but not names
@@ -13659,13 +15168,13 @@ NULL baz(3 rows)
max(expression )
- any numeric, string, date/time, network, or enum type,
+ any numeric, string, date/time, network, pg_lsn, or enum type,
or arrays of these types
same as argument type
Yes
maximum value of expression across all input
+ class="parameter">expression across all non-null input
values
@@ -13677,13 +15186,13 @@ NULL baz(3 rows)
min(expression )
- any numeric, string, date/time, network, or enum type,
+ any numeric, string, date/time, network, pg_lsn, or enum type,
or arrays of these types
same as argument type
Yes
minimum value of expression across all input
+ class="parameter">expression across all non-null input
values
@@ -13705,7 +15214,7 @@ NULL baz(3 rows)
same as argument types
No
- input values concatenated into a string, separated by delimiter
+ non-null input values concatenated into a string, separated by delimiter
@@ -13728,7 +15237,8 @@ NULL baz(3 rows)
argument data type
Yes
- sum of expression across all input values
+ sum of expression
+ across all non-null input values
@@ -13745,7 +15255,8 @@ NULL baz(3 rows)
xml
No
- concatenation of XML values (see also )
+ concatenation of non-null XML values
+ (see also )
@@ -15168,8 +16679,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2);
The result is false
if the comparison returns false for every
subquery row (including the case where the subquery returns no
rows).
- The result is NULL if the comparison does not return true for any row,
- and it returns NULL for at least one row.
+ The result is NULL if no comparison with a subquery row returns true,
+ and at least one comparison returns NULL.
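+ For example, the following yields NULL rather than false: the
+ comparison against 2 returns false and the comparison against NULL
+ returns NULL, so no comparison returns true:
+
+SELECT 1 = ANY (VALUES (2), (NULL::int));
+Result: NULL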
@@ -15194,8 +16705,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2);
The result of ALL is true
if all rows yield true
(including the case where the subquery returns no rows).
The result is false
if any false result is found.
- The result is NULL if the comparison does not return false for any row,
- and it returns NULL for at least one row.
+ The result is NULL if no comparison with a subquery row returns false,
+ and at least one comparison returns NULL.
@@ -15224,8 +16735,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2);
case where the subquery returns no rows).
The result is false
if the comparison returns false for any
subquery row.
- The result is NULL if the comparison does not return false for any
- subquery row, and it returns NULL for at least one row.
+ The result is NULL if no comparison with a subquery row returns false,
+ and at least one comparison returns NULL.
@@ -15235,7 +16746,7 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2);
- Single-row Comparison
+ Single-Row Comparison
comparison
@@ -15884,7 +17395,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n);
- System Information Functions
+ System Information Functions and Operators
shows several
@@ -16280,7 +17791,7 @@ SET search_path TO schema , sc
optional parameter. The return value is NULL when the
log format requested is not a configured
. The
- pg_current_logfiles reflects the contents of the
+ pg_current_logfile reflects the contents of the
current_logfiles file.
@@ -16349,7 +17860,7 @@ SET search_path TO schema , sc
because it needs access to the predicate lock manager's shared
state for a short time.
-
+
version
@@ -16360,7 +17871,7 @@ SET search_path TO schema , sc
get this information from or
for a machine-readable version, .
Software developers should use server_version_num
- (available since 8.2) or instead
+ (available since 8.2) or instead
of parsing the text version.
@@ -16815,6 +18326,130 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute');
be specified by name or by OID.
+
+ shows the operators
+ available for the aclitem type, which is the catalog
+ representation of access privileges. See
+ for information about how to read access privilege values.
+
+
+
+ acldefault
+
+
+ aclitemeq
+
+
+ aclcontains
+
+
+ aclexplode
+
+
+ makeaclitem
+
+
+
+ aclitem Operators
+
+
+
+ Operator
+ Description
+ Example
+ Result
+
+
+
+
+
+ =
+ equal
+ 'calvin=r*w/hobbes'::aclitem = 'calvin=r*w*/hobbes'::aclitem
+ f
+
+
+
+ @>
+ contains element
+ '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] @> 'calvin=r*w/hobbes'::aclitem
+ t
+
+
+
+ ~
+ contains element
+ '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] ~ 'calvin=r*w/hobbes'::aclitem
+ t
+
+
+
+
+
+
+
+ shows some additional
+ functions to manage the aclitem type.
+
+
+
+ aclitem Functions
+
+
+ Name Return Type Description
+
+
+
+ acldefault (type ,
+ ownerId )
+ aclitem[]
+ get the default access privileges for an object belonging to ownerId
+
+
+ aclexplode (aclitem[] )
+ setof record
+ get aclitem array as tuples
+
+
+ makeaclitem (grantee , grantor , privilege , grantable )
+ aclitem
+ build an aclitem from input
+
+
+
+
+
+
+ acldefault returns the built-in default access
+ privileges for an object of type type belonging to
+ role ownerId . These represent the access
+ privileges that will be assumed when an object's ACL entry is null.
+ (The default access privileges are described in .)
+ The type parameter is a CHAR : write
+ 'c' for COLUMN ,
+ 'r' for TABLE and table-like objects,
+ 's' for SEQUENCE ,
+ 'd' for DATABASE ,
+ 'f' for FUNCTION or PROCEDURE ,
+ 'l' for LANGUAGE ,
+ 'L' for LARGE OBJECT ,
+ 'n' for SCHEMA ,
+ 't' for TABLESPACE ,
+ 'F' for FOREIGN DATA WRAPPER ,
+ 'S' for FOREIGN SERVER ,
+ or
+ 'T' for TYPE or DOMAIN .
+
+
+
+ aclexplode returns an aclitem array
+ as a set of rows. Output columns are grantor oid ,
+ grantee oid (0 for PUBLIC ),
+ granted privilege as text (SELECT , ...)
+ and whether the privilege is grantable as boolean .
+ makeaclitem performs the inverse operation.
+
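+ For example (assuming roles calvin and
+ hobbes exist):
+
+SELECT makeaclitem('calvin'::regrole::oid, 'hobbes'::regrole::oid,
+    'SELECT', true);
+Result: calvin=r*/hobbes
+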
+
shows functions that
determine whether a certain object is visible in the
@@ -17519,7 +19154,9 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id'));
backward_scan
- Can the index be scanned backwards?
+ Can the scan direction be changed in mid-scan (to
+ support FETCH BACKWARD on a cursor without
+ needing materialization)?
@@ -17674,24 +19311,24 @@ SELECT collation for ('foo' COLLATE "de_DE");
- pg_describe_object(catalog_id , object_id , object_sub_id )
+ pg_describe_object(classid oid , objid oid , objsubid integer )
text
get description of a database object
- pg_identify_object(catalog_id oid , object_id oid , object_sub_id integer )
+ pg_identify_object(classid oid , objid oid , objsubid integer )
type text , schema text , name text , identity text
get identity of a database object
- pg_identify_object_as_address(catalog_id oid , object_id oid , object_sub_id integer )
- type text , name text[] , args text[]
+ pg_identify_object_as_address(classid oid , objid oid , objsubid integer )
+ type text , object_names text[] , object_args text[]
get external representation of a database object's address
- pg_get_object_address(type text , name text[] , args text[] )
- catalog_id oid , object_id oid , object_sub_id int32
- get address of a database object, from its external representation
+ pg_get_object_address(type text , object_names text[] , object_args text[] )
+ classid oid , objid oid , objsubid integer
+ get address of a database object from its external representation
@@ -17699,7 +19336,9 @@ SELECT collation for ('foo' COLLATE "de_DE");
pg_describe_object returns a textual description of a database
- object specified by catalog OID, object OID and a (possibly zero) sub-object ID.
+ object specified by catalog OID, object OID, and sub-object ID (such as
+ a column number within a table; the sub-object ID is zero when referring
+ to a whole object).
This description is intended to be human-readable, and might be translated,
depending on server configuration.
This is useful to determine the identity of an object as stored in the
@@ -17708,29 +19347,30 @@ SELECT collation for ('foo' COLLATE "de_DE");
pg_identify_object returns a row containing enough information
- to uniquely identify the database object specified by catalog OID, object OID and a
- (possibly zero) sub-object ID. This information is intended to be machine-readable,
+ to uniquely identify the database object specified by catalog OID, object OID and
+ sub-object ID. This information is intended to be machine-readable,
and is never translated.
type identifies the type of database object;
schema is the schema name that the object belongs in, or
NULL for object types that do not belong to schemas;
- name is the name of the object, quoted if necessary, only
- present if it can be used (alongside schema name, if pertinent) as a unique
- identifier of the object, otherwise NULL ;
- identity is the complete object identity, with the precise format
- depending on object type, and each part within the format being
- schema-qualified and quoted as necessary.
+ name is the name of the object, quoted if necessary,
+ if the name (along with schema name, if pertinent) is sufficient to
+ uniquely identify the object, otherwise NULL ;
+ identity is the complete object identity, with the
+ precise format depending on object type, and each name within the format
+ being schema-qualified and quoted as necessary.
pg_identify_object_as_address returns a row containing
enough information to uniquely identify the database object specified by
- catalog OID, object OID and a (possibly zero) sub-object ID. The returned
+ catalog OID, object OID and sub-object ID. The returned
information is independent of the current server, that is, it could be used
to identify an identically named object in another server.
type identifies the type of database object;
- name and args are text arrays that together
- form a reference to the object. These three columns can be passed to
+ object_names and object_args
+ are text arrays that together form a reference to the object.
+ These three values can be passed to
pg_get_object_address to obtain the internal address
of the object.
This function is the inverse of pg_get_object_address .
@@ -17743,10 +19383,10 @@ SELECT collation for ('foo' COLLATE "de_DE");
ones that would be used in system catalogs such as pg_depend
and can be passed to other system functions such as
pg_identify_object or pg_describe_object .
- catalog_id is the OID of the system catalog containing the
+ classid is the OID of the system catalog containing the
object;
- object_id is the OID of the object itself, and
- object_sub_id is the object sub-ID, or zero if none.
+ objid is the OID of the object itself, and
+ objsubid is the sub-object ID, or zero if none.
This function is the inverse of pg_identify_object_as_address .
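+
+ A hypothetical round trip through these functions, for a table
+ mytable in schema public :
+
+SELECT * FROM pg_identify_object_as_address('pg_class'::regclass, 'mytable'::regclass, 0);
+SELECT * FROM pg_get_object_address('table', '{public,mytable}', '{}');
+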
@@ -17917,7 +19557,7 @@ SELECT collation for ('foo' COLLATE "de_DE");
txid_status(bigint )
- txid_status
+ text
report the status of the given transaction: committed , aborted , in progress , or null if the transaction ID is too old
@@ -18017,7 +19657,7 @@ SELECT collation for ('foo' COLLATE "de_DE");
- Committed transaction information
+ Committed Transaction Information
Name Return Type Description
@@ -18658,6 +20298,8 @@ SELECT set_config('log_statement_stats', 'off', false);
The functions shown in assist in making on-line backups.
These functions cannot be executed during recovery (except
+ non-exclusive pg_start_backup ,
+ non-exclusive pg_stop_backup ,
pg_is_in_backup , pg_backup_start_time
and pg_wal_lsn_diff ).
@@ -18839,7 +20481,7 @@ postgres=# select pg_start_backup('label_goes_here');
pg_create_restore_point creates a named write-ahead log
record that can be used as recovery target, and returns the corresponding
write-ahead log location. The given name can then be used with
- to specify the point up to which
+ to specify the point up to which
recovery will proceed. Avoid creating multiple restore points with the
same name, since recovery will stop at the first one whose name matches
the recovery target.
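+
+ For example (the restore point name is arbitrary):
+
+SELECT pg_create_restore_point('before_app_upgrade');
+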
@@ -18984,6 +20626,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
pg_is_wal_replay_paused
+
+ pg_promote
+
pg_wal_replay_pause
@@ -19014,6 +20659,24 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
True if recovery is paused.
+
+
+ pg_promote(wait boolean DEFAULT true, wait_seconds integer DEFAULT 60)
+
+ boolean
+
+ Promotes a physical standby server. With wait
+ set to true (the default), the function waits until
+ promotion is completed or wait_seconds seconds
+ have passed, and returns true if promotion is
+ successful and false otherwise.
+ If wait is set to false , the
+ function returns true immediately after sending
+ SIGUSR1 to the postmaster to trigger the promotion.
+ This function is restricted to superusers by default, but other users
+ can be granted EXECUTE to run the function.
+
+
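+
+ A minimal sketch of promoting a standby, waiting up to 30 seconds for
+ the promotion to complete:
+
+SELECT pg_promote(true, 30);
+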
pg_wal_replay_pause()
@@ -19137,8 +20800,10 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
See ,
, and
- for information about the underlying features. Use of these
- functions is restricted to superusers.
+ for information about the underlying features.
+ Use of functions for replication origins is restricted to superusers.
+ Use of functions for replication slots is restricted to superusers
+ and users having the REPLICATION privilege.

@@ -19233,6 +20898,47 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
+
+
+
+ pg_copy_physical_replication_slot
+
+ pg_copy_physical_replication_slot(src_slot_name name , dst_slot_name name , temporary boolean )
+
+
+ (slot_name name , lsn pg_lsn )
+
+
+ Copies an existing physical replication slot named src_slot_name
+ to a physical replication slot named dst_slot_name .
+ The copied physical slot starts to reserve WAL from the same LSN as the
+ source slot.
+ temporary is optional. If temporary
+ is omitted, the same value as the source slot is used.
+
+
+
+
+
+
+ pg_copy_logical_replication_slot
+
+ pg_copy_logical_replication_slot(src_slot_name name , dst_slot_name name , temporary boolean , plugin name )
+
+
+ (slot_name name , lsn pg_lsn )
+
+
+ Copies an existing logical replication slot named src_slot_name
+ to a logical replication slot named dst_slot_name
+ while changing the output plugin and persistence. The copied logical slot starts
+ from the same LSN as the source logical slot. Both
+ temporary and plugin are optional.
+ If temporary or plugin is omitted,
+ the same values as the source logical slot are used.
+
+
+
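+
+ As a hypothetical example, copying an existing slot to a temporary
+ slot for testing:
+
+SELECT * FROM pg_copy_physical_replication_slot('node_a_slot', 'node_a_copy', true);
+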
@@ -19979,6 +21685,74 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
The function returns the number of new collation objects it created.
+
+ Partitioning Information Functions
+
+
+ Name Return Type Description
+
+
+
+
+
+ pg_partition_tree
+ pg_partition_tree(regclass )
+
+ setof record
+
+ List information about tables or indexes in a partition tree for a
+ given partitioned table or partitioned index, with one row for each
+ partition. Information provided includes the name of the partition,
+ the name of its immediate parent, a boolean value indicating whether the
+ partition is a leaf, and an integer reporting its level in the hierarchy.
+ The value of level begins at 0 for the input table
+ or index in its role as the root of the partition tree,
+ 1 for its partitions, 2 for
+ their partitions, and so on.
+
+
+
+
+ pg_partition_ancestors
+ pg_partition_ancestors(regclass )
+
+ setof regclass
+
+ List the ancestor relations of the given partition,
+ including the partition itself.
+
+
+
+
+ pg_partition_root
+ pg_partition_root(regclass )
+
+ regclass
+
+ Return the top-most parent of a partition tree to which the given
+ relation belongs.
+
+
+
+
+
+
+
+ To check the total size of the data contained in
+ the measurement table described in
+ , one could use the
+ following query:
+
+
+
+=# SELECT pg_size_pretty(sum(pg_relation_size(relid))) AS total_size
+ FROM pg_partition_tree('measurement');
+ total_size
+------------
+ 24 kB
+(1 row)
+
+
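+
+ Similarly, pg_partition_root and
+ pg_partition_ancestors can be used to navigate upward
+ from a leaf partition (the partition name here is hypothetical):
+
+SELECT pg_partition_root('measurement_y2006m02');
+SELECT * FROM pg_partition_ancestors('measurement_y2006m02');
+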
@@ -20087,8 +21861,8 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
- Note that granting users the EXECUTE privilege on the
- pg_read_file() , or related, functions allows them the
+ Note that granting users the EXECUTE privilege on
+ pg_read_file() , or related functions, allows them to
read any file on the server which the database can read and
that those reads bypass all in-database privilege checks. This means that,
among other things, a user with this access is able to read the contents of the
@@ -20137,6 +21911,32 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
role and may be granted to other non-superuser roles.
+
+
+ pg_ls_archive_statusdir()
+
+ setof record
+
+ List the name, size, and last modification time of files in the WAL
+ archive status directory. Access is granted to members of the
+ pg_monitor role and may be granted to other
+ non-superuser roles.
+
+
+
+
+ pg_ls_tmpdir(tablespace oid )
+
+ setof record
+
+ List the name, size, and last modification time of files in the
+ temporary directory for tablespace . If
+ tablespace is not provided, the
+ pg_default tablespace is used. Access is granted
+ to members of the pg_monitor role and may be
+ granted to other non-superuser roles.
+
+
pg_read_file(filename text [, offset bigint , length bigint [, missing_ok boolean ] ])
@@ -20210,6 +22010,31 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
GRANT .
+
+ pg_ls_archive_statusdir
+
+
+ pg_ls_archive_statusdir returns the name, size, and
+ last modified time (mtime) of each file in the WAL archive status
+ directory pg_wal/archive_status . By default only
+ superusers and members of the pg_monitor role can
+ use this function. Access may be granted to others using
+ GRANT .
+
+
+
+ pg_ls_tmpdir
+
+
+ pg_ls_tmpdir returns the name, size, and last modified
+ time (mtime) of each file in the temporary file directory for the specified
+ tablespace . If tablespace is
+ not provided, the pg_default tablespace is used. By
+ default only superusers and members of the pg_monitor
+ role can use this function. Access may be granted to others using
+ GRANT .
+
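+
+ For example, a monitoring role could look for leftover temporary files
+ and pending archive status files like this (a sketch; both functions
+ return name, size, and modification time):
+
+SELECT * FROM pg_ls_tmpdir();
+SELECT * FROM pg_ls_archive_statusdir();
+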
+
pg_read_file
@@ -20578,7 +22403,7 @@ SELECT (pg_stat_file('filename')).modification;
CREATE TRIGGER z_min_update
BEFORE UPDATE ON tablename
-FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
+FOR EACH ROW EXECUTE FUNCTION suppress_redundant_updates_trigger();
In most cases, you would want to fire this trigger last for each row.
Bearing in mind that triggers fire in name order, you would then
@@ -20635,23 +22460,23 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
classid
- Oid
+ oid
OID of catalog the object belongs in
objid
- Oid
- OID of the object in the catalog
+ oid
+ OID of the object itself
objsubid
integer
- Object sub-id (e.g. attribute number for columns)
+ Sub-object ID (e.g. attribute number for a column)
command_tag
text
- command tag
+ Command tag
object_type
@@ -20670,14 +22495,14 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
object_identity
text
- Text rendering of the object identity, schema-qualified. Each and every
- identifier present in the identity is quoted if necessary.
+ Text rendering of the object identity, schema-qualified. Each
+ identifier included in the identity is quoted if necessary.
in_extension
bool
- whether the command is part of an extension script
+ True if the command is part of an extension script
command
@@ -20722,29 +22547,29 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
classid
- Oid
+ oid
OID of catalog the object belonged in
objid
- Oid
- OID the object had within the catalog
+ oid
+ OID of the object itself
objsubid
- int32
- Object sub-id (e.g. attribute number for columns)
+ integer
+ Sub-object ID (e.g. attribute number for a column)
original
bool
- Flag used to identify the root object(s) of the deletion
+ True if this was one of the root object(s) of the deletion
normal
bool
- Flag indicating that there's a normal dependency relationship
+ True if there was a normal dependency relationship
in the dependency graph leading to this object
@@ -20752,7 +22577,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
is_temporary
bool
- Flag indicating that the object was a temporary object.
+ True if this was a temporary object
@@ -20781,8 +22606,8 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
object_identity
text
- Text rendering of the object identity, schema-qualified. Each and every
- identifier present in the identity is quoted if necessary.
+ Text rendering of the object identity, schema-qualified. Each
+ identifier included in the identity is quoted if necessary.
@@ -20790,17 +22615,17 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
text[]
An array that, together with object_type and
- address_args ,
- can be used by the pg_get_object_address() to
+ address_args , can be used by
+ the pg_get_object_address() function to
recreate the object address in a remote server containing an
- identically named object of the same kind.
+ identically named object of the same kind
address_args
text[]
- Complement for address_names above.
+ Complement for address_names
@@ -20830,7 +22655,7 @@ END
$$;
CREATE EVENT TRIGGER test_event_trigger_for_drops
ON sql_drop
- EXECUTE PROCEDURE test_event_trigger_for_drops();
+ EXECUTE FUNCTION test_event_trigger_for_drops();
@@ -20847,7 +22672,7 @@ CREATE EVENT TRIGGER test_event_trigger_for_drops
- Table Rewrite information
+ Table Rewrite Information
Name Return Type Description
@@ -20895,10 +22720,92 @@ $$;
CREATE EVENT TRIGGER test_table_rewrite_oid
ON table_rewrite
- EXECUTE PROCEDURE test_event_trigger_table_rewrite_oid();
+ EXECUTE FUNCTION test_event_trigger_table_rewrite_oid();
+
+ Statistics Information Functions
+
+
+ function
+ statistics
+
+
+
+ PostgreSQL provides a function to inspect complex
+ statistics defined using the CREATE STATISTICS command.
+
+
+
+ Inspecting MCV Lists
+
+
+ pg_mcv_list_items
+ pg_mcv_list
+
+
+
+ pg_mcv_list_items returns a set of all items
+ stored in a multi-column MCV list, with the
+ following columns:
+
+
+
+
+
+ Name
+ Type
+ Description
+
+
+
+
+
+ index
+ int
+ index of the item in the MCV list
+
+
+ values
+ text[]
+ values stored in the MCV item
+
+
+ nulls
+ boolean[]
+ flags identifying NULL values
+
+
+ frequency
+ double precision
+ frequency of this MCV item
+
+
+ base_frequency
+ double precision
+ base frequency of this MCV item
+
+
+
+
+
+
+
+ The pg_mcv_list_items function can be used like this:
+
+
+SELECT m.* FROM pg_statistic_ext JOIN pg_statistic_ext_data ON (oid = stxoid),
+ pg_mcv_list_items(stxdmcv) m WHERE stxname = 'stts';
+
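+
+ The stts statistics object assumed above could have been
+ created with something like the following (table and column names are
+ hypothetical):
+
+CREATE STATISTICS stts (mcv) ON city, state FROM zipcodes;
+ANALYZE zipcodes;
+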
+
+ Values of the pg_mcv_list type can be obtained only from the
+ pg_statistic_ext_data.stxdmcv column.
+
+
+
+
+
diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl
index ebec43159e2..24b9d891c3b 100644
--- a/doc/src/sgml/generate-errcodes-table.pl
+++ b/doc/src/sgml/generate-errcodes-table.pl
@@ -1,7 +1,7 @@
#!/usr/bin/perl
#
# Generate the errcodes-table.sgml file from errcodes.txt
-# Copyright (c) 2000-2018, PostgreSQL Global Development Group
+# Copyright (c) 2000-2019, PostgreSQL Global Development Group
use warnings;
use strict;
diff --git a/doc/src/sgml/generate-keywords-table.pl b/doc/src/sgml/generate-keywords-table.pl
new file mode 100644
index 00000000000..e948d5fb5e9
--- /dev/null
+++ b/doc/src/sgml/generate-keywords-table.pl
@@ -0,0 +1,122 @@
+#!/usr/bin/perl
+#
+# Generate the keywords table file
+# Copyright (c) 2019, PostgreSQL Global Development Group
+
+use strict;
+use warnings;
+
+my @sql_versions = reverse sort ('1992', '2011', '2016');
+
+my $srcdir = $ARGV[0];
+
+my %keywords;
+
+# read SQL keywords
+
+foreach my $ver (@sql_versions)
+{
+ foreach my $res ('reserved', 'nonreserved')
+ {
+ foreach my $file (glob "$srcdir/keywords/sql${ver}*-${res}.txt")
+ {
+ open my $fh, '<', $file or die;
+
+ while (<$fh>)
+ {
+ chomp;
+ $keywords{$_}{$ver}{$res} = 1;
+ }
+
+ close $fh;
+ }
+ }
+}
+
+# read PostgreSQL keywords
+
+open my $fh, '<', "$srcdir/../../../src/include/parser/kwlist.h" or die;
+
+while (<$fh>)
+{
+ if (/^PG_KEYWORD\("(\w+)", \w+, (\w+)_KEYWORD\)/)
+ {
+ $keywords{ uc $1 }{'pg'}{ lc $2 } = 1;
+ }
+}
+
+close $fh;
+
+# print output
+
+print "\n";
+
+print <<END;
+ SQL Key Words
+
+
+
+
+ Key Word
+ PostgreSQL
+END
+
+foreach my $ver (@sql_versions)
+{
+ my $s = ($ver eq '1992' ? 'SQL-92' : "SQL:$ver");
+ print " $s \n";
+}
+
+print <<END;
+
+
+
+END
+
+foreach my $word (sort keys %keywords)
+{
+ print " \n";
+ print " $word \n";
+
+ print " ";
+ if ($keywords{$word}{pg}{'unreserved'})
+ {
+ print "non-reserved";
+ }
+ elsif ($keywords{$word}{pg}{'col_name'})
+ {
+ print "non-reserved (cannot be function or type)";
+ }
+ elsif ($keywords{$word}{pg}{'type_func_name'})
+ {
+ print "reserved (can be function or type)";
+ }
+ elsif ($keywords{$word}{pg}{'reserved'})
+ {
+ print "reserved";
+ }
+ print " \n";
+
+ foreach my $ver (@sql_versions)
+ {
+ print " ";
+ if ($keywords{$word}{$ver}{'reserved'})
+ {
+ print "reserved";
+ }
+ elsif ($keywords{$word}{$ver}{'nonreserved'})
+ {
+ print "non-reserved";
+ }
+ print " \n";
+ }
+ print "
\n";
+}
+
+print <<END;
+
+
+END
diff --git a/doc/src/sgml/geqo.sgml b/doc/src/sgml/geqo.sgml
index 5120dfbb424..39d2163d160 100644
--- a/doc/src/sgml/geqo.sgml
+++ b/doc/src/sgml/geqo.sgml
@@ -84,9 +84,19 @@
Through simulation of the evolutionary operations recombination ,
mutation , and
selection new generations of search points are found
- that show a higher average fitness than their ancestors.
+ that show a higher average fitness than their ancestors.
+ illustrates these steps.
+
+ Structure of a Genetic Algorithm
+
+
+
+
+
+
+
According to the comp.ai.genetic FAQ it cannot be stressed too
strongly that a GA is not a pure random search for a solution to a
@@ -94,49 +104,6 @@
non-random (better than random).
-
- Structured Diagram of a Genetic Algorithm
-
-
-
-
-
- P(t)
- generation of ancestors at a time t
-
-
-
- P''(t)
- generation of descendants at a time t
-
-
-
-
-
-
-+=========================================+
-|>>>>>>>>>>> Algorithm GA <<<<<<<<<<<<<<|
-+=========================================+
-| INITIALIZE t := 0 |
-+=========================================+
-| INITIALIZE P(t) |
-+=========================================+
-| evaluate FITNESS of P(t) |
-+=========================================+
-| while not STOPPING CRITERION do |
-| +-------------------------------------+
-| | P'(t) := RECOMBINATION{P(t)} |
-| +-------------------------------------+
-| | P''(t) := MUTATION{P'(t)} |
-| +-------------------------------------+
-| | P(t+1) := SELECTION{P''(t) + P(t)} |
-| +-------------------------------------+
-| | evaluate FITNESS of P''(t) |
-| +-------------------------------------+
-| | t := t + 1 |
-+===+=====================================+
-
-
diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml
index cc7cd1ed2c4..0182b445855 100644
--- a/doc/src/sgml/gin.sgml
+++ b/doc/src/sgml/gin.sgml
@@ -102,6 +102,8 @@
?&
?|
@>
+ @?
+ @@
@@ -109,6 +111,8 @@
jsonb
@>
+ @?
+ @@
@@ -223,7 +227,7 @@
pmatch is an output argument for use when partial match
is supported. To use it, extractQuery must allocate
- an array of *nkeys bools and store its address at
+ an array of *nkeys bool s and store its address at
*pmatch . Each element of the array should be set to true
if the corresponding key requires partial match, false if not.
If *pmatch is set to NULL then GIN assumes partial match
@@ -251,12 +255,12 @@
An operator class must also provide a function to check if an indexed item
- matches the query. It comes in two flavors, a boolean consistent
+ matches the query. It comes in two flavors, a Boolean consistent
function, and a ternary triConsistent function.
triConsistent covers the functionality of both, so providing
- triConsistent alone is sufficient. However, if the boolean
+ triConsistent alone is sufficient. However, if the Boolean
variant is significantly cheaper to calculate, it can be advantageous to
- provide both. If only the boolean variant is provided, some optimizations
+ provide both. If only the Boolean variant is provided, some optimizations
that depend on refuting index items before fetching all the keys are
disabled.
@@ -319,11 +323,11 @@
triConsistent is similar to consistent ,
- but instead of booleans in the check vector, there are
+ but instead of Booleans in the check vector, there are
three possible values for each
key: GIN_TRUE , GIN_FALSE and
GIN_MAYBE . GIN_FALSE and GIN_TRUE
- have the same meaning as regular boolean values, while
+ have the same meaning as regular Boolean values, while
GIN_MAYBE means that the presence of that key is not known.
When GIN_MAYBE values are present, the function should only
return GIN_TRUE if the item certainly matches whether or
@@ -338,7 +342,7 @@
When there are no GIN_MAYBE values in the check
vector, a GIN_MAYBE return value is the equivalent of
setting the recheck flag in the
- boolean consistent function.
+ Boolean consistent function.
@@ -436,7 +440,8 @@
page contains either a pointer to a B-tree of heap pointers (a
posting tree), or a simple list of heap pointers (a posting
list) when the list is small enough to fit into a single index tuple along
- with the key value.
+ with the key value. illustrates
+ these components of a GIN index.
@@ -453,6 +458,15 @@
key values for different columns can be of different types.
+
+ GIN Internals
+
+
+
+
+
+
+
GIN Fast Update Technique
diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml
index 44a3b2c03c5..a7eec1e9497 100644
--- a/doc/src/sgml/gist.sgml
+++ b/doc/src/sgml/gist.sgml
@@ -83,6 +83,7 @@
~=
+ <->
@@ -696,8 +697,8 @@ my_picksplit(PG_FUNCTION_ARGS)
/*
* Choose where to put the index entries and update unionL and unionR
- * accordingly. Append the entries to either v_spl_left or
- * v_spl_right, and care about the counters.
+ * accordingly. Append the entries to either v->spl_left or
+ * v->spl_right, and care about the counters.
*/
if (my_choice_is_left(unionL, curl, unionR, curr))
@@ -910,7 +911,7 @@ Datum
my_fetch(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
- input_data_type *in = DatumGetP(entry->key);
+ input_data_type *in = DatumGetPointer(entry->key);
fetched_data_type *fetched_data;
GISTENTRY *retval;
@@ -921,7 +922,7 @@ my_fetch(PG_FUNCTION_ARGS)
* Convert 'fetched_data' into a Datum of the original datatype.
*/
- /* fill *retval from fetch_data. */
+ /* fill *retval from fetched_data. */
gistentryinit(*retval, PointerGetDatum(converted_datum),
entry->rel, entry->page, entry->offset, FALSE);
@@ -960,7 +961,7 @@ my_fetch(PG_FUNCTION_ARGS)
Implementation
- GiST buffering build
+ GiST Buffering Build
Building large GiST indexes by simply inserting all the tuples tends to be
slow, because if the index tuples are scattered across the index and the
diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index 46bf198a2ac..43bcb2a6efd 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -618,7 +618,7 @@ protocol to make nodes agree on a serializable transactional order.
In standby mode, the server continuously applies WAL received from the
master server. The standby server can read WAL from a WAL archive
- (see ) or directly from the master
+ (see ) or directly from the master
over a TCP connection (streaming replication). The standby server will
also attempt to restore any WAL found in the standby cluster's
pg_wal directory. That typically happens after a server
@@ -645,7 +645,7 @@ protocol to make nodes agree on a serializable transactional order.
Standby mode is exited and the server switches to normal operation
when pg_ctl promote is run or a trigger file is found
- (trigger_file ). Before failover,
+ (promote_trigger_file ). Before failover,
any WAL immediately available in the archive or in pg_wal will be
restored, but no attempt is made to connect to the master.
@@ -686,20 +686,19 @@ protocol to make nodes agree on a serializable transactional order.
To set up the standby server, restore the base backup taken from primary
- server (see ). Create a recovery
- command file recovery.conf in the standby's cluster data
- directory, and turn on standby_mode . Set
- restore_command to a simple command to copy files from
+ server (see ). Create a file
+ standby.signal in the standby's cluster data
+ directory. Set to a simple command to copy files from
the WAL archive. If you plan to have multiple standby servers for high
- availability purposes, set recovery_target_timeline to
- latest , to make the standby server follow the timeline change
+ availability purposes, make sure that recovery_target_timeline is set to
+ latest (the default), to make the standby server follow the timeline change
that occurs at failover to another standby.
Do not use pg_standby or similar tools with the built-in standby mode
- described here. restore_command should return immediately
+ described here. should return immediately
if the file does not exist; the server will retry the command again if
necessary. See
for using tools like pg_standby.
@@ -708,11 +707,11 @@ protocol to make nodes agree on a serializable transactional order.
If you want to use streaming replication, fill in
- primary_conninfo with a libpq connection string, including
+ with a libpq connection string, including
the host name (or IP address) and any additional details needed to
connect to the primary server. If the primary needs a password for
authentication, the password needs to be specified in
- primary_conninfo as well.
+ as well.
@@ -724,7 +723,7 @@ protocol to make nodes agree on a serializable transactional order.
If you're using a WAL archive, its size can be minimized using the parameter to remove files that are no
+ linkend="guc-archive-cleanup-command"/> parameter to remove files that are no
longer required by the standby server.
The pg_archivecleanup utility is designed specifically to
be used with archive_cleanup_command in typical single-standby
@@ -735,10 +734,9 @@ protocol to make nodes agree on a serializable transactional order.
- A simple example of a recovery.conf is:
+ A simple example of configuration is:
-standby_mode = 'on'
-primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
+primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass options=''-c wal_sender_timeout=5000'''
restore_command = 'cp /path/to/archive/%f %p'
archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
@@ -793,8 +791,8 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
To use streaming replication, set up a file-based log-shipping standby
server as described in . The step that
turns a file-based log-shipping standby into streaming replication
- standby is setting primary_conninfo setting in the
- recovery.conf file to point to the primary server. Set
+ standby is setting the primary_conninfo setting
+ to point to the primary server. Set
and authentication options
(see pg_hba.conf ) on the primary so that the standby server
can connect to the replication pseudo-database on the primary
@@ -854,14 +852,14 @@ host replication foo 192.168.1.100/32 md5
The host name and port number of the primary, connection user name,
- and password are specified in the recovery.conf file.
+ and password are specified in the .
The password can also be set in the ~/.pgpass file on the
standby (specify replication in the database
field).
For example, if the primary is running on host IP 192.168.1.50 ,
port 5432 , the account name for replication is
foo , and the password is foopass , the administrator
- can add the following line to the recovery.conf file on the
+ can add the following line to the postgresql.conf file on the
standby:
@@ -940,7 +938,7 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
protection. Replication slots overcome these disadvantages.
- Querying and manipulating replication slots
+ Querying and Manipulating Replication Slots
Each replication slot has a name, which can contain lower-case letters,
numbers, and the underscore character.
@@ -966,17 +964,15 @@ postgres=# SELECT * FROM pg_create_physical_replication_slot('node_a_slot');
-------------+-----
node_a_slot |
-postgres=# SELECT * FROM pg_replication_slots;
- slot_name | slot_type | datoid | database | active | xmin | restart_lsn | confirmed_flush_lsn
--------------+-----------+--------+----------+--------+------+-------------+---------------------
- node_a_slot | physical | | | f | | |
+postgres=# SELECT slot_name, slot_type, active FROM pg_replication_slots;
+ slot_name | slot_type | active
+-------------+-----------+--------
+ node_a_slot | physical | f
(1 row)
To configure the standby to use this slot, primary_slot_name
- should be configured in the standby's recovery.conf .
- Here is a simple example:
+ should be configured on the standby. Here is a simple example:
-standby_mode = 'on'
primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
primary_slot_name = 'node_a_slot'
@@ -1028,7 +1024,7 @@ primary_slot_name = 'node_a_slot'
If an upstream standby server is promoted to become new master, downstream
servers will continue to stream from the new master if
- recovery_target_timeline is set to 'latest' .
+ recovery_target_timeline is set to 'latest' (the default).
@@ -1324,14 +1320,14 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
- If primary restarts while commits are waiting for acknowledgement, those
+ If the primary restarts while commits are waiting for acknowledgment, those
waiting transactions will be marked fully committed once the primary
database recovers.
There is no way to be certain that all standbys have received all
outstanding WAL data at time of the crash of the primary. Some
transactions may not show as committed on the standby, even though
they show as committed on the primary. The guarantee we offer is that
- the application will not receive explicit acknowledgement of the
+ the application will not receive explicit acknowledgment of the
successful commit of a transaction until the WAL data is known to be
safely received by all the synchronous standbys.
@@ -1361,7 +1357,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
- Continuous archiving in standby
+ Continuous Archiving in Standby
continuous archiving
@@ -1471,14 +1467,16 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
- To trigger failover of a log-shipping standby server,
- run pg_ctl promote or create a trigger
- file with the file name and path specified by the trigger_file
- setting in recovery.conf . If you're planning to use
- pg_ctl promote to fail over, trigger_file is
- not required. If you're setting up the reporting servers that are
- only used to offload read-only queries from the primary, not for high
- availability purposes, you don't need to promote it.
+ To trigger failover of a log-shipping standby server, run
+ pg_ctl promote , call pg_promote ,
+ or create a trigger file with the file name and path specified by the
+ promote_trigger_file . If you're planning to use
+ pg_ctl promote or to call
+ pg_promote to fail over,
+ promote_trigger_file is not required. If you're
+ setting up reporting servers that are only used to offload read-only
+ queries from the primary, not for high availability purposes, you don't
+ need to promote them.
@@ -1488,11 +1486,8 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
An alternative to the built-in standby mode described in the previous
sections is to use a restore_command that polls the archive location.
- This was the only option available in versions 8.4 and below. In this
- setup, set standby_mode off, because you are implementing
- the polling required for standby operation yourself. See the
- module for a reference
- implementation of this.
+ This was the only option available in versions 8.4 and below. See the
+ module for a reference implementation of this.
@@ -1519,12 +1514,11 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
The magic that makes the two loosely coupled servers work together is
simply a restore_command used on the standby that,
when asked for the next WAL file, waits for it to become available from
- the primary. The restore_command is specified in the
- recovery.conf file on the standby server. Normal recovery
+ the primary. Normal recovery
processing would request a file from the WAL archive, reporting failure
if the file was unavailable. For standby processing it is normal for
the next WAL file to be unavailable, so the standby must wait for
- it to appear. For files ending in .backup or
+ it to appear. For files ending in
.history there is no need to wait, and a non-zero return
code must be returned. A waiting restore_command can be
written as a custom script that loops after polling for the existence of
@@ -1608,9 +1602,8 @@ if (!triggered)
Begin recovery on the standby server from the local WAL
- archive, using a recovery.conf that specifies a
- restore_command that waits as described
- previously (see ).
+ archive, using a restore_command that waits
+ as described previously (see ).
@@ -1633,7 +1626,7 @@ if (!triggered)
- Record-based Log Shipping
+ Record-Based Log Shipping
It is also possible to implement record-based log shipping using this
@@ -1774,6 +1767,11 @@ if (!triggered)
Plugins and extensions - LOAD
+
+
+ UNLISTEN
+
+
@@ -1863,7 +1861,7 @@ if (!triggered)
- LISTEN , UNLISTEN , NOTIFY
+ LISTEN , NOTIFY
@@ -1871,8 +1869,8 @@ if (!triggered)
In normal operation, read-only
transactions are allowed to
- use LISTEN , UNLISTEN , and
- NOTIFY , so Hot Standby sessions operate under slightly tighter
+ use LISTEN and NOTIFY ,
+ so Hot Standby sessions operate under slightly tighter
restrictions than ordinary read-only sessions. It is possible that some
of these restrictions might be loosened in a future release.
@@ -2105,7 +2103,7 @@ if (!triggered)
If hot_standby is on in postgresql.conf
- (the default value) and there is a recovery.conf
+ (the default value) and there is a standby.signal
file present, the server will run in Hot Standby mode.
However, it may take some time for Hot Standby connections to be allowed,
because the server will not accept connections until it has completed
@@ -2179,6 +2177,11 @@ LOG: database system is ready to accept read only connections
max_locks_per_transaction
+
+
+ max_wal_senders
+
+
max_worker_processes
diff --git a/doc/src/sgml/history.sgml b/doc/src/sgml/history.sgml
index 59bfdb60552..180695afd94 100644
--- a/doc/src/sgml/history.sgml
+++ b/doc/src/sgml/history.sgml
@@ -64,9 +64,9 @@
POSTGRES has also been used as an
educational tool at several universities. Finally, Illustra
Information Technologies (later merged into
- Informix ,
+ Informix ,
- which is now owned by IBM ) picked up the code and
+ url="https://www.ibm.com/">IBM) picked up the code and
commercialized it. In late 1992,
POSTGRES became the primary data manager
for the
diff --git a/doc/src/sgml/images/Makefile b/doc/src/sgml/images/Makefile
new file mode 100644
index 00000000000..f9e356348b2
--- /dev/null
+++ b/doc/src/sgml/images/Makefile
@@ -0,0 +1,27 @@
+# doc/src/sgml/images/Makefile
+#
+# see README in this directory about image handling
+
+ALL_IMAGES = \
+ genetic-algorithm.svg \
+ gin.svg \
+ pagelayout.svg
+
+DITAA = ditaa
+DOT = dot
+XSLTPROC = xsltproc
+
+all: $(ALL_IMAGES)
+
+%.svg.tmp: %.gv
+ $(DOT) -T svg -o $@ $<
+
+%.svg.tmp: %.txt
+ $(DITAA) -E -S --svg $< $@
+
+# Post-processing for SVG files coming from other tools
+#
+# Use --novalid to avoid loading SVG DTD if a file specifies it, since
+# it might not be available locally, and we don't need it.
+%.svg: %.svg.tmp fixup-svg.xsl
+ $(XSLTPROC) --novalid -o $@ $(word 2,$^) $<
diff --git a/doc/src/sgml/images/README b/doc/src/sgml/images/README
new file mode 100644
index 00000000000..07c45802553
--- /dev/null
+++ b/doc/src/sgml/images/README
@@ -0,0 +1,65 @@
+Images
+======
+
+This directory contains images for use in the documentation.
+
+Creating an image
+-----------------
+
+A variety of tools can be used to create an image. The appropriate
+choice depends on the nature of the image. We prefer workflows that
+involve diffable source files.
+
+These tools are acceptable:
+
+- Graphviz (https://graphviz.org/)
+- Ditaa (http://ditaa.sourceforge.net/)
+
+We use SVG as the format for integrating the image into the ultimate
+output formats of the documentation, that is, HTML, PDF, and others.
+Therefore, any tool used needs to be able to produce SVG.
+
+This directory contains makefile rules to build SVG from common input
+formats, using some common styling.
+
+fixup-svg.xsl applies some postprocessing to the SVG files produced by
+those external tools to address assorted issues. See comments in
+there, and adjust and expand as necessary.
+
+Both the source and the SVG output file are committed in this
+directory. That way, we don't need all developers to have all the
+tools installed. While we accept that there could be some gratuitous
+diffs in the SVG output depending on the specific tool, let's keep an eye
+on that and keep it to a minimum.
+
+Using an image in DocBook
+-------------------------
+
+Here is an example for using an image in DocBook:
+
+
+ GIN Internals
+
+
+
+
+
+
+
+Notes:
+
+- The real action is in the element, but typically a
+ should be wrapped around it and an to the figure
+ should be put into the text somewhere. Don't just put an image into
+ the documentation without a link to it and an explanation of it.
+
+- Things are set up so that we only need one element, even
+ with different output formats.
+
+- The attribute format="SVG" is required. If you omit it, it will
+ still appear to work, but the stylesheets do a better job if the
+ image is declared as SVG explicitly.
+
+- The width should be set to something. This ensures that the image
+ is scaled to fit the page in PDF output. (Other widths than 100%
+ might be appropriate.)
diff --git a/doc/src/sgml/images/fixup-svg.xsl b/doc/src/sgml/images/fixup-svg.xsl
new file mode 100644
index 00000000000..d6c46b362e0
--- /dev/null
+++ b/doc/src/sgml/images/fixup-svg.xsl
@@ -0,0 +1,44 @@
+
+
+
+
+
+
+
+
+
+
+ 0 0
+
+
+
+
+
+
+
+
+
+
+
+ none
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/src/sgml/images/genetic-algorithm.gv b/doc/src/sgml/images/genetic-algorithm.gv
new file mode 100644
index 00000000000..80c354c2c8b
--- /dev/null
+++ b/doc/src/sgml/images/genetic-algorithm.gv
@@ -0,0 +1,48 @@
+digraph {
+ layout=dot;
+
+ // default values
+ node [shape=box, label="", fontname="sans-serif", style=filled, fillcolor=white, fontsize=8];
+ graph [fontname="sans-serif"]; // must be specified separately
+ edge [fontname="sans-serif"]; // must be specified separately
+
+ // an unobtrusive background color
+ pad="1.0, 0.5";
+ bgcolor=whitesmoke;
+
+ // layout of edges and nodes
+ splines=ortho;
+ nodesep=0.3;
+ ranksep=0.3;
+
+ // nodes
+ a1[label="INITIALIZE t := 0"];
+ a2[label="INITIALIZE P(t)"];
+ a3[label="evaluate FITNESS of P(t)"];
+ a4[shape="diamond", label="STOPPING CRITERION"; width=4];
+
+ // connect 'end' node with 'a9' node (bottom of figure)
+ {
+ rank=same;
+ a9[label="t := t + 1"];
+ // end-symbol similar to UML notation
+ end[shape=doublecircle, label="end", width=0.5];
+ }
+
+ a5[label="P'(t) := RECOMBINATION{P(t)}"];
+ a6[label="P''(t) := MUTATION{P'(t)}"];
+ a7[label="P(t+1) := SELECTION{P''(t) + P(t)}"];
+ a8[label="evaluate FITNESS of P''(t)"];
+
+ // edges
+ a1 -> a2 -> a3 -> a4;
+ a4 -> a5[xlabel="false ", fontsize=10];
+ a4 -> end[xlabel="true ", fontsize=10];
+ a5 -> a6 -> a7 -> a8 -> a9;
+ a4 -> a9 [dir=back];
+
+ // explain the notation
+ expl [shape=plaintext, fontsize=10, width=3.2, fillcolor=whitesmoke,
+ label="P(t): generation of ancestors at a time t\lP''(t): generation of descendants at a time t\l"];
+
+}
diff --git a/doc/src/sgml/images/genetic-algorithm.svg b/doc/src/sgml/images/genetic-algorithm.svg
new file mode 100644
index 00000000000..fb9fdd1ba78
--- /dev/null
+++ b/doc/src/sgml/images/genetic-algorithm.svg
@@ -0,0 +1,140 @@
+
+
+
+
+
+%3
+
+
+
+a1
+
+INITIALIZE t := 0
+
+
+
+a2
+
+INITIALIZE P(t)
+
+
+
+a1->a2
+
+
+
+
+
+a3
+
+evaluate FITNESS of P(t)
+
+
+
+a2->a3
+
+
+
+
+
+a4
+
+STOPPING CRITERION
+
+
+
+a3->a4
+
+
+
+
+
+a9
+
+t := t + 1
+
+
+
+a4->a9
+
+
+
+
+
+end
+
+
+end
+
+
+
+a4->end
+
+
+true
+
+
+
+a5
+
+P'(t) := RECOMBINATION{P(t)}
+
+
+
+a4->a5
+
+
+false
+
+
+
+a6
+
+P''(t) := MUTATION{P'(t)}
+
+
+
+a5->a6
+
+
+
+
+
+a7
+
+P(t+1) := SELECTION{P''(t) + P(t)}
+
+
+
+a6->a7
+
+
+
+
+
+a8
+
+evaluate FITNESS of P''(t)
+
+
+
+a7->a8
+
+
+
+
+
+a8->a9
+
+
+
+
+
+expl
+
+P(t): generation of ancestors at a time t
+P''(t): generation of descendants at a time t
+
+
+
diff --git a/doc/src/sgml/images/gin.gv b/doc/src/sgml/images/gin.gv
new file mode 100644
index 00000000000..097e91029a4
--- /dev/null
+++ b/doc/src/sgml/images/gin.gv
@@ -0,0 +1,93 @@
+digraph "gin" {
+ layout=dot;
+ node [label="", shape=box, style=filled, fillcolor=gray, width=1.4];
+
+ m1 [label="meta page"];
+
+ subgraph cluster01 {
+ label="entry tree";
+ subgraph egroup1 {
+ rank=same;
+ e1;
+ }
+ subgraph egroup2 {
+ rank=same;
+ e2 -> e3 -> e4;
+ }
+ subgraph egroup3 {
+ rank=same;
+ e5 -> e6 -> e7 -> e8 -> e9;
+ }
+ e1 -> e4;
+ e1 -> e3;
+ e1 -> e2;
+ e2 -> e5;
+ e2 -> e6;
+ e3 -> e7;
+ e4 -> e8;
+ e4 -> e9;
+
+ e6 [fillcolor=green, label="posting list"];
+ e8 [fillcolor=green, label="posting list"];
+ e9 [fillcolor=green, label="posting list"];
+ }
+
+ subgraph cluster02 {
+ label="posting tree";
+ subgraph pgroup1 {
+ rank=same;
+ p1;
+ }
+ subgraph pgroup2 {
+ rank=same;
+ p2 -> p3;
+ }
+ p1 -> p2;
+ p1 -> p3;
+
+ p2 [fillcolor=green, label="heap ptr"];
+ p3 [fillcolor=green, label="heap ptr"];
+ }
+
+ subgraph cluster03 {
+ label="posting tree";
+ subgraph pgroup3 {
+ rank=same;
+ p4;
+ }
+
+ p4 [fillcolor=green, label="heap ptr"];
+ }
+
+ subgraph cluster04 {
+ label="posting tree";
+ subgraph pgroup4 {
+ rank=same;
+ p5;
+ }
+ subgraph pgroup5 {
+ rank=same;
+ p6 -> p7;
+ }
+ p5 -> p6;
+ p5 -> p7;
+
+ p6 [fillcolor=green, label="heap ptr"];
+ p7 [fillcolor=green, label="heap ptr"];
+ }
+
+ subgraph cluster05 {
+ label="pending list";
+ node [style=filled, fillcolor=red];
+ n1 -> n2 -> n3 -> n4;
+ }
+
+ m1 -> e1;
+ e5 -> p1;
+ e7 -> p4;
+ e7 -> p5;
+ m1 -> n1;
+
+ e5 [style=filled, fillcolor=green4];
+ e7 [style=filled, fillcolor=green4];
+}
diff --git a/doc/src/sgml/images/gin.svg b/doc/src/sgml/images/gin.svg
new file mode 100644
index 00000000000..04fe85ba44e
--- /dev/null
+++ b/doc/src/sgml/images/gin.svg
@@ -0,0 +1,317 @@
+
+
+
+
+
+gin
+
+
+cluster01
+
+entry tree
+
+
+cluster02
+
+posting tree
+
+
+cluster03
+
+posting tree
+
+
+cluster04
+
+posting tree
+
+
+cluster05
+
+pending list
+
+
+
+m1
+
+meta page
+
+
+
+e1
+
+
+
+
+m1->e1
+
+
+
+
+
+n1
+
+
+
+
+m1->n1
+
+
+
+
+
+e2
+
+
+
+
+e1->e2
+
+
+
+
+
+e3
+
+
+
+
+e1->e3
+
+
+
+
+
+e4
+
+
+
+
+e1->e4
+
+
+
+
+
+e2->e3
+
+
+
+
+
+e5
+
+
+
+
+e2->e5
+
+
+
+
+
+e6
+
+posting list
+
+
+
+e2->e6
+
+
+
+
+
+e3->e4
+
+
+
+
+
+e7
+
+
+
+
+e3->e7
+
+
+
+
+
+e8
+
+posting list
+
+
+
+e4->e8
+
+
+
+
+
+e9
+
+posting list
+
+
+
+e4->e9
+
+
+
+
+
+e5->e6
+
+
+
+
+
+p1
+
+
+
+
+e5->p1
+
+
+
+
+
+e6->e7
+
+
+
+
+
+e7->e8
+
+
+
+
+
+p4
+
+heap ptr
+
+
+
+e7->p4
+
+
+
+
+
+p5
+
+
+
+
+e7->p5
+
+
+
+
+
+e8->e9
+
+
+
+
+
+p2
+
+heap ptr
+
+
+
+p1->p2
+
+
+
+
+
+p3
+
+heap ptr
+
+
+
+p1->p3
+
+
+
+
+
+p2->p3
+
+
+
+
+
+p6
+
+heap ptr
+
+
+
+p5->p6
+
+
+
+
+
+p7
+
+heap ptr
+
+
+
+p5->p7
+
+
+
+
+
+p6->p7
+
+
+
+
+
+n2
+
+
+
+
+n1->n2
+
+
+
+
+
+n3
+
+
+
+
+n2->n3
+
+
+
+
+
+n4
+
+
+
+
+n3->n4
+
+
+
+
+
diff --git a/doc/src/sgml/images/pagelayout.svg b/doc/src/sgml/images/pagelayout.svg
new file mode 100644
index 00000000000..5b2caaf1708
--- /dev/null
+++ b/doc/src/sgml/images/pagelayout.svg
@@ -0,0 +1,35 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ PageHeaderData
+ Item
+ ItemId
+ ItemId
+ Item
+ Special
+
+
diff --git a/doc/src/sgml/images/pagelayout.txt b/doc/src/sgml/images/pagelayout.txt
new file mode 100644
index 00000000000..40bee5d1699
--- /dev/null
+++ b/doc/src/sgml/images/pagelayout.txt
@@ -0,0 +1,11 @@
++----------------+--------+--------+--------------------+
+| PageHeaderData | ItemId | ItemId +=--------> |
++----------------+---+----+---+----+ |
+| | | |
+| | +-----+ |
+| +--+------+ |
+| | | |
+| v v |
+| +----------+-----------------+---------+
+| <----=+ Item | Item | Special |
++----------------+----------+-----------------+---------+
diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml
index 24c3405f918..dd54c688024 100644
--- a/doc/src/sgml/indexam.sgml
+++ b/doc/src/sgml/indexam.sgml
@@ -3,6 +3,14 @@
Index Access Method Interface Definition
+
+ Index Access Method
+
+
+ indexam
+ Index Access Method
+
+
This chapter defines the interface between the core
PostgreSQL system and index access
@@ -50,8 +58,8 @@
Each index access method is described by a row in the
pg_am
system catalog. The pg_am entry
- specifies a name and a handler function for the access
- method. These entries can be created and deleted using the
+ specifies a name and a handler function for the index
+ access method. These entries can be created and deleted using the
and
SQL commands.
@@ -112,10 +120,10 @@ typedef struct IndexAmRoutine
bool ampredlocks;
/* does AM support parallel scan? */
bool amcanparallel;
- /* type of data stored in index, or InvalidOid if variable */
- Oid amkeytype;
/* does AM support columns included with clause INCLUDE? */
bool amcaninclude;
+ /* type of data stored in index, or InvalidOid if variable */
+ Oid amkeytype;
/* interface functions */
ambuild_function ambuild;
@@ -127,6 +135,7 @@ typedef struct IndexAmRoutine
amcostestimate_function amcostestimate;
amoptions_function amoptions;
amproperty_function amproperty; /* can be NULL */
+ ambuildphasename_function ambuildphasename; /* can be NULL */
amvalidate_function amvalidate;
ambeginscan_function ambeginscan;
amrescan_function amrescan;
@@ -238,7 +247,7 @@ ambuild (Relation heapRelation,
but is empty. It must be filled in with whatever fixed data the
access method requires, plus entries for all tuples already existing
in the table. Ordinarily the ambuild function will call
- IndexBuildHeapScan() to scan the table for existing tuples
+ table_index_build_scan() to scan the table for existing tuples
and compute the keys that need to be inserted into the index.
The function must return a palloc'd struct containing statistics about
the new index.
@@ -385,7 +394,8 @@ amcostestimate (PlannerInfo *root,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
Estimate the costs of an index scan. This function is described fully
in , below.
@@ -467,6 +477,18 @@ amproperty (Oid index_oid, int attno,
+char *
+ambuildphasename (int64 phasenum);
+
+ Return the textual name of the given build phase number.
+ The phase numbers are those reported during an index build via the
+ pgstat_progress_update_param interface.
+ The phase names are then exposed in the
+ pg_stat_progress_create_index view.
+
+
+
+
bool
amvalidate (Oid opclassoid);
@@ -613,7 +635,8 @@ amendscan (IndexScanDesc scan);
End a scan and release resources. The scan struct itself
should not be freed, but any locks or pins taken internally by the
- access method must be released.
+ access method must be released, as well as any other memory allocated
+ by ambeginscan and other scan-related functions.
@@ -718,7 +741,7 @@ amparallelrescan (IndexScanDesc scan);
the TIDs of all the tuples it has been told about that match the
scan keys . The access method is not involved in
actually fetching those tuples from the index's parent table, nor in
- determining whether they pass the scan's time qualification test or other
+ determining whether they pass the scan's visibility test or other
conditions.
@@ -987,8 +1010,9 @@ amparallelrescan (IndexScanDesc scan);
using unique indexes , which are indexes that disallow
multiple entries with identical keys. An access method that supports this
feature sets amcanunique true.
- (At present, only b-tree supports it.) Columns listed in the
- INCLUDE clause are not used to enforce uniqueness.
+ (At present, only b-tree supports it.) Columns listed in the
+ INCLUDE clause are not considered when enforcing
+ uniqueness.
@@ -1154,7 +1178,8 @@ amcostestimate (PlannerInfo *root,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
The first three parameters are inputs:
@@ -1196,7 +1221,7 @@ amcostestimate (PlannerInfo *root,
- The last four parameters are pass-by-reference outputs:
+ The last five parameters are pass-by-reference outputs:
@@ -1235,6 +1260,15 @@ amcostestimate (PlannerInfo *root,
+
+
+ *indexPages
+
+
+ Set to number of index leaf pages
+
+
+
@@ -1282,6 +1316,11 @@ amcostestimate (PlannerInfo *root,
table.
+
+ The indexPages should be set to the number of leaf pages.
+ This is used to estimate the number of workers for parallel index scan.
+
+
When loop_count is greater than one, the returned numbers
should be averages expected for any one scan of the index.
diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml
index 14a1aa56cb5..95c0a1926c5 100644
--- a/doc/src/sgml/indices.sgml
+++ b/doc/src/sgml/indices.sgml
@@ -281,6 +281,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10;
For more information see .
+
+ Like GiST, SP-GiST supports nearest-neighbor searches.
+ For SP-GiST operator classes that support distance ordering, the
+ corresponding operator is specified in the Ordering Operators
+ column in .
+
+
index
@@ -497,8 +504,9 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor);
By default, B-tree indexes store their entries in ascending order
- with nulls last. This means that a forward scan of an index on
- column x produces output satisfying ORDER BY x
+ with nulls last (table TID is treated as a tiebreaker column among
+ otherwise equal entries). This means that a forward scan of an
+ index on column x produces output satisfying ORDER BY x
(or more verbosely, ORDER BY x ASC NULLS LAST ). The
index can also be scanned backward, producing output satisfying
ORDER BY x DESC
@@ -638,8 +646,7 @@ CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST);
Indexes can also be used to enforce uniqueness of a column's value,
or the uniqueness of the combined values of more than one column.
-CREATE UNIQUE INDEX name ON table (column , ... )
-[ INCLUDE (column , ... ) ];
+CREATE UNIQUE INDEX name ON table (column , ... );
Currently, only B-tree indexes can be declared unique.
@@ -648,9 +655,7 @@ CREATE UNIQUE INDEX name ON table
- INCLUDE clause aren't used to enforce constraints
- (UNIQUE, PRIMARY KEY, etc).
+ indexed columns are equal in multiple rows.
@@ -809,18 +814,20 @@ SELECT *
FROM access_log
WHERE url = '/index.html' AND client_ip = inet '212.78.10.32';
- A query that cannot use this index is:
+ Here the query's IP address is covered by the partial index. The
+ following query cannot use the partial index, as it uses an IP address
+ that is excluded from the index:
SELECT *
FROM access_log
-WHERE client_ip = inet '192.168.100.23';
+WHERE url = '/index.html' AND client_ip = inet '192.168.100.23';
Observe that this kind of partial index requires that the common
values be predetermined, so such partial indexes are best used for
- data distributions that do not change. The indexes can be recreated
+ data distributions that do not change. Such indexes can be recreated
occasionally to adjust for new data distributions, but this adds
maintenance effort.
@@ -971,6 +978,266 @@ CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
+
+ Index-Only Scans and Covering Indexes
+
+
+ index
+ index-only scans
+
+
+ index-only scan
+
+
+ index
+ covering
+
+
+ covering index
+
+
+
+ All indexes in PostgreSQL
+ are secondary indexes, meaning that each index is
+ stored separately from the table's main data area (which is called the
+ table's heap in PostgreSQL terminology). This means that
+ in an ordinary index scan, each row retrieval requires fetching data from
+ both the index and the heap. Furthermore, while the index entries that
+ match a given indexable WHERE condition are usually
+ close together in the index, the table rows they reference might be
+ anywhere in the heap. The heap-access portion of an index scan thus
+ involves a lot of random access into the heap, which can be slow,
+ particularly on traditional rotating media. (As described in
+ , bitmap scans try to alleviate
+ this cost by doing the heap accesses in sorted order, but that only goes
+ so far.)
+
+
+
+ To solve this performance problem, PostgreSQL
+ supports index-only scans , which can answer
+ queries from an index alone without any heap access. The basic idea is
+ to return values directly out of each index entry instead of consulting
+ the associated heap entry. There are two fundamental restrictions on
+ when this method can be used:
+
+
+
+
+ The index type must support index-only scans. B-tree indexes always
+ do. GiST and SP-GiST indexes support index-only scans for some
+ operator classes but not others. Other index types have no support.
+ The underlying requirement is that the index must physically store, or
+ else be able to reconstruct, the original data value for each index
+ entry. As a counterexample, GIN indexes cannot support index-only
+ scans because each index entry typically holds only part of the
+ original data value.
+
+
+
+
+
+ The query must reference only columns stored in the index. For
+ example, given an index on columns x
+ and y of a table that also has a
+ column z , these queries could use index-only scans:
+
+SELECT x, y FROM tab WHERE x = 'key';
+SELECT x FROM tab WHERE x = 'key' AND y < 42;
+
+ but these queries could not:
+
+SELECT x, z FROM tab WHERE x = 'key';
+SELECT x FROM tab WHERE x = 'key' AND z < 42;
+
+ (Expression indexes and partial indexes complicate this rule,
+ as discussed below.)
+
+
+
+
+
+
+ If these two fundamental requirements are met, then all the data values
+ required by the query are available from the index, so an index-only scan
+ is physically possible. But there is an additional requirement for any
+ table scan in PostgreSQL : it must verify that
+ each retrieved row be visible to the query's MVCC
+ snapshot, as discussed in . Visibility information
+ is not stored in index entries, only in heap entries; so at first glance
+ it would seem that every row retrieval would require a heap access
+ anyway. And this is indeed the case, if the table row has been modified
+ recently. However, for seldom-changing data there is a way around this
+ problem. PostgreSQL tracks, for each page in
+ a table's heap, whether all rows stored in that page are old enough to be
+ visible to all current and future transactions. This information is
+ stored in a bit in the table's visibility map . An
+ index-only scan, after finding a candidate index entry, checks the
+ visibility map bit for the corresponding heap page. If it's set, the row
+ is known visible and so the data can be returned with no further work.
+ If it's not set, the heap entry must be visited to find out whether it's
+ visible, so no performance advantage is gained over a standard index
+ scan. Even in the successful case, this approach trades visibility map
+ accesses for heap accesses; but since the visibility map is four orders
+ of magnitude smaller than the heap it describes, far less physical I/O is
+ needed to access it. In most situations the visibility map remains
+ cached in memory all the time.
+
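+ As a minimal illustration (using the tab example
+ above; the exact plan chosen depends on table contents and statistics),
+ you can watch this mechanism at work with EXPLAIN ANALYZE :
+
+VACUUM tab;  -- sets all-visible bits for pages with no recent changes
+EXPLAIN ANALYZE SELECT x FROM tab WHERE x = 'key';
+
+ If the planner picks an Index Only Scan , the plan output
+ includes a Heap Fetches counter; it stays near zero as
+ long as the pages' all-visible bits remain set.
+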
+
+
+ In short, while an index-only scan is possible given the two fundamental
+ requirements, it will be a win only if a significant fraction of the
+ table's heap pages have their all-visible map bits set. But tables in
+ which a large fraction of the rows are unchanging are common enough to
+ make this type of scan very useful in practice.
+
+
+
+
+ INCLUDE
+ in index definitions
+
+ To make effective use of the index-only scan feature, you might choose to
+ create a covering index , which is an index
+ specifically designed to include the columns needed by a particular
+ type of query that you run frequently. Since queries typically need to
+ retrieve more columns than just the ones they search
+ on, PostgreSQL allows you to create an index
+ in which some columns are just payload
and are not part
+ of the search key. This is done by adding an INCLUDE
+ clause listing the extra columns. For example, if you commonly run
+ queries like
+
+SELECT y FROM tab WHERE x = 'key';
+
+ the traditional approach to speeding up such queries would be to create
+ an index on x only. However, an index defined as
+
+CREATE INDEX tab_x_y ON tab(x) INCLUDE (y);
+
+ could handle these queries as index-only scans,
+ because y can be obtained from the index without
+ visiting the heap.
+
+
+
+ Because column y is not part of the index's search
+ key, it does not have to be of a data type that the index can handle;
+ it's merely stored in the index and is not interpreted by the index
+ machinery. Also, if the index is a unique index, that is
+
+CREATE UNIQUE INDEX tab_x_y ON tab(x) INCLUDE (y);
+
+ the uniqueness condition applies to just column x ,
+ not to the combination of x and y .
+ (An INCLUDE clause can also be written
+ in UNIQUE and PRIMARY KEY
+ constraints, providing alternative syntax for setting up an index like
+ this.)
+
+
+
+ It's wise to be conservative about adding non-key payload columns to an
+ index, especially wide columns. If an index tuple exceeds the
+ maximum size allowed for the index type, data insertion will fail.
+ In any case, non-key columns duplicate data from the index's table
+ and bloat the size of the index, thus potentially slowing searches.
+ And remember that there is little point in including payload columns in an
+ index unless the table changes slowly enough that an index-only scan is
+ likely to not need to access the heap. If the heap tuple must be visited
+ anyway, it costs nothing more to get the column's value from there.
+ Other restrictions are that expressions are not currently supported as
+ included columns, and that only B-tree and GiST indexes currently support
+ included columns.
+
+
+
+ Before PostgreSQL had
+ the INCLUDE feature, people sometimes made covering
+ indexes by writing the payload columns as ordinary index columns,
+ that is writing
+
+CREATE INDEX tab_x_y ON tab(x, y);
+
+ even though they had no intention of ever using y as
+ part of a WHERE clause. This works fine as long as
+ the extra columns are trailing columns; making them be leading columns is
+ unwise for the reasons explained in .
+ However, this method doesn't support the case where you want the index to
+ enforce uniqueness on the key column(s).
+
+
+
+ Suffix truncation always removes non-key
+ columns from upper B-Tree levels. As payload columns, they are
+ never used to guide index scans. The truncation process also
+ removes one or more trailing key column(s) when the remaining
+ prefix of key column(s) happens to be sufficient to describe tuples
+ on the lowest B-Tree level. In practice, covering indexes without
+ an INCLUDE clause often avoid storing columns
+ that are effectively payload in the upper levels. However,
+ explicitly defining payload columns as non-key columns
+ reliably keeps the tuples in upper levels
+ small.
+
+
+
+ In principle, index-only scans can be used with expression indexes.
+ For example, given an index on f(x)
+ where x is a table column, it should be possible to
+ execute
+
+SELECT f(x) FROM tab WHERE f(x) < 1;
+
+ as an index-only scan; and this is very attractive
+ if f() is an expensive-to-compute function.
+ However, PostgreSQL 's planner is currently not
+ very smart about such cases. It considers a query to be potentially
+ executable by index-only scan only when all columns
+ needed by the query are available from the index. In this
+ example, x is not needed except in the
+ context f(x) , but the planner does not notice that and
+ concludes that an index-only scan is not possible. If an index-only scan
+ seems sufficiently worthwhile, this can be worked around by
+ adding x as an included column, for example
+
+CREATE INDEX tab_f_x ON tab (f(x)) INCLUDE (x);
+
+ An additional caveat, if the goal is to avoid
+ recalculating f(x) , is that the planner won't
+ necessarily match uses of f(x) that aren't in
+ indexable WHERE clauses to the index column. It will
+ usually get this right in simple queries such as shown above, but not in
+ queries that involve joins. These deficiencies may be remedied in future
+ versions of PostgreSQL .
+
+
+
+ Partial indexes also have interesting interactions with index-only scans.
+ Consider the partial index shown in :
+
+CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
+ WHERE success;
+
+ In principle, we could do an index-only scan on this index to satisfy a
+ query like
+
+SELECT target FROM tests WHERE subject = 'some-subject' AND success;
+
+ But there's a problem: the WHERE clause refers
+ to success which is not available as a result column
+ of the index. Nonetheless, an index-only scan is possible because the
+ plan does not need to recheck that part of the WHERE
+ clause at run time: all entries found in the index necessarily
+ have success = true so this need not be explicitly
+ checked in the plan. PostgreSQL versions 9.6
+ and later will recognize such cases and allow index-only scans to be
+ generated, but older versions will not.
+
+
+
+
Operator Classes and Operator Families
@@ -1138,183 +1405,6 @@ CREATE INDEX test1c_content_y_index ON test1c (content COLLATE "y");
-
- Index-Only Scans
-
-
- index
- index-only scans
-
-
- index-only scan
-
-
-
- All indexes in PostgreSQL are secondary
- indexes, meaning that each index is stored separately from the table's
- main data area (which is called the table's heap
- in PostgreSQL terminology). This means that in an
- ordinary index scan, each row retrieval requires fetching data from both
- the index and the heap. Furthermore, while the index entries that match a
- given indexable WHERE condition are usually close together in
- the index, the table rows they reference might be anywhere in the heap.
- The heap-access portion of an index scan thus involves a lot of random
- access into the heap, which can be slow, particularly on traditional
- rotating media. (As described in ,
- bitmap scans try to alleviate this cost by doing the heap accesses in
- sorted order, but that only goes so far.)
-
-
-
- To solve this performance problem, PostgreSQL
- supports index-only scans , which can answer queries from an
- index alone without any heap access. The basic idea is to return values
- directly out of each index entry instead of consulting the associated heap
- entry. There are two fundamental restrictions on when this method can be
- used:
-
-
-
-
- The index type must support index-only scans. B-tree indexes always
- do. GiST and SP-GiST indexes support index-only scans for some
- operator classes but not others. Other index types have no support.
- The underlying requirement is that the index must physically store, or
- else be able to reconstruct, the original data value for each index
- entry. As a counterexample, GIN indexes cannot support index-only
- scans because each index entry typically holds only part of the
- original data value.
-
-
-
-
-
- The query must reference only columns stored in the index. For
- example, given an index on columns x and y of a
- table that also has a column z , these queries could use
- index-only scans:
-
-SELECT x, y FROM tab WHERE x = 'key';
-SELECT x FROM tab WHERE x = 'key' AND y < 42;
-
- but these queries could not:
-
-SELECT x, z FROM tab WHERE x = 'key';
-SELECT x FROM tab WHERE x = 'key' AND z < 42;
-
- (Expression indexes and partial indexes complicate this rule,
- as discussed below.)
-
-
-
-
-
-
- If these two fundamental requirements are met, then all the data values
- required by the query are available from the index, so an index-only scan
- is physically possible. But there is an additional requirement for any
- table scan in PostgreSQL : it must verify that each
- retrieved row be visible
to the query's MVCC snapshot, as
- discussed in . Visibility information is not stored
- in index entries, only in heap entries; so at first glance it would seem
- that every row retrieval would require a heap access anyway. And this is
- indeed the case, if the table row has been modified recently. However,
- for seldom-changing data there is a way around this
- problem. PostgreSQL tracks, for each page in a table's
- heap, whether all rows stored in that page are old enough to be visible to
- all current and future transactions. This information is stored in a bit
- in the table's visibility map . An index-only scan, after
- finding a candidate index entry, checks the visibility map bit for the
- corresponding heap page. If it's set, the row is known visible and so the
- data can be returned with no further work. If it's not set, the heap
- entry must be visited to find out whether it's visible, so no performance
- advantage is gained over a standard index scan. Even in the successful
- case, this approach trades visibility map accesses for heap accesses; but
- since the visibility map is four orders of magnitude smaller than the heap
- it describes, far less physical I/O is needed to access it. In most
- situations the visibility map remains cached in memory all the time.
-
-
-
- In short, while an index-only scan is possible given the two fundamental
- requirements, it will be a win only if a significant fraction of the
- table's heap pages have their all-visible map bits set. But tables in
- which a large fraction of the rows are unchanging are common enough to
- make this type of scan very useful in practice.
-
-
-
- To make effective use of the index-only scan feature, you might choose to
- create indexes in which only the leading columns are meant to
- match WHERE clauses, while the trailing columns
- hold payload
data to be returned by a query. For example, if
- you commonly run queries like
-
-SELECT y FROM tab WHERE x = 'key';
-
- the traditional approach to speeding up such queries would be to create an
- index on x only. However, an index on (x, y)
- would offer the possibility of implementing this query as an index-only
- scan. As previously discussed, such an index would be larger and hence
- more expensive than an index on x alone, so this is attractive
- only if the table is known to be mostly static. Note it's important that
- the index be declared on (x, y) not (y, x) , as for
- most index types (particularly B-trees) searches that do not constrain the
- leading index columns are not very efficient.
-
-
-
- In principle, index-only scans can be used with expression indexes.
- For example, given an index on f(x) where x is a
- table column, it should be possible to execute
-
-SELECT f(x) FROM tab WHERE f(x) < 1;
-
- as an index-only scan; and this is very attractive if f() is
- an expensive-to-compute function. However, PostgreSQL 's
- planner is currently not very smart about such cases. It considers a
- query to be potentially executable by index-only scan only when
- all columns needed by the query are available from the index.
- In this example, x is not needed except in the
- context f(x) , but the planner does not notice that and
- concludes that an index-only scan is not possible. If an index-only scan
- seems sufficiently worthwhile, this can be worked around by declaring the
- index to be on (f(x), x) , where the second column is not
- expected to be used in practice but is just there to convince the planner
- that an index-only scan is possible. An additional caveat, if the goal is
- to avoid recalculating f(x) , is that the planner won't
- necessarily match uses of f(x) that aren't in
- indexable WHERE clauses to the index column. It will usually
- get this right in simple queries such as shown above, but not in queries
- that involve joins. These deficiencies may be remedied in future versions
- of PostgreSQL .
-
-
-
- Partial indexes also have interesting interactions with index-only scans.
- Consider the partial index shown in :
-
-CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
- WHERE success;
-
- In principle, we could do an index-only scan on this index to satisfy a
- query like
-
-SELECT target FROM tests WHERE subject = 'some-subject' AND success;
-
- But there's a problem: the WHERE clause refers
- to success which is not available as a result column of the
- index. Nonetheless, an index-only scan is possible because the plan does
- not need to recheck that part of the WHERE clause at run time:
- all entries found in the index necessarily have success = true
- so this need not be explicitly checked in the
- plan. PostgreSQL versions 9.6 and later will recognize
- such cases and allow index-only scans to be generated, but older versions
- will not.
-
-
-
-
Examining Index Usage
diff --git a/doc/src/sgml/information_schema.sgml b/doc/src/sgml/information_schema.sgml
index 09ef2827f2a..906fe7819f5 100644
--- a/doc/src/sgml/information_schema.sgml
+++ b/doc/src/sgml/information_schema.sgml
@@ -952,6 +952,62 @@
+
+ column_column_usage
+
+
+ The view column_column_usage identifies all generated
+ columns that depend on another base column in the same table. Only tables
+ owned by a currently enabled role are included.
+
+
+
+ column_column_usage Columns
+
+
+
+
+ Name
+ Data Type
+ Description
+
+
+
+
+
+ table_catalog
+ sql_identifier
+ Name of the database containing the table (always the current database)
+
+
+
+ table_schema
+ sql_identifier
+ Name of the schema containing the table
+
+
+
+ table_name
+ sql_identifier
+ Name of the table
+
+
+
+ column_name
+ sql_identifier
+ Name of the base column that a generated column depends on
+
+
+
+ dependent_column
+ sql_identifier
+ Name of the generated column
+
+
+
+
+
+
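+ For example (a minimal sketch; the table name t is
+ arbitrary), a stored generated column appears in this view as a
+ dependency on its base column:
+
+CREATE TABLE t (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
+SELECT column_name, dependent_column
+  FROM information_schema.column_column_usage
+  WHERE table_name = 't';
+-- expected: one row with column_name = a, dependent_column = b
+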
column_domain_usage
@@ -1256,7 +1312,7 @@
The view columns contains information about all
table columns (or view columns) in the database. System columns
- (oid , etc.) are not included. Only those columns are
+ (ctid , etc.) are not included. Only those columns are
shown that the current user has access to (by way of being the
owner or having some privilege).
@@ -1648,13 +1704,19 @@
is_generated
character_data
- Applies to a feature not available in PostgreSQL
+
+ If the column is a generated column, then ALWAYS ,
+ else NEVER .
+
generation_expression
character_data
- Applies to a feature not available in PostgreSQL
+
+ If the column is a generated column, then the generation expression,
+ else null.
+
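+
+ Continuing the hypothetical table t sketched above for
+ column_column_usage , these columns would read as:
+
+SELECT column_name, is_generated, generation_expression
+  FROM information_schema.columns
+  WHERE table_name = 't';
+-- expected: a reports NEVER with a null expression;
+-- b reports ALWAYS with generation expression (a * 2)
+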
@@ -2621,8 +2683,9 @@ ORDER BY c.ordinal_position;
For permission checking, the set of applicable roles
is applied, which can be broader than the set of enabled roles. So
generally, it is better to use the view
- applicable_roles instead of this one; see also
- there.
+ applicable_roles instead of this one; see
+ for details on the
+ applicable_roles view.
@@ -5793,7 +5856,7 @@ ORDER BY c.ordinal_position;
character_data
Statement that is executed by the trigger (currently always
- EXECUTE PROCEDURE
+ EXECUTE FUNCTION
function (...) )
@@ -6674,7 +6737,11 @@ ORDER BY c.ordinal_position;
check_option
character_data
- Applies to a feature not available in PostgreSQL
+
+ CASCADED or LOCAL if the view
+ has a CHECK OPTION defined on it,
+ NONE if not
+
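+
+ For example (an illustrative sketch with arbitrary names), an
+ updatable view created with a CHECK OPTION reports it here:
+
+CREATE TABLE t2 (a int);
+CREATE VIEW v AS SELECT a FROM t2 WHERE a > 0
+  WITH CASCADED CHECK OPTION;
+SELECT table_name, check_option
+  FROM information_schema.views WHERE table_name = 'v';
+-- expected: check_option = CASCADED
+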
diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml
index 99e9c7b78ee..08556b6ebe7 100644
--- a/doc/src/sgml/install-windows.sgml
+++ b/doc/src/sgml/install-windows.sgml
@@ -19,10 +19,9 @@
There are several different ways of building PostgreSQL on
Windows . The simplest way to build with
- Microsoft tools is to install Visual Studio Express 2017
- for Windows Desktop and use the included
- compiler. It is also possible to build with the full
- Microsoft Visual C++ 2005 to 2017 .
+ Microsoft tools is to install Visual Studio 2019
+ and use the included compiler. It is also possible to build with the full
+ Microsoft Visual C++ 2013 to 2019 .
In some cases that requires the installation of the
Windows SDK in addition to the compiler.
@@ -69,28 +68,35 @@
Visual Studio Express or some versions of the
Microsoft Windows SDK . If you do not already have a
Visual Studio environment set up, the easiest
- ways are to use the compilers from Visual Studio Express 2017
- for Windows Desktop or those in the Windows SDK
- 8.1 , which are both free downloads from Microsoft.
+ ways are to use the compilers from
+ Visual Studio 2019 or those in the
+ Windows SDK 10 , which are both free downloads
+ from Microsoft.
Both 32-bit and 64-bit builds are possible with the Microsoft Compiler suite.
32-bit PostgreSQL builds are possible with
- Visual Studio 2005 to
- Visual Studio 2017 (including Express editions),
- as well as standalone Windows SDK releases 6.0 to 8.1.
+ Visual Studio 2013 to
+ Visual Studio 2019 ,
+ as well as standalone Windows SDK releases 8.1a to 10.
64-bit PostgreSQL builds are supported with
- Microsoft Windows SDK version 6.0a to 8.1 or
- Visual Studio 2008 and above. Compilation
- is supported down to Windows XP and
- Windows Server 2003 when building with
- Visual Studio 2005 to
- Visual Studio 2013 . Building with
- Visual Studio 2015 is supported down to
- Windows Vista and Windows Server 2008 .
- Building with Visual Studio 2017 is supported
- down to Windows 7 SP1 and Windows Server 2008 R2 SP1 .
+ Microsoft Windows SDK version 8.1a to 10 or
+ Visual Studio 2013 and above. Compilation
+ is supported down to Windows 7 and
+ Windows Server 2008 R2 SP1 when building with
+ Visual Studio 2013 to
+ Visual Studio 2019 .
+
@@ -162,7 +168,7 @@ $ENV{MSBFLAGS}="/m";
If your build environment doesn't ship with a supported version of the
Microsoft Windows SDK it
is recommended that you upgrade to the latest version (currently
- version 7.1), available for download from
+ version 10), available for download from
.
@@ -182,7 +188,7 @@ $ENV{MSBFLAGS}="/m";
ActiveState Perl is required to run the build generation scripts. MinGW
or Cygwin Perl will not work. It must also be present in the PATH.
Binaries can be downloaded from
-
+
(Note: version 5.8.3 or later is required;
the free Standard Distribution is sufficient).
@@ -293,11 +299,11 @@ $ENV{MSBFLAGS}="/m";
- openssl
+ OpenSSL
Required for SSL support. Binaries can be downloaded from
-
- or source from .
+
+ or source from .
@@ -314,7 +320,7 @@ $ENV{MSBFLAGS}="/m";
Python
Required for building PL/Python . Binaries can
- be downloaded from .
+ be downloaded from .
@@ -332,7 +338,7 @@ $ENV{MSBFLAGS}="/m";
- Special Considerations for 64-bit Windows
+ Special Considerations for 64-Bit Windows
PostgreSQL will only build for the x64 architecture on 64-bit Windows; there
@@ -348,7 +354,7 @@ $ENV{MSBFLAGS}="/m";
To use a server-side third party library such as python or
- openssl , this library must also be
+ OpenSSL , this library must also be
64-bit. There is no support for loading a 32-bit library in a 64-bit
server. Several of the third party libraries that PostgreSQL supports may
only be available in 32-bit versions, in which case they cannot be used with
@@ -473,7 +479,7 @@ $ENV{CONFIG}="Debug";
ActiveState Perl installation, nor in the ActiveState Perl Package
Manager (PPM) library. To install, download the
IPC-Run-<version>.tar.gz source archive from CPAN,
- at , and
+ at , and
uncompress. Edit the buildenv.pl file, and add a PERL5LIB
variable to point to the lib subdirectory from the
extracted archive. For example:
@@ -486,52 +492,5 @@ $ENV{PERL5LIB}=$ENV{PERL5LIB} . ';c:\IPC-Run-0.94\lib';
-
- Building the Documentation
-
-
- Building the PostgreSQL documentation in HTML format requires several tools
- and files. Create a root directory for all these files, and store them
- in the subdirectories in the list below.
-
-
- OpenJade 1.3.1-2
-
- Download from
-
- and uncompress in the subdirectory openjade-1.3.1 .
-
-
-
-
- DocBook DTD 4.2
-
- Download from
-
- and uncompress in the subdirectory docbook .
-
-
-
-
- ISO character entities
-
- Download from
- and
- uncompress in the subdirectory docbook .
-
-
-
- Edit the buildenv.pl file, and add a variable for the
- location of the root directory, for example:
-
-$ENV{DOCROOT}='c:\docbook';
-
- To build the documentation, run the command
- builddoc.bat . Note that this will actually run the
- build twice, in order to generate the indexes. The generated HTML files
- will be in doc\src\sgml .
-
-
-
diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml
index 7a7823b7a30..d8494e293bd 100644
--- a/doc/src/sgml/installation.sgml
+++ b/doc/src/sgml/installation.sgml
@@ -15,12 +15,20 @@ documentation. See standalone-profile.xsl for details.
installation
+
This chapter describes the installation of
PostgreSQL using the source code
- distribution. (If you are installing a pre-packaged distribution,
+ distribution. If you are installing a pre-packaged distribution,
such as an RPM or Debian package, ignore this chapter
- and read the packager's instructions instead.)
+ and read the packager's instructions instead.
+
+
+
+ If you are building PostgreSQL for Microsoft
+ Windows, read this chapter if you intend to build with MinGW or Cygwin;
+ but if you intend to build with Microsoft's Visual
+ C++ , see instead.
@@ -37,7 +45,7 @@ mkdir /usr/local/pgsql/data
chown postgres /usr/local/pgsql/data
su - postgres
/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
-/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data >logfile 2>&1 &
+/usr/local/pgsql/bin/pg_ctl -D /usr/local/pgsql/data -l logfile start
/usr/local/pgsql/bin/createdb test
/usr/local/pgsql/bin/psql test
@@ -54,10 +62,8 @@ su - postgres
In general, a modern Unix-compatible platform should be able to run
PostgreSQL .
The platforms that had received specific testing at the
- time of release are listed in
- below. In the doc subdirectory of the distribution
- there are several platform-specific FAQ documents you
- might wish to consult if you are having trouble.
+ time of release are described in
+ below.
@@ -85,7 +91,7 @@ su - postgres
You need an ISO /ANSI C compiler (at least
- C89-compliant). Recent
+ C99-compliant). Recent
versions of GCC are recommended, but
PostgreSQL is known to build using a wide variety
of compilers from different vendors.
@@ -245,8 +251,10 @@ su - postgres
You need OpenSSL , if you want to support
- encrypted client connections. The minimum required version is
- 0.9.8.
+ encrypted client connections. OpenSSL is
+ also required for random number generation on platforms that do not
+ have /dev/urandom (except Windows). The minimum
+ version required is 0.9.8.
@@ -316,25 +324,25 @@ su - postgres
If you need to get a GNU package, you can find
it at your local GNU mirror site (see
+ url="https://www.gnu.org/prep/ftp">
for a list) or at .
Also check that you have sufficient disk space. You will need about
- 100 MB for the source tree during compilation and about 20 MB for
+ 350 MB for the source tree during compilation and about 60 MB for
the installation directory. An empty database cluster takes about
- 35 MB; databases take about five times the amount of space that a
+ 40 MB; databases take about five times the amount of space that a
flat text file with the same data would take. If you are going to
run the regression tests you will temporarily need up to an extra
- 150 MB. Use the df command to check free disk
+ 300 MB. Use the df command to check free disk
space.
- Getting The Source
+ Getting the Source
The PostgreSQL &version; sources can be obtained from the
@@ -347,8 +355,11 @@ su - postgres
gunzip postgresql-&version;.tar.gz
tar xf postgresql-&version;.tar
- (Use bunzip2 instead of gunzip if you
- have the .bz2 file.)
+ (Use bunzip2 instead of gunzip if
+ you have the .bz2 file. Also, note that most
+ modern versions of tar can unpack compressed archives
+ directly, so you don't really need the
+ separate gunzip or bunzip2 step.)
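+ For example, with such a tar the download can be
+ unpacked in one step:
+
+tar xf postgresql-&version;.tar.gz
+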
This will create a directory
postgresql-&version; under the current directory
with the PostgreSQL sources.
@@ -385,10 +396,14 @@ su - postgres
This script will run a number of tests to determine values for various
system dependent variables and detect any quirks of your
operating system, and finally will create several files in the
- build tree to record what it found. You can also run
- configure in a directory outside the source
- tree, if you want to keep the build directory separate. This
- procedure is also called a
+ build tree to record what it found.
+
+
+
+ You can also run configure in a directory outside
+ the source tree, and then build there, if you want to keep the build
+ directory separate from the original source files. This procedure is
+ called a
VPATH VPATH
build. Here's how:
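+
+ A minimal VPATH sequence looks like this (the directory name and
+ source-tree path are placeholders):
+
+mkdir build_dir
+cd build_dir
+/path/to/source/tree/configure [options go here]
+make
+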
@@ -408,8 +423,231 @@ su - postgres
You can customize the build and installation process by supplying one
- or more of the following command line options to
- configure :
+ or more command line options to configure .
+ Typically you would customize the install location, or the set of
+ optional features that are built. configure
+ has a large number of options, which are described in
+ .
+
+
+
+ Also, configure responds to certain environment
+ variables, as described in .
+ These provide additional ways to customize the configuration.
+
+
+
+
+ Build
+
+
+ To start the build, type either of:
+
+make
+make all
+
+ (Remember to use GNU make .)
+ The build will take a few minutes depending on your
+ hardware. The last line displayed should be:
+
+All of PostgreSQL successfully made. Ready to install.
+
+
+
+
+ If you want to build everything that can be built, including the
+ documentation (HTML and man pages), and the additional modules
+ (contrib ), type instead:
+
+make world
+
+ The last line displayed should be:
+
+PostgreSQL, contrib, and documentation successfully made. Ready to install.
+
+
+
+
+ If you want to invoke the build from another makefile rather than
+ manually, you must unset MAKELEVEL or set it to zero,
+ for instance like this:
+
+build-postgresql:
+ $(MAKE) -C postgresql MAKELEVEL=0 all
+
+ Failure to do that can lead to strange error messages, typically about
+ missing header files.
+
+
+
+
+ Regression Tests
+
+
+ regression test
+
+
+
+ If you want to test the newly built server before you install it,
+ you can run the regression tests at this point. The regression
+ tests are a test suite to verify that PostgreSQL
+ runs on your machine in the way the developers expected it
+ to. Type:
+
+make check
+
+ (This won't work as root; do it as an unprivileged user.)
+ See for
+ detailed information about interpreting the test results. You can
+ repeat this test at any later time by issuing the same command.
+
+
+
+
+ Installing the Files
+
+
+
+ If you are upgrading an existing system be sure to read
+ ,
+ which has instructions about upgrading a
+ cluster.
+
+
+
+
+ To install PostgreSQL enter:
+
+make install
+
+ This will install files into the directories that were specified
+ in . Make sure that you have appropriate
+ permissions to write into that area. Normally you need to do this
+ step as root. Alternatively, you can create the target
+ directories in advance and arrange for appropriate permissions to
+ be granted.
+
+
+
+ To install the documentation (HTML and man pages), enter:
+
+make install-docs
+
+
+
+
+ If you built the world above, type instead:
+
+make install-world
+
+ This also installs the documentation.
+
+
+
+ You can use make install-strip instead of
+ make install to strip the executable files and
+ libraries as they are installed. This will save some space. If
+ you built with debugging support, stripping will effectively
+ remove the debugging support, so it should only be done if
+ debugging is no longer needed. install-strip
+ tries to do a reasonable job saving space, but it does not have
+ perfect knowledge of how to strip every unneeded byte from an
+ executable file, so if you want to save all the disk space you
+ possibly can, you will have to do manual work.
+
+
+
+ The standard installation provides all the header files needed for client
+ application development as well as for server-side program
+ development, such as custom functions or data types written in C.
+ (Prior to PostgreSQL 8.0, a separate make
+ install-all-headers command was needed for the latter, but this
+ step has been folded into the standard install.)
+
+
+
+ Client-only installation:
+
+ If you want to install only the client applications and
+ interface libraries, then you can use these commands:
+
+make -C src/bin install
+make -C src/include install
+make -C src/interfaces install
+make -C doc install
+
+ src/bin has a few binaries for server-only use,
+ but they are small.
+
+
+
+
+
+
+ Uninstallation:
+
+ To undo the installation use the command make
+ uninstall . However, this will not remove any created directories.
+
+
+
+
+ Cleaning:
+
+
+ After the installation you can free disk space by removing the built
+ files from the source tree with the command make
+ clean . This will preserve the files made by the configure
+ program, so that you can rebuild everything with make
+ later on. To reset the source tree to the state in which it was
+ distributed, use make distclean . If you are going to
+ build for several platforms within the same source tree you must do
+ this and re-configure for each platform. (Alternatively, use
+ a separate build tree for each platform, so that the source tree
+ remains unmodified.)
+
+
+
+
+ If you perform a build and then discover that your configure
+ options were wrong, or if you change anything that configure
+ investigates (for example, software upgrades), then it's a good
+ idea to do make distclean before reconfiguring and
+ rebuilding. Without this, your changes in configuration choices
+ might not propagate everywhere they need to.
+
+
+
+ configure Options
+
+
+ configure options
+
+
+
+ configure 's command line options are explained below.
+ This list is not exhaustive (use ./configure --help
+ to get one that is). The options not covered here are meant for
+ advanced use cases such as cross-compilation, and are documented in
+ the standard Autoconf documentation.
+
+
+
+ Installation Locations
+
+
+ These options control where make install will put
+ the files. The --prefix option is sufficient for
+ most cases. If you have special needs, you can customize the
+ installation subdirectories with the other options described in this
+ section. Beware however that changing the relative locations of the
+ different subdirectories may render the installation non-relocatable,
+ meaning you won't be able to move it after installation.
+ (The man and doc locations are
+ not affected by this restriction.) For relocatable installs, you
+ might want to use the --disable-rpath option
+ described later.
+
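+
+ For example (the path is only an illustration), a self-contained
+ installation under /opt/pgsql would be configured with:
+
+./configure --prefix=/opt/pgsql
+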
@@ -422,22 +660,6 @@ su - postgres
will ever be installed directly into the
PREFIX directory.
-
-
- If you have special needs, you can also customize the
- individual subdirectories with the following options. However,
- if you leave these with their defaults, the installation will be
- relocatable, meaning you can move the directory after
- installation. (The man and doc
- locations are not affected by this.)
-
-
-
- For relocatable installs, you might want to use
- configure 's --disable-rpath
- option. Also, you will need to tell the operating system how
- to find the shared libraries.
-
@@ -597,56 +819,21 @@ su - postgres
for dynamically loadable modules.
-
-
-
-
- --with-extra-version=STRING
-
-
- Append STRING to the PostgreSQL version number. You
- can use this, for example, to mark binaries built from unreleased Git
- snapshots or containing custom patches with an extra version string
- such as a git describe identifier or a
- distribution package release number.
-
-
-
+
-
- --with-includes=DIRECTORIES
-
-
- DIRECTORIES is a colon-separated list of
- directories that will be added to the list the compiler
- searches for header files. If you have optional packages
- (such as GNU Readline ) installed in a non-standard
- location,
- you have to use this option and probably also the corresponding
- --with-libraries option.
-
-
- Example: --with-includes=/opt/gnu/include:/usr/sup/include .
-
-
-
+
+ PostgreSQL Features
-
- --with-libraries=DIRECTORIES
-
-
- DIRECTORIES is a colon-separated list of
- directories to search for libraries. You will probably have
- to use this option (and the corresponding
- --with-includes option) if you have packages
- installed in non-standard locations.
-
-
- Example: --with-libraries=/opt/gnu/lib:/usr/sup/lib .
-
-
-
+
+ The options described in this section enable building of
+ various PostgreSQL features that are not
+ built by default. Most of these are non-default only because they
+ require additional software, as described in
+ .
+
+
+
--enable-nls=LANGUAGES
@@ -666,28 +853,13 @@ su - postgres
To use this option, you will need an implementation of the
- Gettext API; see above.
+ Gettext API.
- --with-pgport=NUMBER
-
-
- Set NUMBER as the default port number for
- server and clients. The default is 5432. The port can always
- be changed later on, but if you specify it here then both
- server and clients will have the same default compiled in,
- which can be very convenient. Usually the only good reason
- to select a non-default value is if you intend to run multiple
- PostgreSQL servers on the same machine.
-
-
-
-
-
- --with-perl
+ --with-perl
Build the PL/Perl server-side language.
@@ -722,38 +894,41 @@ su - postgres
interfacing to Tcl. This file is normally found automatically
at a well-known location, but if you want to use a different
version of Tcl you can specify the directory in which to look
- for it.
+ for tclConfig.sh .
- --with-gssapi
+ --with-icu
- Build with support for GSSAPI authentication. On many
- systems, the GSSAPI (usually a part of the Kerberos installation)
- system is not installed in a location
- that is searched by default (e.g., /usr/include ,
- /usr/lib ), so you must use the options
- --with-includes and --with-libraries in
- addition to this option. configure will check
- for the required header files and libraries to make sure that
- your GSSAPI installation is sufficient before proceeding.
+ Build with support for
+ the ICU ICU
+ library, enabling use of ICU collation
+ features (see
+ ) .
+ This requires the ICU4C package
+ to be installed. The minimum required version
+ of ICU4C is currently 4.2.
-
-
-
- --with-krb-srvnam=NAME
-
- The default name of the Kerberos service principal used
- by GSSAPI.
- postgres is the default. There's usually no
- reason to change this unless you have a Windows environment,
- in which case it must be set to upper case
- POSTGRES .
+ By default,
+ pkg-config pkg-config
+ will be used to find the required compilation options. This is
+ supported for ICU4C version 4.6 and later.
+ For older versions, or if pkg-config is
+ not available, the variables ICU_CFLAGS
+ and ICU_LIBS can be specified
+ to configure , like in this example:
+
+./configure ... --with-icu ICU_CFLAGS='-I/some/where/include' ICU_LIBS='-L/some/where/lib -licui18n -licuuc -licudata'
+
+ (If ICU4C is in the default search path
+ for the compiler, then you still need to specify nonempty strings in
+ order to avoid use of pkg-config , for
+ example, ICU_CFLAGS=' ' .)
@@ -775,9 +950,10 @@ su - postgres
will be used to find the required compilation options.
llvm-config , and then
llvm-config-$major-$minor for all supported
- versions, will be searched on PATH . If that would not
- yield the correct binary, use LLVM_CONFIG to specify a
- path to the correct llvm-config . For example
+ versions, will be searched for in your PATH . If
+ that would not yield the desired program,
+ use LLVM_CONFIG to specify a path to the
+ correct llvm-config . For example
./configure ... --with-llvm LLVM_CONFIG='/path/to/llvm/bin/llvm-config'
@@ -793,37 +969,6 @@ su - postgres
-
- --with-icu
-
-
- Build with support for
- the ICU ICU
- library. This requires the ICU4C package
- to be installed. The minimum required version
- of ICU4C is currently 4.2.
-
-
-
- By default,
- pkg-config pkg-config
- will be used to find the required compilation options. This is
- supported for ICU4C version 4.6 and later.
- For older versions, or if pkg-config is
- not available, the variables ICU_CFLAGS
- and ICU_LIBS can be specified
- to configure , like in this example:
-
-./configure ... --with-icu ICU_CFLAGS='-I/some/where/include' ICU_LIBS='-L/some/where/lib -licui18n -licuuc -licudata'
-
- (If ICU4C is in the default search path
- for the compiler, then you still need to specify a nonempty string in
- order to avoid use of pkg-config , for
- example, ICU_CFLAGS=' ' .)
-
-
-
-
--with-openssl
@@ -844,22 +989,18 @@ su - postgres
- --with-pam
-
-
- Build with PAM PAM
- (Pluggable Authentication Modules) support.
-
-
-
-
-
- --with-bsd-auth
+ --with-gssapi
- Build with BSD Authentication support.
- (The BSD Authentication framework is
- currently only available on OpenBSD.)
+ Build with support for GSSAPI authentication. On many systems, the
+ GSSAPI system (usually a part of the Kerberos installation) is not
+ installed in a location
+ that is searched by default (e.g., /usr/include ,
+ /usr/lib ), so you must use the options
+ --with-includes and --with-libraries in
+ addition to this option. configure will check
+ for the required header files and libraries to make sure that
+ your GSSAPI installation is sufficient before proceeding.
@@ -883,41 +1024,37 @@ su - postgres
- --with-systemd
+ --with-pam
- Build with support
- for systemd systemd
- service notifications. This improves integration if the server binary
- is started under systemd but has no impact
- otherwise; see for more
- information . libsystemd and the
- associated header files need to be installed to be able to use this
- option.
+ Build with PAM PAM
+ (Pluggable Authentication Modules) support.
- --without-readline
+ --with-bsd-auth
- Prevents use of the Readline library
- (and libedit as well). This option disables
- command-line editing and history in
- psql , so it is not recommended.
+ Build with BSD Authentication support.
+ (The BSD Authentication framework is
+ currently only available on OpenBSD.)
- --with-libedit-preferred
+ --with-systemd
- Favors the use of the BSD-licensed libedit library
- rather than GPL-licensed Readline . This option
- is significant only if you have both libraries installed; the
- default in that case is to use Readline .
+ Build with support
+ for systemd systemd
+ service notifications. This improves integration if the server
+ is started under systemd but has no impact
+ otherwise; see for more
+ information . libsystemd and the
+ associated header files need to be installed to use this option.
@@ -926,8 +1063,9 @@ su - postgres
--with-bonjour
- Build with Bonjour support. This requires Bonjour support
- in your operating system. Recommended on macOS.
+ Build with support for Bonjour automatic service discovery.
+ This requires Bonjour support in your operating system.
+ Recommended on macOS.
@@ -979,7 +1117,7 @@ su - postgres
--with-libxml
- Build with libxml (enables SQL/XML support). Libxml version 2.6.23 or
+ Build with libxml, enabling SQL/XML support. Libxml version 2.6.23 or
later is required for this feature.
@@ -1002,95 +1140,94 @@ su - postgres
--with-libxslt
- Use libxslt when building the
+ Build with libxslt, enabling the
- module. xml2 relies on this library
- to perform XSL transformations of XML.
+ module to perform XSL transformations of XML.
+ --with-libxml must be specified as well.
+
+
+
+
+
+ Anti-Features
+
+
+ The options described in this section allow disabling
+ certain PostgreSQL features that are built
+ by default, but which might need to be turned off if the required
+ software or system features are not available. Using these options is
+ not recommended unless really necessary.
+
+
+
+
- --disable-float4-byval
+ --without-readline
- Disable passing float4 values by value
, causing them
- to be passed by reference
instead. This option costs
- performance, but may be needed for compatibility with old
- user-defined functions that are written in C and use the
- version 0
calling convention. A better long-term
- solution is to update any such functions to use the
- version 1
calling convention.
+ Prevents use of the Readline library
+ (and libedit as well). This option disables
+ command-line editing and history in
+ psql .
- --disable-float8-byval
+ --with-libedit-preferred
- Disable passing float8 values by value
, causing them
- to be passed by reference
instead. This option costs
- performance, but may be needed for compatibility with old
- user-defined functions that are written in C and use the
- version 0
calling convention. A better long-term
- solution is to update any such functions to use the
- version 1
calling convention.
- Note that this option affects not only float8, but also int8 and some
- related types such as timestamp.
- On 32-bit platforms, --disable-float8-byval is the default
- and it is not allowed to select --enable-float8-byval .
+ Favors the use of the BSD-licensed libedit library
+ rather than GPL-licensed Readline . This option
+ is significant only if you have both libraries installed; the
+ default in that case is to use Readline .
- --with-segsize=SEGSIZE
+ --without-zlib
- Set the segment size , in gigabytes. Large tables are
- divided into multiple operating-system files, each of size equal
- to the segment size. This avoids problems with file size limits
- that exist on many platforms. The default segment size, 1 gigabyte,
- is safe on all supported platforms. If your operating system has
- largefile
support (which most do, nowadays), you can use
- a larger segment size. This can be helpful to reduce the number of
- file descriptors consumed when working with very large tables.
- But be careful not to select a value larger than is supported
- by your platform and the file systems you intend to use. Other
- tools you might wish to use, such as tar , could
- also set limits on the usable file size.
- It is recommended, though not absolutely required, that this value
- be a power of 2.
- Note that changing this value requires an initdb.
+
+ zlib
+
+ Prevents use of the Zlib library.
+ This disables
+ support for compressed archives in pg_dump
+ and pg_restore .
- --with-blocksize=BLOCKSIZE
+ --disable-float4-byval
- Set the block size , in kilobytes. This is the unit
- of storage and I/O within tables. The default, 8 kilobytes,
- is suitable for most situations; but other values may be useful
- in special cases.
- The value must be a power of 2 between 1 and 32 (kilobytes).
- Note that changing this value requires an initdb.
+ Disable passing float4 values by value
, causing them
+ to be passed by reference
instead. This option costs
+ performance, but may be needed for compatibility with very old
+ user-defined functions written in C.
- --with-wal-blocksize=BLOCKSIZE
+ --disable-float8-byval
- Set the WAL block size , in kilobytes. This is the unit
- of storage and I/O within the WAL log. The default, 8 kilobytes,
- is suitable for most situations; but other values may be useful
- in special cases.
- The value must be a power of 2 between 1 and 64 (kilobytes).
- Note that changing this value requires an initdb.
+ Disable passing float8 values by value
, causing them
+ to be passed by reference
instead. This option costs
+ performance, but may be needed for compatibility with very old
+ user-defined functions written in C.
+ Note that this option affects not only float8, but also int8 and some
+ related types such as timestamp.
+ On 32-bit platforms, --disable-float8-byval is the default
+ and it is not allowed to select --enable-float8-byval .
@@ -1101,7 +1238,7 @@ su - postgres
Allow the build to succeed even if PostgreSQL
has no CPU spinlock support for the platform. The lack of
- spinlock support will result in poor performance; therefore,
+ spinlock support will result in very poor performance; therefore,
this option should only be used if the build aborts and
informs you that the platform lacks spinlock support. If this
option is required to build PostgreSQL on
@@ -1112,19 +1249,13 @@ su - postgres
- --disable-strong-random
+ --disable-atomics
- Allow the build to succeed even if PostgreSQL
- has no support for strong random numbers on the platform.
- A source of random numbers is needed for some authentication
- protocols, as well as some routines in the
-
- module. --disable-strong-random disables functionality that
- requires cryptographically strong random numbers, and substitutes
- a weak pseudo-random-number-generator for the generation of
- authentication salt values and query cancel keys. It may make
- authentication less secure.
+ Disable use of CPU atomic operations. This option does nothing on
+ platforms that lack such operations. On platforms that do have
+ them, this will result in poor performance. This option is only
+ useful for debugging or making performance comparisons.
@@ -1136,7 +1267,51 @@ su - postgres
Disable the thread-safety of client libraries. This prevents
concurrent threads in libpq and
ECPG programs from safely controlling
- their private connection handles.
+ their private connection handles. Use this only on platforms
+ with deficient threading support.
+
+
+
+
+
+
+
+
+
+ Build Process Details
+
+
+
+
+ --with-includes=DIRECTORIES
+
+
+ DIRECTORIES is a colon-separated list of
+ directories that will be added to the list the compiler
+ searches for header files. If you have optional packages
+ (such as GNU Readline ) installed in a non-standard
+ location,
+ you have to use this option and probably also the corresponding
+ --with-libraries option.
+
+
+ Example: --with-includes=/opt/gnu/include:/usr/sup/include .
+
+
+
+
+
+ --with-libraries=DIRECTORIES
+
+
+ DIRECTORIES is a colon-separated list of
+ directories to search for libraries. You will probably have
+ to use this option (and the corresponding
+ --with-includes option) if you have packages
+ installed in non-standard locations.
+
+
+ Example: --with-libraries=/opt/gnu/lib:/usr/sup/lib .
@@ -1182,62 +1357,178 @@ su - postgres
- --without-zlib
+ --with-extra-version=STRING
-
- zlib
-
- Prevents use of the Zlib library. This disables
- support for compressed archives in pg_dump
- and pg_restore .
- This option is only intended for those rare systems where this
- library is not available.
+ Append STRING to the PostgreSQL version number. You
+ can use this, for example, to mark binaries built from unreleased Git
+ snapshots or containing custom patches with an extra version string,
+ such as a git describe identifier or a
+ distribution package release number.
- --enable-debug
+ --disable-rpath
- Compiles all programs and libraries with debugging symbols.
- This means that you can run the programs in a debugger
- to analyze problems. This enlarges the size of the installed
- executables considerably, and on non-GCC compilers it usually
- also disables compiler optimization, causing slowdowns. However,
- having the symbols available is extremely helpful for dealing
- with any problems that might arise. Currently, this option is
- recommended for production installations only if you use GCC.
- But you should always have it on if you are doing development work
- or running a beta version.
+ Do not mark PostgreSQL 's executables
+ to indicate that they should search for shared libraries in the
+ installation's library directory (see --libdir ).
+ On most platforms, this marking uses an absolute path to the
+ library directory, so that it will be unhelpful if you relocate
+ the installation later. However, you will then need to provide
+ some other way for the executables to find the shared libraries.
+ Typically this requires configuring the operating system's
+ dynamic linker to search the library directory; see
+ for more detail.
+
+
+
+
+
+ Miscellaneous
+
+
+ It's fairly common, particularly for test builds, to adjust the
+ default port number with --with-pgport .
+ The other options in this section are recommended only for advanced
+ users.
+
+
+
+
- --enable-coverage
+ --with-pgport=NUMBER
- If using GCC, all programs and libraries are compiled with
- code coverage testing instrumentation. When run, they
- generate files in the build directory with code coverage
- metrics.
- See
- for more information. This option is for use only with GCC
- and when doing development work.
+ Set NUMBER as the default port number for
+ server and clients. The default is 5432. The port can always
+ be changed later on, but if you specify it here then both
+ server and clients will have the same default compiled in,
+ which can be very convenient. Usually the only good reason
+ to select a non-default value is if you intend to run multiple
+ PostgreSQL servers on the same machine.
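+
+ For example (the port number is arbitrary):
+
+./configure --with-pgport=5433
+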
- --enable-profiling
+ --with-krb-srvnam=NAME
- If using GCC, all programs and libraries are compiled so they
- can be profiled. On backend exit, a subdirectory will be created
- that contains the gmon.out file for use in profiling.
- This option is for use only with GCC and when doing development work.
+ The default name of the Kerberos service principal used
+ by GSSAPI.
+ postgres is the default. There's usually no
+ reason to change this unless you are building for a Windows
+ environment, in which case it must be set to upper case
+ POSTGRES .
+
+
+
+
+
+ --with-segsize=SEGSIZE
+
+
+ Set the segment size , in gigabytes. Large tables are
+ divided into multiple operating-system files, each of size equal
+ to the segment size. This avoids problems with file size limits
+ that exist on many platforms. The default segment size, 1 gigabyte,
+ is safe on all supported platforms. If your operating system has
+ largefile
support (which most do, nowadays), you can use
+ a larger segment size. This can be helpful to reduce the number of
+ file descriptors consumed when working with very large tables.
+ But be careful not to select a value larger than is supported
+ by your platform and the file systems you intend to use. Other
+ tools you might wish to use, such as tar , could
+ also set limits on the usable file size.
+ It is recommended, though not absolutely required, that this value
+ be a power of 2.
+ Note that changing this value breaks on-disk database compatibility,
+ meaning you cannot use pg_upgrade to upgrade to
+ a build with a different segment size.
+
+
+
+
+
+ --with-blocksize=BLOCKSIZE
+
+
+ Set the block size , in kilobytes. This is the unit
+ of storage and I/O within tables. The default, 8 kilobytes,
+ is suitable for most situations; but other values may be useful
+ in special cases.
+ The value must be a power of 2 between 1 and 32 (kilobytes).
+ Note that changing this value breaks on-disk database compatibility,
+ meaning you cannot use pg_upgrade to upgrade to
+ a build with a different block size.
+
+
+
+
+
+ --with-wal-blocksize=BLOCKSIZE
+
+
+ Set the WAL block size , in kilobytes. This is the unit
+ of storage and I/O within the WAL log. The default, 8 kilobytes,
+ is suitable for most situations; but other values may be useful
+ in special cases.
+ The value must be a power of 2 between 1 and 64 (kilobytes).
+ Note that changing this value breaks on-disk database compatibility,
+ meaning you cannot use pg_upgrade to upgrade to
+ a build with a different WAL block size.
+
+
+
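+
+ As an illustration only (the values are arbitrary, within the limits
+ stated above), a build with non-default storage geometry could be
+ configured as:
+
+./configure --with-segsize=2 --with-blocksize=32 --with-wal-blocksize=16
+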
+
+
+
+
+
+
+ Developer Options
+
+
+ Most of the options in this section are only of interest for
+ developing or debugging PostgreSQL .
+ They are not recommended for production builds, except
+ for --enable-debug , which can be useful to enable
+ detailed bug reports in the unlucky event that you encounter a bug.
+ On platforms supporting DTrace, --enable-dtrace
+ may also be reasonable to use in production.
+
+
+
+ When building an installation that will be used to develop code inside
+ the server, it is recommended to use at least the
+ options --enable-debug
+ and --enable-cassert .
+
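+
+ For instance (the installation prefix is an arbitrary example), a
+ typical development build could be set up with:
+
+./configure --prefix=$HOME/pg-dev --enable-debug --enable-cassert
+make
+make check
+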
+
+
+
+
+ --enable-debug
+
+
+ Compiles all programs and libraries with debugging symbols.
+ This means that you can run the programs in a debugger
+ to analyze problems. This enlarges the size of the installed
+ executables considerably, and on non-GCC compilers it usually
+ also disables compiler optimization, causing slowdowns. However,
+ having the symbols available is extremely helpful for dealing
+ with any problems that might arise. Currently, this option is
+ recommended for production installations only if you use GCC.
+ But you should always have it on if you are doing development work
+ or running a beta version.
@@ -1261,6 +1552,17 @@ su - postgres
+
+ --enable-tap-tests
+
+
+ Enable tests using the Perl TAP tools. This requires a Perl
+ installation and the Perl module IPC::Run .
+ See for more information.
+
+
+
+
--enable-depend
@@ -1275,6 +1577,34 @@ su - postgres
+
+ --enable-coverage
+
+
+ If using GCC, all programs and libraries are compiled with
+ code coverage testing instrumentation. When run, they
+ generate files in the build directory with code coverage
+ metrics.
+ See
+ for more information. This option is for use only with GCC
+ and when doing development work.
+
+
+
+
+
+ --enable-profiling
+
+
+ If using GCC, all programs and libraries are compiled so they
+ can be profiled. On backend exit, a subdirectory will be created
+ that contains the gmon.out file with the
+ profile data.
+ This option is for use only with GCC and when doing development work.
+
+
+
+
--enable-dtrace
@@ -1293,7 +1623,7 @@ su - postgres
environment variable DTRACE can be set. This
will often be necessary because dtrace is
typically installed under /usr/sbin ,
- which might not be in the path.
+ which might not be in your PATH .
@@ -1301,7 +1631,7 @@ su - postgres
can be specified in the environment variable
DTRACEFLAGS . On Solaris,
to include DTrace support in a 64-bit binary, you must specify
- DTRACEFLAGS="-64" to configure. For example,
+ DTRACEFLAGS="-64" . For example,
using the GCC compiler:
./configure CC='gcc -m64' --enable-dtrace DTRACEFLAGS='-64' ...
@@ -1313,36 +1643,50 @@ su - postgres
-
-
- --enable-tap-tests
-
-
- Enable tests using the Perl TAP tools. This requires a Perl
- installation and the Perl module IPC::Run .
- See for more information.
-
-
-
-
-
- If you prefer a C compiler different from the one
- configure picks, you can set the
- environment variable CC to the program of your choice.
- By default, configure will pick
- gcc if available, else the platform's
- default (usually cc ). Similarly, you can override the
- default compiler flags if needed with the CFLAGS variable.
-
+
+
+
+
+
+ configure Environment Variables
+
+
+ configure environment variables
+
+ In addition to the ordinary command-line options described above,
+ configure responds to a number of environment
+ variables.
You can specify environment variables on the
configure command line, for example:
./configure CC=/opt/bin/gcc CFLAGS='-O2 -pipe'
+ In this usage an environment variable is little different from a
+ command-line option.
+ You can also set such variables beforehand:
+
+export CC=/opt/bin/gcc
+export CFLAGS='-O2 -pipe'
+./configure
+
+ This usage can be convenient because many programs' configuration
+ scripts respond to these variables in similar ways.
+
+
+
+ The most commonly used of these environment variables are
+ CC and CFLAGS .
+ If you prefer a C compiler different from the one
+ configure picks, you can set the
+ variable CC to the program of your choice.
+ By default, configure will pick
+ gcc if available, else the platform's
+ default (usually cc ). Similarly, you can override the
+ default compiler flags if needed with the CFLAGS variable.
@@ -1482,7 +1826,7 @@ su - postgres
llvm-config program used to locate the
- LLVM installation.
+ LLVM installation
@@ -1500,8 +1844,9 @@ su - postgres
PERL
- Full path name of the Perl interpreter. This will be used to
- determine the dependencies for building PL/Perl.
+ Perl interpreter program. This will be used to determine the
+ dependencies for building PL/Perl. The default is
+ perl.
@@ -1510,13 +1855,14 @@ su - postgres
PYTHON
- Full path name of the Python interpreter. This will be used to
+ Python interpreter program. This will be used to
determine the dependencies for building PL/Python. Also,
whether Python 2 or 3 is specified here (or otherwise
implicitly chosen) determines which variant of the PL/Python
language becomes available. See
- for more information.
+ for more information. If this is not set, the following are probed
+ in this order: python python3 python2.
@@ -1525,9 +1871,11 @@ su - postgres
TCLSH
- Full path name of the Tcl interpreter. This will be used to
- determine the dependencies for building PL/Tcl, and it will
- be substituted into Tcl scripts.
+ Tcl interpreter program. This will be used to
+ determine the dependencies for building PL/Tcl.
+ If this is not set, the following are probed in this
+ order: tclsh tcl tclsh8.6 tclsh86 tclsh8.5 tclsh85
+ tclsh8.4 tclsh84.
@@ -1537,7 +1885,7 @@ su - postgres
xml2-config program used to locate the
- libxml installation.
+ libxml installation
@@ -1565,13 +1913,6 @@ su - postgres
-
- When developing code inside the server, it is recommended to
- use the configure options --enable-cassert (which
- turns on many run-time error checks) and --enable-debug
- (which improves the usefulness of debugging tools).
-
-
If using GCC, it is best to build with an optimization level of
at least -O1 , because using no optimization
@@ -1593,180 +1934,13 @@ su - postgres
adjustments, while COPT might be kept set all the time.
-
-
-
- Build
-
-
- To start the build, type:
-
-make
-
- (Remember to use GNU make .) The build
- will take a few minutes depending on your
- hardware. The last line displayed should be:
-
-All of PostgreSQL successfully made. Ready to install.
-
-
-
-
- If you want to build everything that can be built, including the
- documentation (HTML and man pages), and the additional modules
- (contrib ), type instead:
-
-make world
-
- The last line displayed should be:
-
-PostgreSQL, contrib, and documentation successfully made. Ready to install.
-
-
-
-
-
- Regression Tests
-
-
- regression test
-
-
-
- If you want to test the newly built server before you install it,
- you can run the regression tests at this point. The regression
- tests are a test suite to verify that PostgreSQL
- runs on your machine in the way the developers expected it
- to. Type:
-
-make check
-
- (This won't work as root; do it as an unprivileged user.)
- See for
- detailed information about interpreting the test results. You can
- repeat this test at any later time by issuing the same command.
-
-
-
-
- Installing the Files
-
-
-
- If you are upgrading an existing system be sure to read
- ,
- which has instructions about upgrading a
- cluster.
-
-
-
-
- To install PostgreSQL enter:
-
-make install
-
- This will install files into the directories that were specified
- in . Make sure that you have appropriate
- permissions to write into that area. Normally you need to do this
- step as root. Alternatively, you can create the target
- directories in advance and arrange for appropriate permissions to
- be granted.
-
-
-
- To install the documentation (HTML and man pages), enter:
-
-make install-docs
-
-
-
-
- If you built the world above, type instead:
-
-make install-world
-
- This also installs the documentation.
-
-
-
- You can use make install-strip instead of
- make install to strip the executable files and
- libraries as they are installed. This will save some space. If
- you built with debugging support, stripping will effectively
- remove the debugging support, so it should only be done if
- debugging is no longer needed. install-strip
- tries to do a reasonable job saving space, but it does not have
- perfect knowledge of how to strip every unneeded byte from an
- executable file, so if you want to save all the disk space you
- possibly can, you will have to do manual work.
-
-
-
- The standard installation provides all the header files needed for client
- application development as well as for server-side program
- development, such as custom functions or data types written in C.
- (Prior to PostgreSQL 8.0, a separate make
- install-all-headers command was needed for the latter, but this
- step has been folded into the standard install.)
-
-
-
- Client-only installation:
-
- If you want to install only the client applications and
- interface libraries, then you can use these commands:
-
-make -C src/bin install
-make -C src/include install
-make -C src/interfaces install
-make -C doc install
-
- src/bin has a few binaries for server-only use,
- but they are small.
-
-
-
-
-
-
- Uninstallation:
-
- To undo the installation use the command make
- uninstall . However, this will not remove any created directories.
-
-
-
-
- Cleaning:
-
-
- After the installation you can free disk space by removing the built
- files from the source tree with the command make
- clean . This will preserve the files made by the configure
- program, so that you can rebuild everything with make
- later on. To reset the source tree to the state in which it was
- distributed, use make distclean . If you are going to
- build for several platforms within the same source tree you must do
- this and re-configure for each platform. (Alternatively, use
- a separate build tree for each platform, so that the source tree
- remains unmodified.)
-
-
-
-
- If you perform a build and then discover that your configure
- options were wrong, or if you change anything that configure
- investigates (for example, software upgrades), then it's a good
- idea to do make distclean before reconfiguring and
- rebuilding. Without this, your changes in configuration choices
- might not propagate everywhere they need to.
-
+
Post-Installation Setup
-
+
Shared Libraries
@@ -1805,7 +1979,7 @@ setenv LD_LIBRARY_PATH /usr/local/pgsql/lib
/etc/profile or ~/.bash_profile. Some
good information about the caveats associated with this method can
be found at .
+ url="http://xahlee.info/UnixResource_dir/_/ldpath.html">.
@@ -1954,15 +2128,15 @@ export MANPATH
If you have installation problems on a platform that is known
to be supported according to recent build farm results, please report
- it to pgsql-bugs@postgresql.org . If you are interested
+ it to pgsql-bugs@lists.postgresql.org. If you are interested
in porting PostgreSQL to a new platform,
- pgsql-hackers@postgresql.org is the appropriate place
+ pgsql-hackers@lists.postgresql.org is the appropriate place
to discuss that.
- Platform-specific Notes
+ Platform-Specific Notes
This section documents additional platform-specific issues
@@ -1987,175 +2161,11 @@ export MANPATH
- PostgreSQL works on AIX, but getting it installed properly can be
- challenging. AIX versions from 4.3.3 to 6.1 are considered supported.
- You can use GCC or the native IBM compiler xlc . In
- general, using recent versions of AIX and PostgreSQL helps. Check
- the build farm for up to date information about which versions of
- AIX are known to work.
-
-
-
- The minimum recommended fix levels for supported AIX versions are:
-
-
-
-
- AIX 4.3.3
- Maintenance Level 11 + post ML11 bundle
-
-
-
- AIX 5.1
- Maintenance Level 9 + post ML9 bundle
-
-
-
- AIX 5.2
- Technology Level 10 Service Pack 3
-
-
-
- AIX 5.3
- Technology Level 7
-
-
-
- AIX 6.1
- Base Level
-
-
-
-
- To check your current fix level, use
- oslevel -r in AIX 4.3.3 to AIX 5.2 ML 7, or
- oslevel -s in later versions.
-
-
-
- Use the following configure flags in addition
- to your own if you have installed Readline or libz in
- /usr/local :
- --with-includes=/usr/local/include
- --with-libraries=/usr/local/lib .
+ PostgreSQL works on AIX, but AIX versions before about 6.1 have
+ various issues and are not recommended.
+ You can use GCC or the native IBM compiler xlc.
-
- GCC Issues
-
-
- On AIX 5.3, there have been some problems getting PostgreSQL to
- compile and run using GCC.
-
-
-
- You will want to use a version of GCC subsequent to 3.3.2,
- particularly if you use a prepackaged version. We had good
- success with 4.0.1. Problems with earlier versions seem to have
- more to do with the way IBM packaged GCC than with actual issues
- with GCC, so that if you compile GCC yourself, you might well
- have success with an earlier version of GCC.
-
-
-
-
- Unix-Domain Sockets Broken
-
-
- AIX 5.3 has a problem
- where sockaddr_storage is not defined to
- be large enough. In version 5.3, IBM increased the size of
- sockaddr_un , the address structure for
- Unix-domain sockets, but did not correspondingly increase the
- size of sockaddr_storage . The result of
- this is that attempts to use Unix-domain sockets with PostgreSQL
- lead to libpq overflowing the data structure. TCP/IP connections
- work OK, but not Unix-domain sockets, which prevents the
- regression tests from working.
-
-
-
- The problem was reported to IBM, and is recorded as bug report
- PMR29657. If you upgrade to maintenance level 5300-03 or later,
- that will include this fix. A quick workaround
- is to alter _SS_MAXSIZE to 1025 in
- /usr/include/sys/socket.h . In either case,
- recompile PostgreSQL once you have the corrected header file.
-
-
-
-
- Internet Address Issues
-
-
- PostgreSQL relies on the system's getaddrinfo function
- to parse IP addresses in listen_addresses ,
- pg_hba.conf , etc. Older versions of AIX have assorted
- bugs in this function. If you have problems related to these settings,
- updating to the appropriate AIX fix level shown above
- should take care of it.
-
-
-
-
-
- One user reports:
-
-
-
- When implementing PostgreSQL version 8.1 on AIX 5.3, we
- periodically ran into problems where the statistics collector
- would mysteriously
- not come up successfully. This
- appears to be the result of unexpected behavior in the IPv6
- implementation. It looks like PostgreSQL and IPv6 do not play
- very well together on AIX 5.3.
-
-
-
- Any of the following actions fix
- the problem.
-
-
-
- Delete the IPv6 address for localhost:
-
-(as root)
-# ifconfig lo0 inet6 ::1/0 delete
-
-
-
-
-
-
- Remove IPv6 from net services. The
- file /etc/netsvc.conf on AIX is roughly
- equivalent to /etc/nsswitch.conf on
- Solaris/Linux. The default, on AIX, is thus:
-
-hosts=local,bind
-
- Replace this with:
-
-hosts=local4,bind4
-
- to deactivate searching for IPv6 addresses.
-
-
-
-
-
-
-
- This is really a workaround for problems relating
- to immaturity of IPv6 support, which improved visibly during the
- course of AIX 5.3 releases. It has worked with AIX version 5.3,
- but does not represent an elegant solution to the problem. It has
- been reported that this workaround is not only unnecessary, but
- causes problems on AIX 6.1, where IPv6 support has become more mature.
-
-
-
-
-
Memory Management
@@ -2325,9 +2335,9 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address
- When building from source, proceed according to the normal
+ When building from source, proceed according to the Unix-style
installation procedure (i.e., ./configure;
- make ; etc.), noting the following-Cygwin specific
+ make; etc.), noting the following Cygwin-specific
differences:
@@ -2379,7 +2389,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address
Building might fail on some systems where a locale other than
C is in use. To fix this, set the locale to C by doing
export LANG=C.utf8 before building, and then
- setting it back to the previous setting, after you have installed
+ setting it back to the previous setting after you have installed
PostgreSQL.
@@ -2396,7 +2406,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address
make MAX_CONNECTIONS=5 check
(On some systems you can have up to about 10 simultaneous
- connections).
+ connections.)
@@ -2412,91 +2422,54 @@ make MAX_CONNECTIONS=5 check
-
- HP-UX
+
+ macOS
-
- HP-UX
+
+ macOS
installation on
- PostgreSQL 7.3+ should work on Series 700/800 PA-RISC machines
- running HP-UX 10.X or 11.X, given appropriate system patch levels
- and build tools. At least one developer routinely tests on HP-UX
- 10.20, and we have reports of successful installations on HP-UX
- 11.00 and 11.11.
-
-
-
- Aside from the PostgreSQL source distribution, you will need GNU
- make (HP's make will not do), and either GCC or HP's full ANSI C
- compiler. If you intend to build from Git sources rather than a
- distribution tarball, you will also need Flex (GNU lex) and Bison
- (GNU yacc). We also recommend making sure you are fairly
- up-to-date on HP patches. At a minimum, if you are building 64
- bit binaries on HP-UX 11.11 you may need PHSS_30966 (11.11) or a
- successor patch otherwise initdb may hang:
-
-PHSS_30966 s700_800 ld(1) and linker tools cumulative patch
-
-
- On general principles you should be current on libc and ld/dld
- patches, as well as compiler patches if you are using HP's C
- compiler. See HP's support sites such
- as for free
- copies of their latest patches.
-
-
-
- If you are building on a PA-RISC 2.0 machine and want to have
- 64-bit binaries using GCC, you must use a GCC 64-bit version.
-
-
-
- If you are building on a PA-RISC 2.0 machine and want the compiled
- binaries to run on PA-RISC 1.1 machines you will need to specify
- +DAportable in CFLAGS .
-
-
-
- If you are building on a HP-UX Itanium machine, you will need the
- latest HP ANSI C compiler with its dependent patch or successor
- patches:
-
-PHSS_30848 s700_800 HP C Compiler (A.05.57)
-PHSS_30849 s700_800 u2comp/be/plugin library Patch
-
-
-
-
- If you have both HP's C compiler and GCC's, then you might want to
- explicitly select the compiler to use when you
- run configure :
+ On recent macOS releases, it's necessary to
+ embed the sysroot path in the include switches used to
+ find some system header files. This results in the outputs of
+ the configure script varying depending on
+ which SDK version was used during configure.
+ That shouldn't pose any problem in simple scenarios, but if you are
+ trying to do something like building an extension on a different machine
+ than the server code was built on, you may need to force use of a
+ different sysroot path. To do that, set PG_SYSROOT,
+ for example
-./configure CC=cc
+make PG_SYSROOT=/desired/path all
- for HP's C compiler, or
+ To find out the appropriate path on your machine, run
-./configure CC=gcc
+xcodebuild -version -sdk macosx Path
- for GCC. If you omit this setting, then configure will
- pick gcc if it has a choice.
+ Note that building an extension using a different sysroot version than
+ was used to build the core server is not really recommended; in the
+ worst case it could result in hard-to-debug ABI inconsistencies.
- The default install target location
- is /usr/local/pgsql , which you might want to
- change to something under /opt . If so, use
- the
- --prefix switch to configure .
+ You can also select a non-default sysroot path when configuring, by
+ specifying PG_SYSROOT
+ to configure:
+
+./configure ... PG_SYSROOT=/desired/path
+
- In the regression tests, there might be some low-order-digit
- differences in the geometry tests, which vary depending on which
- compiler and math library versions you use. Any other error is
- cause for suspicion.
+ macOS's System Integrity
+ Protection (SIP) feature breaks make check,
+ because it prevents passing the needed setting
+ of DYLD_LIBRARY_PATH down to the executables being
+ tested. You can work around that by doing make
+ install before make check.
+ Most PostgreSQL developers just turn off SIP, though.
@@ -2512,12 +2485,9 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
PostgreSQL for Windows can be built using MinGW, a Unix-like build
environment for Microsoft operating systems, or using
Microsoft's Visual C++ compiler suite.
- The MinGW build variant uses the normal build system described in
+ The MinGW build procedure uses the normal build system described in
this chapter; the Visual C++ build works completely differently
and is described in .
- It is a fully native build and uses no additional software like
- MinGW. A ready-made installer is available on the main
- PostgreSQL web site.
@@ -2534,7 +2504,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
To build 64 bit binaries using MinGW, install the 64 bit tool set
- from , put its bin
+ from , put its bin
directory in the PATH , and run
configure with the
--host=x86_64-w64-mingw32 option.
@@ -2574,8 +2544,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
PostgreSQL is well-supported on Solaris. The more up to date your
- operating system, the fewer issues you will experience; details
- below.
+ operating system, the fewer issues you will experience.
@@ -2584,8 +2553,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
You can build with either GCC or Sun's compiler suite. For
better code optimization, Sun's compiler is strongly recommended
- on the SPARC architecture. We have heard reports of problems
- when using GCC 2.95.1; GCC 2.95.3 or later is recommended. If
+ on the SPARC architecture. If
you are using Sun's compiler, be careful not to select
/usr/ucb/cc ;
use /opt/SUNWspro/bin/cc .
@@ -2593,14 +2561,14 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
You can download Sun Studio
- from .
- Many of GNU tools are integrated into Solaris 10, or they are
- present on the Solaris companion CD. If you like packages for
- older version of Solaris, you can find these tools
+ from .
+ Many GNU tools are integrated into Solaris 10, or they are
+ present on the Solaris companion CD. If you need packages for
+ older versions of Solaris, you can find these tools
at .
If you prefer
sources, look
- at .
+ at .
@@ -2623,30 +2591,6 @@ configure ... LDFLAGS="-R /usr/sfw/lib:/opt/sfw/lib:/usr/local/lib"
-
- 64-bit Build Sometimes Crashes
-
-
- On Solaris 7 and older, the 64-bit version of libc has a buggy
- vsnprintf routine, which leads to erratic
- core dumps in PostgreSQL. The simplest known workaround is to
- force PostgreSQL to use its own version of vsnprintf rather than
- the library copy. To do this, after you
- run configure edit a file produced by
- configure :
- In src/Makefile.global , change the line
-
-LIBOBJS =
-
- to read
-
-LIBOBJS = snprintf.o
-
- (There might be other files already listed in this variable.
- Order does not matter.) Then build as usual.
-
-
-
Compiling for Optimal Performance
@@ -2656,18 +2600,15 @@ LIBOBJS = snprintf.o
flag to generate significantly faster binaries. Do not use any
flags that modify behavior of floating-point operations
and errno processing (e.g.,
- -fast ). These flags could raise some
- nonstandard PostgreSQL behavior for example in the date/time
- computing.
+ -fast).
If you do not have a reason to use 64-bit binaries on SPARC,
prefer the 32-bit version. The 64-bit operations are slower and
- 64-bit binaries are slower than the 32-bit variants. And on
+ 64-bit binaries are slower than the 32-bit variants. On the
other hand, 32-bit code on the AMD64 CPU family is not native,
- and that is why 32-bit code is significant slower on this CPU
- family.
+ so 32-bit code is significantly slower on that CPU family.
@@ -2692,7 +2633,7 @@ collect2: ld returned 1 exit status
make: *** [postgres] Error 1
your DTrace installation is too old to handle probes in static
- functions. You need Solaris 10u4 or newer.
+ functions. You need Solaris 10u4 or newer to use DTrace.
diff --git a/doc/src/sgml/intro.sgml b/doc/src/sgml/intro.sgml
index 3038826311a..25e98ebe07d 100644
--- a/doc/src/sgml/intro.sgml
+++ b/doc/src/sgml/intro.sgml
@@ -82,7 +82,7 @@
- What is PostgreSQL ?
+ What Is PostgreSQL?
PostgreSQL is an object-relational
diff --git a/doc/src/sgml/isn.sgml b/doc/src/sgml/isn.sgml
index 34d37ede018..598dda2e9a8 100644
--- a/doc/src/sgml/isn.sgml
+++ b/doc/src/sgml/isn.sgml
@@ -355,19 +355,19 @@ SELECT isbn13(id) FROM test;
The information to implement this module was collected from
several sites, including:
-
+
-
-
+
+
The prefixes used for hyphenation were also compiled from:
-
-
+
+
-
-
+
+
Care was taken during the creation of the algorithms and they
diff --git a/doc/src/sgml/jit.sgml b/doc/src/sgml/jit.sgml
index 2a647e8c6c5..af7e380c58c 100644
--- a/doc/src/sgml/jit.sgml
+++ b/doc/src/sgml/jit.sgml
@@ -18,25 +18,25 @@
- What is JIT compilation?
+ What Is JIT compilation?
- Just-in-time compilation (JIT ) is the process of turning
+ Just-in-Time (JIT) compilation is the process of turning
some form of interpreted program evaluation into a native program, and
- doing so at runtime.
-
- For example, instead of using a facility that can evaluate arbitrary SQL
- expressions to evaluate an SQL predicate like WHERE a.col =
- 3 , it is possible to generate a function than can be natively
- executed by the CPU that just handles that expression, yielding a speedup.
+ doing so at run time.
+ For example, instead of using general-purpose code that can evaluate
+ arbitrary SQL expressions to evaluate a particular SQL predicate
+ like WHERE a.col = 3 , it is possible to generate a
+ function that is specific to that expression and can be natively executed
+ by the CPU, yielding a speedup.
PostgreSQL has builtin support to perform
JIT compilation using LLVM when
- PostgreSQL was built with
- --with-llvm (see ).
+ PostgreSQL is built with
+ --with-llvm.
@@ -58,9 +58,23 @@
Tuple deforming is the process of transforming an on-disk tuple (see ) into its in-memory representation. It can be
- accelerated by creating a function specific to the table layout and the
- number of columns to be extracted.
+ linkend="storage-tuple-layout"/>) into its in-memory representation.
+ It can be accelerated by creating a function specific to the table layout
+ and the number of columns to be extracted.
+
+
+
+
+ Inlining
+
+ PostgreSQL is very extensible and allows new
+ data types, functions, operators and other database objects to be defined;
+ see . In fact the built-in objects are implemented
+ using nearly the same mechanisms. This extensibility implies some
+ overhead, for example due to function calls (see ).
+ To reduce that overhead, JIT compilation can inline the
+ bodies of small functions into the expressions using them. That allows a
+ significant percentage of the overhead to be optimized away.
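+
+ As a minimal sketch (not part of the original text; the threshold values
+ are arbitrary), inlining can be observed by lowering the JIT cost limits
+ for a test query:
+
+-- Force JIT compilation and inlining regardless of the estimated cost:
+SET jit_above_cost = 0;
+SET jit_inline_above_cost = 0;
+EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class;
+-- The JIT details in the output should then report "Inlining true".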
@@ -70,27 +84,12 @@
LLVM has support for optimizing generated
code. Some of the optimizations are cheap enough to be performed whenever
JIT is used, while others are only beneficial for
- longer running queries.
-
+ longer-running queries.
See for
more details about optimizations.
-
- Inlining
-
- PostgreSQL is very extensible and allows new
- datatypes, functions, operators and other database objects to be defined;
- see . In fact the built-in ones are implemented
- using nearly the same mechanisms. This extensibility implies some
- overhead, for example due to function calls (see ).
- To reduce that overhead JIT compilation can inline the
- body for small functions into the expression using them. That allows a
- significant percentage of the overhead to be optimized away.
-
-
-
@@ -98,50 +97,46 @@
JIT compilation is beneficial primarily for long-running
- CPU bound queries. Frequently these will be analytical queries. For short
+ CPU-bound queries. Frequently these will be analytical queries. For short
queries the added overhead of performing JIT compilation
will often be higher than the time it can save.
- To determine whether JIT compilation is used, the total
- cost of a query (see and ) is used.
-
-
-
- The cost of the query will be compared with GUC. If the cost is higher,
+ To determine whether JIT compilation should be used,
+ the total estimated cost of a query (see
+ and
+ ) is used.
+ The estimated cost of the query will be compared with the setting of . If the cost is higher,
JIT compilation will be performed.
+ Two further decisions are then needed.
+ Firstly, if the estimated cost is more
+ than the setting of , short
+ functions and operators used in the query will be inlined.
+ Secondly, if the estimated cost is more than the setting of , expensive optimizations are
+ applied to improve the generated code.
+ Each of these options increases the JIT compilation
+ overhead, but can reduce query execution time considerably.
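+
+ As a rough sketch of how the three thresholds relate (the values shown
+ are the usual defaults, but check your installation):
+
+SHOW jit_above_cost;          -- perform JIT at all (default 100000)
+SHOW jit_inline_above_cost;   -- additionally inline (default 500000)
+SHOW jit_optimize_above_cost; -- additionally optimize (default 500000)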
- If the planner, based on the above criterion, decided that
- JIT compilation is beneficial, two further decisions are
- made. Firstly, if the query is more costly than the GUC, expensive optimizations are
- used to improve the generated code. Secondly, if the query is more costly
- than the GUC, short functions
- and operators used in the query will be inlined. Both of these operations
- increase the JIT overhead, but can reduce query
- execution time considerably.
-
-
-
- This cost based decision will be made at plan time, not execution
- time. This means that when prepared statements are in use, and the generic
- plan is used (see ), the values of the
- GUCs set at prepare time take effect, not the settings at execution time.
+ These cost-based decisions will be made at plan time, not execution
+ time. This means that when prepared statements are in use, and a generic
+ plan is used (see ), the values of the
+ configuration parameters in effect at prepare time control the decisions,
+ not the settings at execution time.
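+
+ For example (an illustrative sketch, not from the original text), with a
+ prepared statement the threshold captured at prepare time is what counts
+ once a generic plan has been cached:
+
+SET jit_above_cost = 10;      -- in effect at prepare time
+PREPARE q AS SELECT SUM(relpages) FROM pg_class;
+SET jit_above_cost = DEFAULT; -- a later change does not affect the generic plan
+EXPLAIN ANALYZE EXECUTE q;    -- may still use JIT once a generic plan is chosen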
- If is set to off , or no
+ If is set to off, or if no
JIT implementation is available (for example because
the server was compiled without --with-llvm ),
- JIT will not performed, even if considered to be
+ JIT will not be performed, even if it would be
beneficial based on the above criteria. Setting
- to off takes effect both at plan and at execution time.
+ to off has effects at both plan and execution time.
@@ -149,48 +144,40 @@
can be used to see whether
JIT is used or not. As an example, here is a query that
is not using JIT :
-
+
=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class;
-┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ QUERY PLAN │
-├─────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-│ Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=0.303..0.303 rows=1 loops=1) │
-│ -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.017..0.111 rows=356 loops=1) │
-│ Planning Time: 0.116 ms │
-│ Execution Time: 0.365 ms │
-└─────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=0.303..0.303 rows=1 loops=1)
+ -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.017..0.111 rows=356 loops=1)
+ Planning Time: 0.116 ms
+ Execution Time: 0.365 ms
(4 rows)
-
+
Given the cost of the plan, it is entirely reasonable that no
- JIT was used, the cost of JIT would
- have been bigger than the savings. Adjusting the cost limits will lead to
- JIT use:
-
+ JIT was used; the cost of JIT would
+ have been bigger than the potential savings. Adjusting the cost limits
+ will lead to JIT use:
+
=# SET jit_above_cost = 10;
SET
=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class;
-┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ QUERY PLAN │
-├─────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-│ Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=6.049..6.049 rows=1 loops=1) │
-│ -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.019..0.052 rows=356 loops=1) │
-│ Planning Time: 0.133 ms │
-│ JIT: │
-│ Functions: 3 │
-│ Generation Time: 1.259 ms │
-│ Inlining: false │
-│ Inlining Time: 0.000 ms │
-│ Optimization: false │
-│ Optimization Time: 0.797 ms │
-│ Emission Time: 5.048 ms │
-│ Execution Time: 7.416 ms │
-└─────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=6.049..6.049 rows=1 loops=1)
+ -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.019..0.052 rows=356 loops=1)
+ Planning Time: 0.133 ms
+ JIT:
+ Functions: 3
+ Options: Inlining false, Optimization false, Expressions true, Deforming true
+ Timing: Generation 1.259 ms, Inlining 0.000 ms, Optimization 0.797 ms, Emission 5.048 ms, Total 7.104 ms
+ Execution Time: 7.416 ms
+
As visible here, JIT was used, but inlining and
expensive optimization were not. If , were lowered, just like , that would change.
+ linkend="guc-jit-inline-above-cost"/> or were also lowered,
+ that would change.
@@ -198,58 +185,53 @@ SET
Configuration
+ The configuration variable
determines whether JIT
compilation is enabled or disabled.
-
-
-
- As explained in the configuration variables
+ If it is enabled, the configuration variables
, , decide whether JIT
- compilation is performed for a query, and how much effort is spent doing
- so.
+ linkend="guc-jit-inline-above-cost"/>, and determine
+ whether JIT compilation is performed for a query,
+ and how much effort is spent doing so.
- For development and debugging purposes a few additional GUCs exist. allows the generated bitcode to be
- inspected. allows GDB to see
- generated functions. emits
- information so the perf profiler can interpret
- JIT generated functions sensibly.
+ determines which JIT
+ implementation is used. It is rarely required to be changed. See .
- determines which JIT
- implementation is used. It rarely is required to be changed. See .
+ For development and debugging purposes a few additional configuration
+ parameters exist, as described in
+ .
-
+
Extensibility
Inlining Support for Extensions
PostgreSQL 's JIT
- implementation can inline the implementation of operators and functions
- (of type C and internal ). See . To do so for functions in extensions, the
- definition of these functions needs to be made available. When using PGXS to build an extension against a server
- that has been compiled with LLVM support, the relevant files will be
- installed automatically.
+ implementation can inline the bodies of functions
+ of types C and internal, as well as
+ operators based on such functions. To do so for functions in extensions,
+ the definitions of those functions need to be made available.
+ When using PGXS to build an extension
+ against a server that has been compiled with LLVM JIT support, the
+ relevant files will be built and installed automatically.
The relevant files have to be installed into
$pkglibdir/bitcode/$extension/ and a summary of them
- to $pkglibdir/bitcode/$extension.index.bc , where
+ into $pkglibdir/bitcode/$extension.index.bc, where
$pkglibdir is the directory returned by
pg_config --pkglibdir and $extension
- the basename of the extension's shared library.
+ is the base name of the extension's shared library.
@@ -262,14 +244,16 @@ SET
- Pluggable JIT Provider
+ Pluggable JIT Providers
PostgreSQL provides a JIT
implementation based on LLVM . The interface to
the JIT provider is pluggable and the provider can be
- changed without recompiling. The provider is chosen via the GUC .
+ changed without recompiling (although currently, the build process only
+ provides inlining support data for LLVM).
+ The active provider is chosen via the setting
+ .
@@ -279,19 +263,20 @@ SET
named shared library. The normal library search path is used to locate
the library. To provide the required JIT provider
callbacks and to indicate that the library is actually a
- JIT provider it needs to provide a function named
+ JIT provider, it needs to provide a C function named
_PG_jit_provider_init . This function is passed a
struct that needs to be filled with the callback function pointers for
- individual actions.
-
+ individual actions:
+
struct JitProviderCallbacks
{
JitProviderResetAfterErrorCB reset_after_error;
JitProviderReleaseContextCB release_context;
JitProviderCompileExprCB compile_expr;
};
+
extern void _PG_jit_provider_init(JitProviderCallbacks *cb);
-
+
diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml
index e7b68fa0d24..6ff87518705 100644
--- a/doc/src/sgml/json.sgml
+++ b/doc/src/sgml/json.sgml
@@ -22,8 +22,16 @@
- There are two JSON data types: json and jsonb .
- They accept almost identical sets of values as
+ PostgreSQL offers two types for storing JSON
+ data: json and jsonb. To implement efficient query
+ mechanisms for these data types, PostgreSQL
+ also provides the jsonpath data type described in
+ .
+
+
+
+ The json and jsonb data types
+ accept almost identical sets of values as
input. The major practical difference is one of efficiency. The
json data type stores an exact copy of the input text,
which processing functions must reparse on each execution; while
@@ -123,7 +131,7 @@
- JSON primitive types and corresponding PostgreSQL types
+ JSON Primitive Types and Corresponding PostgreSQL Types
@@ -217,10 +225,15 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb;
in this example, even though those are semantically insignificant for
purposes such as equality checks.
+
+
+ For the list of built-in functions and operators available for
+ constructing and processing JSON values, see .
+
- Designing JSON documents effectively
+ Designing JSON Documents
Representing data as JSON can be considerably more flexible than
the traditional relational data model, which is compelling in
@@ -467,6 +480,22 @@ CREATE INDEX idxgintags ON api USING GIN ((jdoc -> 'tags'));
(More information on expression indexes can be found in .)
+
+ Also, a GIN index supports the @@ and @?
+ operators, which perform jsonpath matching.
+
+SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @@ '$.tags[*] == "qui"';
+
+
+SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @@ '$.tags[*] ? (@ == "qui")';
+
+ A GIN index extracts statements of the following form out of
+ jsonpath: accessors_chain = const.
+ The accessors chain may consist of .key,
+ [*], and [index] accessors.
+ jsonb_ops additionally supports .*
+ and .** accessors.
+
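+
+ For instance (a hypothetical sketch reusing the api table
+ and jdoc column from the examples above), a path ending in a
+ comparison with a constant can be extracted, while other predicates cannot:
+
+-- Reduces to accessors_chain = const, so the GIN index can be used:
+SELECT jdoc->'guid' FROM api WHERE jdoc @? '$.tags[*] ? (@ == "qui")';
+-- A regex predicate is not of that form and is not extracted:
+SELECT jdoc->'guid' FROM api WHERE jdoc @? '$.tags[*] ? (@ like_regex "^qu")';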
Another approach to querying is to exploit containment, for example:
@@ -485,7 +514,8 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu
Although the jsonb_path_ops operator class supports
- only queries with the @> operator, it has notable
+ only queries with the @>, @@
+ and @? operators, it has notable
performance advantages over the default operator
class jsonb_ops . A jsonb_path_ops
index is usually much smaller than a jsonb_ops
@@ -593,4 +623,273 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu
lists, and scalars, as appropriate.
+
+
+ jsonpath Type
+
+
+ jsonpath
+
+
+
+ The jsonpath type implements support for the SQL/JSON path language
+ in PostgreSQL to efficiently query JSON data.
+ It provides a binary representation of the parsed SQL/JSON path
+ expression that specifies the items to be retrieved by the path
+ engine from the JSON data for further processing with the
+ SQL/JSON query functions.
+
+
+
+ The semantics of SQL/JSON path predicates and operators generally follow SQL.
+ At the same time, to provide a natural way of working with JSON data,
+ SQL/JSON path syntax uses some of the JavaScript conventions:
+
+
+
+
+
+ Dot (.) is used for member access.
+
+
+
+
+ Square brackets ([]) are used for array access.
+
+
+
+
+ SQL/JSON arrays are 0-relative, unlike regular SQL arrays that start from 1.
+
+
+
+
+
+ An SQL/JSON path expression is typically written in an SQL query as an
+ SQL character string literal, so it must be enclosed in single quotes,
+ and any single quotes desired within the value must be doubled
+ (see ).
+ Some forms of path expressions require string literals within them.
+ These embedded string literals follow JavaScript/ECMAScript conventions:
+ they must be surrounded by double quotes, and backslash escapes may be
+ used within them to represent otherwise-hard-to-type characters.
+ In particular, the way to write a double quote within an embedded string
+ literal is \" , and to write a backslash itself, you
+ must write \\ . Other special backslash sequences
+ include those recognized in JSON strings:
+ \b,
+ \f,
+ \n,
+ \r,
+ \t,
+ \v
+ for various ASCII control characters, and
+ \uNNNN for a Unicode
+ character identified by its 4-hex-digit code point. The backslash
+ syntax also includes two cases not allowed by JSON:
+ \xNN for a character code
+ written with only two hex digits, and
+ \u{N... } for a character
+ code written with 1 to 6 hex digits.
+
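+
+ To make the quoting rules concrete (an illustrative example, not from the
+ original text), note the doubled single quote for the SQL literal and the
+ backslash escape inside the double-quoted embedded string:
+
+-- The jsonpath text is: $.name ? (@ == "O'Hara\u0021")
+SELECT '$.name ? (@ == "O''Hara\u0021")'::jsonpath;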
+
+
+ A path expression consists of a sequence of path elements,
+ which can be the following:
+
+
+
+ Path literals of JSON primitive types:
+ Unicode text, numeric, true, false, or null.
+
+
+
+
+ Path variables listed in .
+
+
+
+
+ Accessor operators listed in .
+
+
+
+
+ jsonpath operators and methods listed
+ in
+
+
+
+
+ Parentheses, which can be used to provide filter expressions
+ or define the order of path evaluation.
+
+
+
+
+
+
+ For details on using jsonpath expressions with SQL/JSON
+ query functions, see .
+
+
+
+ jsonpath Variables
+
+
+
+ Variable
+ Description
+
+
+
+
+ $
+ A variable representing the JSON text to be queried
+ (the context item).
+
+
+
+ $varname
+
+ A named variable. Its value can be set by the parameter
+ vars of several JSON processing functions.
+ See and
+ its notes for details.
+
+
+
+
+ @
+ A variable representing the result of path evaluation
+ in filter expressions.
+
+
+
+
+
+
+
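+ As a usage sketch for named variables (the data and variable name are
+ invented), the vars argument of jsonb_path_query
+ supplies $min at evaluation time:
+
+SELECT jsonb_path_query('[1, 2, 3, 4]', '$[*] ? (@ >= $min)', '{"min": 3}');
+-- returns 3 and 4, one row each
+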
+ jsonpath Accessors
+
+
+
+ Accessor Operator
+ Description
+
+
+
+
+
+
+ .key
+
+
+ ."$varname "
+
+
+
+
+ Member accessor that returns an object member with
+ the specified key. If the key name is a named variable
+ starting with $ or does not meet the
+ JavaScript rules of an identifier, it must be enclosed in
+ double quotes as a character string literal.
+
+
+
+
+
+
+ .*
+
+
+
+
+ Wildcard member accessor that returns the values of all
+ members located at the top level of the current object.
+
+
+
+
+
+
+ .**
+
+
+
+
+ Recursive wildcard member accessor that processes all levels
+ of the JSON hierarchy of the current object and returns all
+ the member values, regardless of their nesting level. This
+ is a PostgreSQL extension of
+ the SQL/JSON standard.
+
+
+
+
+
+
+ .**{level}
+
+
+ .**{start_level to
+ end_level}
+
+
+
+
+ Same as .**, but with a filter over nesting
+ levels of JSON hierarchy. Nesting levels are specified as integers.
+ Zero level corresponds to the current object. To access the lowest
+ nesting level, you can use the last keyword.
+ This is a PostgreSQL extension of
+ the SQL/JSON standard.
+
+
+
+
+
+
+ [subscript, ...]
+
+
+
+
+ Array element accessor.
+ subscript can be
+ given in two forms: index
+ or start_index to end_index.
+ The first form returns a single array element by its index. The second
+ form returns an array slice by the range of indexes, including the
+ elements that correspond to the provided
+ start_index and end_index.
+
+
+ The specified index can be an integer, as
+ well as an expression returning a single numeric value, which is
+ automatically cast to integer. Zero index corresponds to the first
+ array element. You can also use the last keyword
+ to denote the last array element, which is useful for handling arrays
+ of unknown length.
+
+
+
+
+
+
+ [*]
+
+
+
+
+ Wildcard array element accessor that returns all array elements.
+
+
+
+
+
+
+
+
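+ A short worked example (with hypothetical data) combining several of the
+ accessors from the table above:
+
+-- .key member access, [*] wildcard, the "last" keyword, and 0-relative indexes:
+SELECT jsonb_path_query('{"a": {"b": [10, 20, 30]}}', '$.a.b[0]');    -- 10
+SELECT jsonb_path_query('{"a": {"b": [10, 20, 30]}}', '$.a.b[last]'); -- 30
+SELECT jsonb_path_query('{"a": {"b": [10, 20, 30]}}', '$.a.b[*]');    -- 10, 20, 30
+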
diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml
index 2dba7adedfe..57dcd6ae5c7 100644
--- a/doc/src/sgml/keywords.sgml
+++ b/doc/src/sgml/keywords.sgml
@@ -75,5330 +75,6 @@
presence of a key word does not indicate the existence of a feature.
-
-
-
-
- SQL Key Words
-
-
-
-
- Key Word
- PostgreSQL
- SQL:2011
- SQL:2008
- SQL-92
-
-
-
-
-
- A
-
- non-reserved
- non-reserved
-
-
-
- ABORT
- non-reserved
-
-
-
-
-
- ABS
-
- reserved
- reserved
-
-
-
- ABSENT
-
- non-reserved
- non-reserved
-
-
-
- ABSOLUTE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- ACCESS
- non-reserved
-
-
-
-
-
- ACCORDING
-
- non-reserved
- non-reserved
-
-
-
- ACTION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- ADA
-
- non-reserved
- non-reserved
- non-reserved
-
-
- ADD
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- ADMIN
- non-reserved
- non-reserved
- non-reserved
-
-
-
- AFTER
- non-reserved
- non-reserved
- non-reserved
-
-
-
- AGGREGATE
- non-reserved
-
-
-
-
-
- ALL
- reserved
- reserved
- reserved
- reserved
-
-
- ALLOCATE
-
- reserved
- reserved
- reserved
-
-
- ALSO
- non-reserved
-
-
-
-
-
- ALTER
- non-reserved
- reserved
- reserved
- reserved
-
-
- ALWAYS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ANALYSE
- reserved
-
-
-
-
-
- ANALYZE
- reserved
-
-
-
-
-
- AND
- reserved
- reserved
- reserved
- reserved
-
-
- ANY
- reserved
- reserved
- reserved
- reserved
-
-
- ARE
-
- reserved
- reserved
- reserved
-
-
- ARRAY
- reserved
- reserved
- reserved
-
-
-
- ARRAY_AGG
-
- reserved
- reserved
-
-
-
- ARRAY_MAX_CARDINALITY
-
- reserved
-
-
-
-
- AS
- reserved
- reserved
- reserved
- reserved
-
-
- ASC
- reserved
- non-reserved
- non-reserved
- reserved
-
-
- ASENSITIVE
-
- reserved
- reserved
-
-
-
- ASSERTION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- ASSIGNMENT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ASYMMETRIC
- reserved
- reserved
- reserved
-
-
-
- AT
- non-reserved
- reserved
- reserved
- reserved
-
-
- ATOMIC
-
- reserved
- reserved
-
-
-
- ATTACH
- non-reserved
-
-
-
-
-
- ATTRIBUTE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ATTRIBUTES
-
- non-reserved
- non-reserved
-
-
-
- AUTHORIZATION
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- AVG
-
- reserved
- reserved
- reserved
-
-
- BACKWARD
- non-reserved
-
-
-
-
-
- BASE64
-
- non-reserved
- non-reserved
-
-
-
- BEFORE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- BEGIN
- non-reserved
- reserved
- reserved
- reserved
-
-
- BEGIN_FRAME
-
- reserved
-
-
-
-
- BEGIN_PARTITION
-
- reserved
-
-
-
-
- BERNOULLI
-
- non-reserved
- non-reserved
-
-
-
- BETWEEN
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- BIGINT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- BINARY
- reserved (can be function or type)
- reserved
- reserved
-
-
-
- BIT
- non-reserved (cannot be function or type)
-
-
- reserved
-
-
- BIT_LENGTH
-
-
-
- reserved
-
-
- BLOB
-
- reserved
- reserved
-
-
-
- BLOCKED
-
- non-reserved
- non-reserved
-
-
-
- BOM
-
- non-reserved
- non-reserved
-
-
-
- BOOLEAN
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- BOTH
- reserved
- reserved
- reserved
- reserved
-
-
- BREADTH
-
- non-reserved
- non-reserved
-
-
-
- BY
- non-reserved
- reserved
- reserved
- reserved
-
-
- C
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CACHE
- non-reserved
-
-
-
-
-
- CALL
-
- reserved
- reserved
-
-
-
- CALLED
- non-reserved
- reserved
- reserved
-
-
-
- CARDINALITY
-
- reserved
- reserved
-
-
-
- CASCADE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- CASCADED
- non-reserved
- reserved
- reserved
- reserved
-
-
- CASE
- reserved
- reserved
- reserved
- reserved
-
-
- CAST
- reserved
- reserved
- reserved
- reserved
-
-
- CATALOG
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- CATALOG_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CEIL
-
- reserved
- reserved
-
-
-
- CEILING
-
- reserved
- reserved
-
-
-
- CHAIN
- non-reserved
- non-reserved
- non-reserved
-
-
-
- CHAR
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- CHARACTER
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- CHARACTERISTICS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- CHARACTERS
-
- non-reserved
- non-reserved
-
-
-
- CHARACTER_LENGTH
-
- reserved
- reserved
- reserved
-
-
- CHARACTER_SET_CATALOG
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CHARACTER_SET_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CHARACTER_SET_SCHEMA
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CHAR_LENGTH
-
- reserved
- reserved
- reserved
-
-
- CHECK
- reserved
- reserved
- reserved
- reserved
-
-
- CHECKPOINT
- non-reserved
-
-
-
-
-
- CLASS
- non-reserved
-
-
-
-
-
- CLASS_ORIGIN
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CLOB
-
- reserved
- reserved
-
-
-
- CLOSE
- non-reserved
- reserved
- reserved
- reserved
-
-
- CLUSTER
- non-reserved
-
-
-
-
-
- COALESCE
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- COBOL
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COLLATE
- reserved
- reserved
- reserved
- reserved
-
-
- COLLATION
- reserved (can be function or type)
- non-reserved
- non-reserved
- reserved
-
-
- COLLATION_CATALOG
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COLLATION_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COLLATION_SCHEMA
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COLLECT
-
- reserved
- reserved
-
-
-
- COLUMN
- reserved
- reserved
- reserved
- reserved
-
-
- COLUMNS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- COLUMN_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COMMAND_FUNCTION
-
- non-reserved
- non-reserved
- non-reserved
-
-
- COMMAND_FUNCTION_CODE
-
- non-reserved
- non-reserved
-
-
-
- COMMENT
- non-reserved
-
-
-
-
-
- COMMENTS
- non-reserved
-
-
-
-
-
- COMMIT
- non-reserved
- reserved
- reserved
- reserved
-
-
- COMMITTED
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- CONCURRENTLY
- reserved (can be function or type)
-
-
-
-
-
- CONDITION
-
- reserved
- reserved
-
-
-
- CONDITION_NUMBER
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CONFIGURATION
- non-reserved
-
-
-
-
-
- CONFLICT
- non-reserved
-
-
-
-
-
- CONNECT
-
- reserved
- reserved
- reserved
-
-
- CONNECTION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- CONNECTION_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CONSTRAINT
- reserved
- reserved
- reserved
- reserved
-
-
- CONSTRAINTS
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- CONSTRAINT_CATALOG
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CONSTRAINT_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CONSTRAINT_SCHEMA
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CONSTRUCTOR
-
- non-reserved
- non-reserved
-
-
-
- CONTAINS
-
- reserved
- non-reserved
-
-
-
- CONTENT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- CONTINUE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- CONTROL
-
- non-reserved
- non-reserved
-
-
-
- CONVERSION
- non-reserved
-
-
-
-
-
- CONVERT
-
- reserved
- reserved
- reserved
-
-
- COPY
- non-reserved
-
-
-
-
-
- CORR
-
- reserved
- reserved
-
-
-
- CORRESPONDING
-
- reserved
- reserved
- reserved
-
-
- COST
- non-reserved
-
-
-
-
-
- COUNT
-
- reserved
- reserved
- reserved
-
-
- COVAR_POP
-
- reserved
- reserved
-
-
-
- COVAR_SAMP
-
- reserved
- reserved
-
-
-
- CREATE
- reserved
- reserved
- reserved
- reserved
-
-
- CROSS
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- CSV
- non-reserved
-
-
-
-
-
- CUBE
- non-reserved
- reserved
- reserved
-
-
-
- CUME_DIST
-
- reserved
- reserved
-
-
-
- CURRENT
- non-reserved
- reserved
- reserved
- reserved
-
-
- CURRENT_CATALOG
- reserved
- reserved
- reserved
-
-
-
- CURRENT_DATE
- reserved
- reserved
- reserved
- reserved
-
-
- CURRENT_DEFAULT_TRANSFORM_GROUP
-
- reserved
- reserved
-
-
-
- CURRENT_PATH
-
- reserved
- reserved
-
-
-
- CURRENT_ROLE
- reserved
- reserved
- reserved
-
-
-
- CURRENT_ROW
-
- reserved
-
-
-
-
- CURRENT_SCHEMA
- reserved (can be function or type)
- reserved
- reserved
-
-
-
- CURRENT_TIME
- reserved
- reserved
- reserved
- reserved
-
-
- CURRENT_TIMESTAMP
- reserved
- reserved
- reserved
- reserved
-
-
- CURRENT_TRANSFORM_GROUP_FOR_TYPE
-
- reserved
- reserved
-
-
-
- CURRENT_USER
- reserved
- reserved
- reserved
- reserved
-
-
- CURSOR
- non-reserved
- reserved
- reserved
- reserved
-
-
- CURSOR_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- CYCLE
- non-reserved
- reserved
- reserved
-
-
-
- DATA
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- DATABASE
- non-reserved
-
-
-
-
-
- DATALINK
-
- reserved
- reserved
-
-
-
- DATE
-
- reserved
- reserved
- reserved
-
-
- DATETIME_INTERVAL_CODE
-
- non-reserved
- non-reserved
- non-reserved
-
-
- DATETIME_INTERVAL_PRECISION
-
- non-reserved
- non-reserved
- non-reserved
-
-
- DAY
- non-reserved
- reserved
- reserved
- reserved
-
-
- DB
-
- non-reserved
- non-reserved
-
-
-
- DEALLOCATE
- non-reserved
- reserved
- reserved
- reserved
-
-
- DEC
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- DECIMAL
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- DECLARE
- non-reserved
- reserved
- reserved
- reserved
-
-
- DEFAULT
- reserved
- reserved
- reserved
- reserved
-
-
- DEFAULTS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- DEFERRABLE
- reserved
- non-reserved
- non-reserved
- reserved
-
-
- DEFERRED
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- DEFINED
-
- non-reserved
- non-reserved
-
-
-
- DEFINER
- non-reserved
- non-reserved
- non-reserved
-
-
-
- DEGREE
-
- non-reserved
- non-reserved
-
-
-
- DELETE
- non-reserved
- reserved
- reserved
- reserved
-
-
- DELIMITER
- non-reserved
-
-
-
-
-
- DELIMITERS
- non-reserved
-
-
-
-
-
- DENSE_RANK
-
- reserved
- reserved
-
-
-
- DEPENDS
- non-reserved
-
-
-
-
-
- DEPTH
-
- non-reserved
- non-reserved
-
-
-
- DEREF
-
- reserved
- reserved
-
-
-
- DERIVED
-
- non-reserved
- non-reserved
-
-
-
- DESC
- reserved
- non-reserved
- non-reserved
- reserved
-
-
- DESCRIBE
-
- reserved
- reserved
- reserved
-
-
- DESCRIPTOR
-
- non-reserved
- non-reserved
- reserved
-
-
- DETACH
- non-reserved
-
-
-
-
-
- DETERMINISTIC
-
- reserved
- reserved
-
-
-
- DIAGNOSTICS
-
- non-reserved
- non-reserved
- reserved
-
-
- DICTIONARY
- non-reserved
-
-
-
-
-
- DISABLE
- non-reserved
-
-
-
-
-
- DISCARD
- non-reserved
-
-
-
-
-
- DISCONNECT
-
- reserved
- reserved
- reserved
-
-
- DISPATCH
-
- non-reserved
- non-reserved
-
-
-
- DISTINCT
- reserved
- reserved
- reserved
- reserved
-
-
- DLNEWCOPY
-
- reserved
- reserved
-
-
-
- DLPREVIOUSCOPY
-
- reserved
- reserved
-
-
-
- DLURLCOMPLETE
-
- reserved
- reserved
-
-
-
- DLURLCOMPLETEONLY
-
- reserved
- reserved
-
-
-
- DLURLCOMPLETEWRITE
-
- reserved
- reserved
-
-
-
- DLURLPATH
-
- reserved
- reserved
-
-
-
- DLURLPATHONLY
-
- reserved
- reserved
-
-
-
- DLURLPATHWRITE
-
- reserved
- reserved
-
-
-
- DLURLSCHEME
-
- reserved
- reserved
-
-
-
- DLURLSERVER
-
- reserved
- reserved
-
-
-
- DLVALUE
-
- reserved
- reserved
-
-
-
- DO
- reserved
-
-
-
-
-
- DOCUMENT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- DOMAIN
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- DOUBLE
- non-reserved
- reserved
- reserved
- reserved
-
-
- DROP
- non-reserved
- reserved
- reserved
- reserved
-
-
- DYNAMIC
-
- reserved
- reserved
-
-
-
- DYNAMIC_FUNCTION
-
- non-reserved
- non-reserved
- non-reserved
-
-
- DYNAMIC_FUNCTION_CODE
-
- non-reserved
- non-reserved
-
-
-
- EACH
- non-reserved
- reserved
- reserved
-
-
-
- ELEMENT
-
- reserved
- reserved
-
-
-
- ELSE
- reserved
- reserved
- reserved
- reserved
-
-
- EMPTY
-
- non-reserved
- non-reserved
-
-
-
- ENABLE
- non-reserved
-
-
-
-
-
- ENCODING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ENCRYPTED
- non-reserved
-
-
-
-
-
- END
- reserved
- reserved
- reserved
- reserved
-
-
- END-EXEC
-
- reserved
- reserved
- reserved
-
-
- END_FRAME
-
- reserved
-
-
-
-
- END_PARTITION
-
- reserved
-
-
-
-
- ENFORCED
-
- non-reserved
-
-
-
-
- ENUM
- non-reserved
-
-
-
-
-
- EQUALS
-
- reserved
- non-reserved
-
-
-
- ESCAPE
- non-reserved
- reserved
- reserved
- reserved
-
-
- EVENT
- non-reserved
-
-
-
-
-
- EVERY
-
- reserved
- reserved
-
-
-
- EXCEPT
- reserved
- reserved
- reserved
- reserved
-
-
- EXCEPTION
-
-
-
- reserved
-
-
- EXCLUDE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- EXCLUDING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- EXCLUSIVE
- non-reserved
-
-
-
-
-
- EXEC
-
- reserved
- reserved
- reserved
-
-
- EXECUTE
- non-reserved
- reserved
- reserved
- reserved
-
-
- EXISTS
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- EXP
-
- reserved
- reserved
-
-
-
- EXPLAIN
- non-reserved
-
-
-
-
-
- EXPRESSION
-
- non-reserved
-
-
-
-
- EXTENSION
- non-reserved
-
-
-
-
-
- EXTERNAL
- non-reserved
- reserved
- reserved
- reserved
-
-
- EXTRACT
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- FALSE
- reserved
- reserved
- reserved
- reserved
-
-
- FAMILY
- non-reserved
-
-
-
-
-
- FETCH
- reserved
- reserved
- reserved
- reserved
-
-
- FILE
-
- non-reserved
- non-reserved
-
-
-
- FILTER
- non-reserved
- reserved
- reserved
-
-
-
- FINAL
-
- non-reserved
- non-reserved
-
-
-
- FIRST
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- FIRST_VALUE
-
- reserved
- reserved
-
-
-
- FLAG
-
- non-reserved
- non-reserved
-
-
-
- FLOAT
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- FLOOR
-
- reserved
- reserved
-
-
-
- FOLLOWING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- FOR
- reserved
- reserved
- reserved
- reserved
-
-
- FORCE
- non-reserved
-
-
-
-
-
- FOREIGN
- reserved
- reserved
- reserved
- reserved
-
-
- FORTRAN
-
- non-reserved
- non-reserved
- non-reserved
-
-
- FORWARD
- non-reserved
-
-
-
-
-
- FOUND
-
- non-reserved
- non-reserved
- reserved
-
-
- FRAME_ROW
-
- reserved
-
-
-
-
- FREE
-
- reserved
- reserved
-
-
-
- FREEZE
- reserved (can be function or type)
-
-
-
-
-
- FROM
- reserved
- reserved
- reserved
- reserved
-
-
- FS
-
- non-reserved
- non-reserved
-
-
-
- FULL
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- FUNCTION
- non-reserved
- reserved
- reserved
-
-
-
- FUNCTIONS
- non-reserved
-
-
-
-
-
- FUSION
-
- reserved
- reserved
-
-
-
- G
-
- non-reserved
- non-reserved
-
-
-
- GENERAL
-
- non-reserved
- non-reserved
-
-
-
- GENERATED
- non-reserved
- non-reserved
- non-reserved
-
-
-
- GET
-
- reserved
- reserved
- reserved
-
-
- GLOBAL
- non-reserved
- reserved
- reserved
- reserved
-
-
- GO
-
- non-reserved
- non-reserved
- reserved
-
-
- GOTO
-
- non-reserved
- non-reserved
- reserved
-
-
- GRANT
- reserved
- reserved
- reserved
- reserved
-
-
- GRANTED
- non-reserved
- non-reserved
- non-reserved
-
-
-
- GREATEST
- non-reserved (cannot be function or type)
-
-
-
-
-
- GROUP
- reserved
- reserved
- reserved
- reserved
-
-
- GROUPING
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- GROUPS
-
- reserved
-
-
-
-
- HANDLER
- non-reserved
-
-
-
-
-
- HAVING
- reserved
- reserved
- reserved
- reserved
-
-
- HEADER
- non-reserved
-
-
-
-
-
- HEX
-
- non-reserved
- non-reserved
-
-
-
- HIERARCHY
-
- non-reserved
- non-reserved
-
-
-
- HOLD
- non-reserved
- reserved
- reserved
-
-
-
- HOUR
- non-reserved
- reserved
- reserved
- reserved
-
-
- ID
-
- non-reserved
- non-reserved
-
-
-
- IDENTITY
- non-reserved
- reserved
- reserved
- reserved
-
-
- IF
- non-reserved
-
-
-
-
-
- IGNORE
-
- non-reserved
- non-reserved
-
-
-
- ILIKE
- reserved (can be function or type)
-
-
-
-
-
- IMMEDIATE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- IMMEDIATELY
-
- non-reserved
-
-
-
-
- IMMUTABLE
- non-reserved
-
-
-
-
-
- IMPLEMENTATION
-
- non-reserved
- non-reserved
-
-
-
- IMPLICIT
- non-reserved
-
-
-
-
-
- IMPORT
- non-reserved
- reserved
- reserved
-
-
-
- IN
- reserved
- reserved
- reserved
- reserved
-
-
- INCLUDING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- INCREMENT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- INDENT
-
- non-reserved
- non-reserved
-
-
-
- INDEX
- non-reserved
-
-
-
-
-
- INDEXES
- non-reserved
-
-
-
-
-
- INDICATOR
-
- reserved
- reserved
- reserved
-
-
- INHERIT
- non-reserved
-
-
-
-
-
- INHERITS
- non-reserved
-
-
-
-
-
- INITIALLY
- reserved
- non-reserved
- non-reserved
- reserved
-
-
- INLINE
- non-reserved
-
-
-
-
-
- INNER
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- INOUT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- INPUT
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- INSENSITIVE
- non-reserved
- reserved
- reserved
- reserved
-
-
- INSERT
- non-reserved
- reserved
- reserved
- reserved
-
-
- INSTANCE
-
- non-reserved
- non-reserved
-
-
-
- INSTANTIABLE
-
- non-reserved
- non-reserved
-
-
-
- INSTEAD
- non-reserved
- non-reserved
- non-reserved
-
-
-
- INT
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- INTEGER
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- INTEGRITY
-
- non-reserved
- non-reserved
-
-
-
- INTERSECT
- reserved
- reserved
- reserved
- reserved
-
-
- INTERSECTION
-
- reserved
- reserved
-
-
-
- INTERVAL
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- INTO
- reserved
- reserved
- reserved
- reserved
-
-
- INVOKER
- non-reserved
- non-reserved
- non-reserved
-
-
-
- IS
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- ISNULL
- reserved (can be function or type)
-
-
-
-
-
- ISOLATION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- JOIN
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- K
-
- non-reserved
- non-reserved
-
-
-
- KEY
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- KEY_MEMBER
-
- non-reserved
- non-reserved
-
-
-
- KEY_TYPE
-
- non-reserved
- non-reserved
-
-
-
- LABEL
- non-reserved
-
-
-
-
-
- LAG
-
- reserved
- reserved
-
-
-
- LANGUAGE
- non-reserved
- reserved
- reserved
- reserved
-
-
- LARGE
- non-reserved
- reserved
- reserved
-
-
-
- LAST
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- LAST_VALUE
-
- reserved
- reserved
-
-
-
- LATERAL
- reserved
- reserved
- reserved
-
-
-
- LEAD
-
- reserved
- reserved
-
-
-
- LEADING
- reserved
- reserved
- reserved
- reserved
-
-
- LEAKPROOF
- non-reserved
-
-
-
-
-
- LEAST
- non-reserved (cannot be function or type)
-
-
-
-
-
- LEFT
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- LENGTH
-
- non-reserved
- non-reserved
- non-reserved
-
-
- LEVEL
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- LIBRARY
-
- non-reserved
- non-reserved
-
-
-
- LIKE
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- LIKE_REGEX
-
- reserved
- reserved
-
-
-
- LIMIT
- reserved
- non-reserved
- non-reserved
-
-
-
- LINK
-
- non-reserved
- non-reserved
-
-
-
- LISTEN
- non-reserved
-
-
-
-
-
- LN
-
- reserved
- reserved
-
-
-
- LOAD
- non-reserved
-
-
-
-
-
- LOCAL
- non-reserved
- reserved
- reserved
- reserved
-
-
- LOCALTIME
- reserved
- reserved
- reserved
-
-
-
- LOCALTIMESTAMP
- reserved
- reserved
- reserved
-
-
-
- LOCATION
- non-reserved
- non-reserved
- non-reserved
-
-
-
- LOCATOR
-
- non-reserved
- non-reserved
-
-
-
- LOCK
- non-reserved
-
-
-
-
-
- LOCKED
- non-reserved
-
-
-
-
-
- LOGGED
- non-reserved
-
-
-
-
-
- LOWER
-
- reserved
- reserved
- reserved
-
-
- M
-
- non-reserved
- non-reserved
-
-
-
- MAP
-
- non-reserved
- non-reserved
-
-
-
- MAPPING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- MATCH
- non-reserved
- reserved
- reserved
- reserved
-
-
- MATCHED
-
- non-reserved
- non-reserved
-
-
-
- MATERIALIZED
- non-reserved
-
-
-
-
-
- MAX
-
- reserved
- reserved
- reserved
-
-
- MAXVALUE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- MAX_CARDINALITY
-
-
- reserved
-
-
-
- MEMBER
-
- reserved
- reserved
-
-
-
- MERGE
-
- reserved
- reserved
-
-
-
- MESSAGE_LENGTH
-
- non-reserved
- non-reserved
- non-reserved
-
-
- MESSAGE_OCTET_LENGTH
-
- non-reserved
- non-reserved
- non-reserved
-
-
- MESSAGE_TEXT
-
- non-reserved
- non-reserved
- non-reserved
-
-
- METHOD
- non-reserved
- reserved
- reserved
-
-
-
- MIN
-
- reserved
- reserved
- reserved
-
-
- MINUTE
- non-reserved
- reserved
- reserved
- reserved
-
-
- MINVALUE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- MOD
-
- reserved
- reserved
-
-
-
- MODE
- non-reserved
-
-
-
-
-
- MODIFIES
-
- reserved
- reserved
-
-
-
- MODULE
-
- reserved
- reserved
- reserved
-
-
- MONTH
- non-reserved
- reserved
- reserved
- reserved
-
-
- MORE
-
- non-reserved
- non-reserved
- non-reserved
-
-
- MOVE
- non-reserved
-
-
-
-
-
- MULTISET
-
- reserved
- reserved
-
-
-
- MUMPS
-
- non-reserved
- non-reserved
- non-reserved
-
-
- NAME
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- NAMES
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- NAMESPACE
-
- non-reserved
- non-reserved
-
-
-
- NATIONAL
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- NATURAL
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- NCHAR
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- NCLOB
-
- reserved
- reserved
-
-
-
- NESTING
-
- non-reserved
- non-reserved
-
-
-
- NEW
- non-reserved
- reserved
- reserved
-
-
-
- NEXT
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- NFC
-
- non-reserved
- non-reserved
-
-
-
- NFD
-
- non-reserved
- non-reserved
-
-
-
- NFKC
-
- non-reserved
- non-reserved
-
-
-
- NFKD
-
- non-reserved
- non-reserved
-
-
-
- NIL
-
- non-reserved
- non-reserved
-
-
-
- NO
- non-reserved
- reserved
- reserved
- reserved
-
-
- NONE
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- NORMALIZE
-
- reserved
- reserved
-
-
-
- NORMALIZED
-
- non-reserved
- non-reserved
-
-
-
- NOT
- reserved
- reserved
- reserved
- reserved
-
-
- NOTHING
- non-reserved
-
-
-
-
-
- NOTIFY
- non-reserved
-
-
-
-
-
- NOTNULL
- reserved (can be function or type)
-
-
-
-
-
- NOWAIT
- non-reserved
-
-
-
-
-
- NTH_VALUE
-
- reserved
- reserved
-
-
-
- NTILE
-
- reserved
- reserved
-
-
-
- NULL
- reserved
- reserved
- reserved
- reserved
-
-
- NULLABLE
-
- non-reserved
- non-reserved
- non-reserved
-
-
- NULLIF
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- NULLS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- NUMBER
-
- non-reserved
- non-reserved
- non-reserved
-
-
- NUMERIC
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- OBJECT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- OCCURRENCES_REGEX
-
- reserved
- reserved
-
-
-
- OCTETS
-
- non-reserved
- non-reserved
-
-
-
- OCTET_LENGTH
-
- reserved
- reserved
- reserved
-
-
- OF
- non-reserved
- reserved
- reserved
- reserved
-
-
- OFF
- non-reserved
- non-reserved
- non-reserved
-
-
-
- OFFSET
- reserved
- reserved
- reserved
-
-
-
- OIDS
- non-reserved
-
-
-
-
-
- OLD
- non-reserved
- reserved
- reserved
-
-
-
- ON
- reserved
- reserved
- reserved
- reserved
-
-
- ONLY
- reserved
- reserved
- reserved
- reserved
-
-
- OPEN
-
- reserved
- reserved
- reserved
-
-
- OPERATOR
- non-reserved
-
-
-
-
-
- OPTION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- OPTIONS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- OR
- reserved
- reserved
- reserved
- reserved
-
-
- ORDER
- reserved
- reserved
- reserved
- reserved
-
-
- ORDERING
-
- non-reserved
- non-reserved
-
-
-
- ORDINALITY
- non-reserved
- non-reserved
- non-reserved
-
-
-
- OTHERS
-
- non-reserved
- non-reserved
-
-
-
- OUT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- OUTER
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- OUTPUT
-
- non-reserved
- non-reserved
- reserved
-
-
- OVER
- non-reserved
- reserved
- reserved
-
-
-
- OVERLAPS
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- OVERLAY
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- OVERRIDING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- OWNED
- non-reserved
-
-
-
-
-
- OWNER
- non-reserved
-
-
-
-
-
- P
-
- non-reserved
- non-reserved
-
-
-
- PAD
-
- non-reserved
- non-reserved
- reserved
-
-
- PARALLEL
- non-reserved
-
-
-
-
-
- PARAMETER
-
- reserved
- reserved
-
-
-
- PARAMETER_MODE
-
- non-reserved
- non-reserved
-
-
-
- PARAMETER_NAME
-
- non-reserved
- non-reserved
-
-
-
- PARAMETER_ORDINAL_POSITION
-
- non-reserved
- non-reserved
-
-
-
- PARAMETER_SPECIFIC_CATALOG
-
- non-reserved
- non-reserved
-
-
-
- PARAMETER_SPECIFIC_NAME
-
- non-reserved
- non-reserved
-
-
-
- PARAMETER_SPECIFIC_SCHEMA
-
- non-reserved
- non-reserved
-
-
-
- PARSER
- non-reserved
-
-
-
-
-
- PARTIAL
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- PARTITION
- non-reserved
- reserved
- reserved
-
-
-
- PASCAL
-
- non-reserved
- non-reserved
- non-reserved
-
-
- PASSING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- PASSTHROUGH
-
- non-reserved
- non-reserved
-
-
-
- PASSWORD
- non-reserved
-
-
-
-
-
- PATH
-
- non-reserved
- non-reserved
-
-
-
- PERCENT
-
- reserved
-
-
-
-
- PERCENTILE_CONT
-
- reserved
- reserved
-
-
-
- PERCENTILE_DISC
-
- reserved
- reserved
-
-
-
- PERCENT_RANK
-
- reserved
- reserved
-
-
-
- PERIOD
-
- reserved
-
-
-
-
- PERMISSION
-
- non-reserved
- non-reserved
-
-
-
- PLACING
- reserved
- non-reserved
- non-reserved
-
-
-
- PLANS
- non-reserved
-
-
-
-
-
- PLI
-
- non-reserved
- non-reserved
- non-reserved
-
-
- POLICY
- non-reserved
-
-
-
-
-
- PORTION
-
- reserved
-
-
-
-
- POSITION
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- POSITION_REGEX
-
- reserved
- reserved
-
-
-
- POWER
-
- reserved
- reserved
-
-
-
- PRECEDES
-
- reserved
-
-
-
-
- PRECEDING
- non-reserved
- non-reserved
- non-reserved
-
-
-
- PRECISION
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- PREPARE
- non-reserved
- reserved
- reserved
- reserved
-
-
- PREPARED
- non-reserved
-
-
-
-
-
- PRESERVE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- PRIMARY
- reserved
- reserved
- reserved
- reserved
-
-
- PRIOR
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- PRIVILEGES
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- PROCEDURAL
- non-reserved
-
-
-
-
-
- PROCEDURE
- non-reserved
- reserved
- reserved
- reserved
-
-
- PROGRAM
- non-reserved
-
-
-
-
-
- PUBLIC
-
- non-reserved
- non-reserved
- reserved
-
-
- PUBLICATION
- non-reserved
-
-
-
-
-
- QUOTE
- non-reserved
-
-
-
-
-
- RANGE
- non-reserved
- reserved
- reserved
-
-
-
- RANK
-
- reserved
- reserved
-
-
-
- READ
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- READS
-
- reserved
- reserved
-
-
-
- REAL
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- REASSIGN
- non-reserved
-
-
-
-
-
- RECHECK
- non-reserved
-
-
-
-
-
- RECOVERY
-
- non-reserved
- non-reserved
-
-
-
- RECURSIVE
- non-reserved
- reserved
- reserved
-
-
-
- REF
- non-reserved
- reserved
- reserved
-
-
-
- REFERENCES
- reserved
- reserved
- reserved
- reserved
-
-
- REFERENCING
- non-reserved
- reserved
- reserved
-
-
-
- REFRESH
- non-reserved
-
-
-
-
-
- REGR_AVGX
-
- reserved
- reserved
-
-
-
- REGR_AVGY
-
- reserved
- reserved
-
-
-
- REGR_COUNT
-
- reserved
- reserved
-
-
-
- REGR_INTERCEPT
-
- reserved
- reserved
-
-
-
- REGR_R2
-
- reserved
- reserved
-
-
-
- REGR_SLOPE
-
- reserved
- reserved
-
-
-
- REGR_SXX
-
- reserved
- reserved
-
-
-
- REGR_SXY
-
- reserved
- reserved
-
-
-
- REGR_SYY
-
- reserved
- reserved
-
-
-
- REINDEX
- non-reserved
-
-
-
-
-
- RELATIVE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- RELEASE
- non-reserved
- reserved
- reserved
-
-
-
- RENAME
- non-reserved
-
-
-
-
-
- REPEATABLE
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- REPLACE
- non-reserved
-
-
-
-
-
- REPLICA
- non-reserved
-
-
-
-
-
- REQUIRING
-
- non-reserved
- non-reserved
-
-
-
- RESET
- non-reserved
-
-
-
-
-
- RESPECT
-
- non-reserved
- non-reserved
-
-
-
- RESTART
- non-reserved
- non-reserved
- non-reserved
-
-
-
- RESTORE
-
- non-reserved
- non-reserved
-
-
-
- RESTRICT
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- RESULT
-
- reserved
- reserved
-
-
-
- RETURN
-
- reserved
- reserved
-
-
-
- RETURNED_CARDINALITY
-
- non-reserved
- non-reserved
-
-
-
- RETURNED_LENGTH
-
- non-reserved
- non-reserved
- non-reserved
-
-
- RETURNED_OCTET_LENGTH
-
- non-reserved
- non-reserved
- non-reserved
-
-
- RETURNED_SQLSTATE
-
- non-reserved
- non-reserved
- non-reserved
-
-
- RETURNING
- reserved
- non-reserved
- non-reserved
-
-
-
- RETURNS
- non-reserved
- reserved
- reserved
-
-
-
- REVOKE
- non-reserved
- reserved
- reserved
- reserved
-
-
- RIGHT
- reserved (can be function or type)
- reserved
- reserved
- reserved
-
-
- ROLE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ROLLBACK
- non-reserved
- reserved
- reserved
- reserved
-
-
- ROLLUP
- non-reserved
- reserved
- reserved
-
-
-
- ROUTINE
-
- non-reserved
- non-reserved
-
-
-
- ROUTINE_CATALOG
-
- non-reserved
- non-reserved
-
-
-
- ROUTINE_NAME
-
- non-reserved
- non-reserved
-
-
-
- ROUTINE_SCHEMA
-
- non-reserved
- non-reserved
-
-
-
- ROW
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- ROWS
- non-reserved
- reserved
- reserved
- reserved
-
-
- ROW_COUNT
-
- non-reserved
- non-reserved
- non-reserved
-
-
- ROW_NUMBER
-
- reserved
- reserved
-
-
-
- RULE
- non-reserved
-
-
-
-
-
- SAVEPOINT
- non-reserved
- reserved
- reserved
-
-
-
- SCALE
-
- non-reserved
- non-reserved
- non-reserved
-
-
- SCHEMA
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- SCHEMAS
- non-reserved
-
-
-
-
-
- SCHEMA_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- SCOPE
-
- reserved
- reserved
-
-
-
- SCOPE_CATALOG
-
- non-reserved
- non-reserved
-
-
-
- SCOPE_NAME
-
- non-reserved
- non-reserved
-
-
-
- SCOPE_SCHEMA
-
- non-reserved
- non-reserved
-
-
-
- SCROLL
- non-reserved
- reserved
- reserved
- reserved
-
-
- SEARCH
- non-reserved
- reserved
- reserved
-
-
-
- SECOND
- non-reserved
- reserved
- reserved
- reserved
-
-
- SECTION
-
- non-reserved
- non-reserved
- reserved
-
-
- SECURITY
- non-reserved
- non-reserved
- non-reserved
-
-
-
- SELECT
- reserved
- reserved
- reserved
- reserved
-
-
- SELECTIVE
-
- non-reserved
- non-reserved
-
-
-
- SELF
-
- non-reserved
- non-reserved
-
-
-
- SENSITIVE
-
- reserved
- reserved
-
-
-
- SEQUENCE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- SEQUENCES
- non-reserved
-
-
-
-
-
- SERIALIZABLE
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- SERVER
- non-reserved
- non-reserved
- non-reserved
-
-
-
- SERVER_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- SESSION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- SESSION_USER
- reserved
- reserved
- reserved
- reserved
-
-
- SET
- non-reserved
- reserved
- reserved
- reserved
-
-
- SETOF
- non-reserved (cannot be function or type)
-
-
-
-
-
- SETS
- non-reserved
- non-reserved
- non-reserved
-
-
-
- SHARE
- non-reserved
-
-
-
-
-
- SHOW
- non-reserved
-
-
-
-
-
- SIMILAR
- reserved (can be function or type)
- reserved
- reserved
-
-
-
- SIMPLE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- SIZE
-
- non-reserved
- non-reserved
- reserved
-
-
- SKIP
- non-reserved
-
-
-
-
-
- SMALLINT
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- SNAPSHOT
- non-reserved
-
-
-
-
-
- SOME
- reserved
- reserved
- reserved
- reserved
-
-
- SOURCE
-
- non-reserved
- non-reserved
-
-
-
- SPACE
-
- non-reserved
- non-reserved
- reserved
-
-
- SPECIFIC
-
- reserved
- reserved
-
-
-
- SPECIFICTYPE
-
- reserved
- reserved
-
-
-
- SPECIFIC_NAME
-
- non-reserved
- non-reserved
-
-
-
- SQL
- non-reserved
- reserved
- reserved
- reserved
-
-
- SQLCODE
-
-
-
- reserved
-
-
- SQLERROR
-
-
-
- reserved
-
-
- SQLEXCEPTION
-
- reserved
- reserved
-
-
-
- SQLSTATE
-
- reserved
- reserved
- reserved
-
-
- SQLWARNING
-
- reserved
- reserved
-
-
-
- SQRT
-
- reserved
- reserved
-
-
-
- STABLE
- non-reserved
-
-
-
-
-
- STANDALONE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- START
- non-reserved
- reserved
- reserved
-
-
-
- STATE
-
- non-reserved
- non-reserved
-
-
-
- STATEMENT
- non-reserved
- non-reserved
- non-reserved
-
-
-
- STATIC
-
- reserved
- reserved
-
-
-
- STATISTICS
- non-reserved
-
-
-
-
-
- STDDEV_POP
-
- reserved
- reserved
-
-
-
- STDDEV_SAMP
-
- reserved
- reserved
-
-
-
- STDIN
- non-reserved
-
-
-
-
-
- STDOUT
- non-reserved
-
-
-
-
-
- STORAGE
- non-reserved
-
-
-
-
-
- STRICT
- non-reserved
-
-
-
-
-
- STRIP
- non-reserved
- non-reserved
- non-reserved
-
-
-
- STRUCTURE
-
- non-reserved
- non-reserved
-
-
-
- STYLE
-
- non-reserved
- non-reserved
-
-
-
- SUBCLASS_ORIGIN
-
- non-reserved
- non-reserved
- non-reserved
-
-
- SUBMULTISET
-
- reserved
- reserved
-
-
-
- SUBSCRIPTION
- non-reserved
-
-
-
-
-
- SUBSTRING
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- SUBSTRING_REGEX
-
- reserved
- reserved
-
-
-
- SUCCEEDS
-
- reserved
-
-
-
-
- SUM
-
- reserved
- reserved
- reserved
-
-
- SYMMETRIC
- reserved
- reserved
- reserved
-
-
-
- SYSID
- non-reserved
-
-
-
-
-
- SYSTEM
- non-reserved
- reserved
- reserved
-
-
-
- SYSTEM_TIME
-
- reserved
-
-
-
-
- SYSTEM_USER
-
- reserved
- reserved
- reserved
-
-
- T
-
- non-reserved
- non-reserved
-
-
-
- TABLE
- reserved
- reserved
- reserved
- reserved
-
-
- TABLES
- non-reserved
-
-
-
-
-
- TABLESAMPLE
- reserved (can be function or type)
- reserved
- reserved
-
-
-
- TABLESPACE
- non-reserved
-
-
-
-
-
- TABLE_NAME
-
- non-reserved
- non-reserved
- non-reserved
-
-
- TEMP
- non-reserved
-
-
-
-
-
- TEMPLATE
- non-reserved
-
-
-
-
-
- TEMPORARY
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- TEXT
- non-reserved
-
-
-
-
-
- THEN
- reserved
- reserved
- reserved
- reserved
-
-
- TIES
-
- non-reserved
- non-reserved
-
-
-
- TIME
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- TIMESTAMP
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- TIMEZONE_HOUR
-
- reserved
- reserved
- reserved
-
-
- TIMEZONE_MINUTE
-
- reserved
- reserved
- reserved
-
-
- TO
- reserved
- reserved
- reserved
- reserved
-
-
- TOKEN
-
- non-reserved
- non-reserved
-
-
-
- TOP_LEVEL_COUNT
-
- non-reserved
- non-reserved
-
-
-
- TRAILING
- reserved
- reserved
- reserved
- reserved
-
-
- TRANSACTION
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- TRANSACTIONS_COMMITTED
-
- non-reserved
- non-reserved
-
-
-
- TRANSACTIONS_ROLLED_BACK
-
- non-reserved
- non-reserved
-
-
-
- TRANSACTION_ACTIVE
-
- non-reserved
- non-reserved
-
-
-
- TRANSFORM
- non-reserved
- non-reserved
- non-reserved
-
-
-
- TRANSFORMS
-
- non-reserved
- non-reserved
-
-
-
- TRANSLATE
-
- reserved
- reserved
- reserved
-
-
- TRANSLATE_REGEX
-
- reserved
- reserved
-
-
-
- TRANSLATION
-
- reserved
- reserved
- reserved
-
-
- TREAT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- TRIGGER
- non-reserved
- reserved
- reserved
-
-
-
- TRIGGER_CATALOG
-
- non-reserved
- non-reserved
-
-
-
- TRIGGER_NAME
-
- non-reserved
- non-reserved
-
-
-
- TRIGGER_SCHEMA
-
- non-reserved
- non-reserved
-
-
-
- TRIM
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- TRIM_ARRAY
-
- reserved
- reserved
-
-
-
- TRUE
- reserved
- reserved
- reserved
- reserved
-
-
- TRUNCATE
- non-reserved
- reserved
- reserved
-
-
-
- TRUSTED
- non-reserved
-
-
-
-
-
- TYPE
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- TYPES
- non-reserved
-
-
-
-
-
- UESCAPE
-
- reserved
- reserved
-
-
-
- UNBOUNDED
- non-reserved
- non-reserved
- non-reserved
-
-
-
- UNCOMMITTED
- non-reserved
- non-reserved
- non-reserved
- non-reserved
-
-
- UNDER
-
- non-reserved
- non-reserved
-
-
-
- UNENCRYPTED
- non-reserved
-
-
-
-
-
- UNION
- reserved
- reserved
- reserved
- reserved
-
-
- UNIQUE
- reserved
- reserved
- reserved
- reserved
-
-
- UNKNOWN
- non-reserved
- reserved
- reserved
- reserved
-
-
- UNLINK
-
- non-reserved
- non-reserved
-
-
-
- UNLISTEN
- non-reserved
-
-
-
-
-
- UNLOGGED
- non-reserved
-
-
-
-
-
- UNNAMED
-
- non-reserved
- non-reserved
- non-reserved
-
-
- UNNEST
-
- reserved
- reserved
-
-
-
- UNTIL
- non-reserved
-
-
-
-
-
- UNTYPED
-
- non-reserved
- non-reserved
-
-
-
- UPDATE
- non-reserved
- reserved
- reserved
- reserved
-
-
- UPPER
-
- reserved
- reserved
- reserved
-
-
- URI
-
- non-reserved
- non-reserved
-
-
-
- USAGE
-
- non-reserved
- non-reserved
- reserved
-
-
- USER
- reserved
- reserved
- reserved
- reserved
-
-
- USER_DEFINED_TYPE_CATALOG
-
- non-reserved
- non-reserved
-
-
-
- USER_DEFINED_TYPE_CODE
-
- non-reserved
- non-reserved
-
-
-
- USER_DEFINED_TYPE_NAME
-
- non-reserved
- non-reserved
-
-
-
- USER_DEFINED_TYPE_SCHEMA
-
- non-reserved
- non-reserved
-
-
-
- USING
- reserved
- reserved
- reserved
- reserved
-
-
- VACUUM
- non-reserved
-
-
-
-
-
- VALID
- non-reserved
- non-reserved
- non-reserved
-
-
-
- VALIDATE
- non-reserved
-
-
-
-
-
- VALIDATOR
- non-reserved
-
-
-
-
-
- VALUE
- non-reserved
- reserved
- reserved
- reserved
-
-
- VALUES
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- VALUE_OF
-
- reserved
-
-
-
-
- VARBINARY
-
- reserved
- reserved
-
-
-
- VARCHAR
- non-reserved (cannot be function or type)
- reserved
- reserved
- reserved
-
-
- VARIADIC
- reserved
-
-
-
-
-
- VARYING
- non-reserved
- reserved
- reserved
- reserved
-
-
- VAR_POP
-
- reserved
- reserved
-
-
-
- VAR_SAMP
-
- reserved
- reserved
-
-
-
- VERBOSE
- reserved (can be function or type)
-
-
-
-
-
- VERSION
- non-reserved
- non-reserved
- non-reserved
-
-
-
- VERSIONING
-
- reserved
-
-
-
-
- VIEW
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- VIEWS
- non-reserved
-
-
-
-
-
- VOLATILE
- non-reserved
-
-
-
-
-
- WHEN
- reserved
- reserved
- reserved
- reserved
-
-
- WHENEVER
-
- reserved
- reserved
- reserved
-
-
- WHERE
- reserved
- reserved
- reserved
- reserved
-
-
- WHITESPACE
- non-reserved
- non-reserved
- non-reserved
-
-
-
- WIDTH_BUCKET
-
- reserved
- reserved
-
-
-
- WINDOW
- reserved
- reserved
- reserved
-
-
-
- WITH
- reserved
- reserved
- reserved
- reserved
-
-
- WITHIN
- non-reserved
- reserved
- reserved
-
-
-
- WITHOUT
- non-reserved
- reserved
- reserved
-
-
-
- WORK
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- WRAPPER
- non-reserved
- non-reserved
- non-reserved
-
-
-
- WRITE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
- XML
- non-reserved
- reserved
- reserved
-
-
-
- XMLAGG
-
- reserved
- reserved
-
-
-
- XMLATTRIBUTES
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLBINARY
-
- reserved
- reserved
-
-
-
- XMLCAST
-
- reserved
- reserved
-
-
-
- XMLCOMMENT
-
- reserved
- reserved
-
-
-
- XMLCONCAT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLDECLARATION
-
- non-reserved
- non-reserved
-
-
-
- XMLDOCUMENT
-
- reserved
- reserved
-
-
-
- XMLELEMENT
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLEXISTS
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLFOREST
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLITERATE
-
- reserved
- reserved
-
-
-
- XMLNAMESPACES
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLPARSE
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLPI
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLQUERY
-
- reserved
- reserved
-
-
-
- XMLROOT
- non-reserved (cannot be function or type)
-
-
-
-
-
- XMLSCHEMA
-
- non-reserved
- non-reserved
-
-
-
- XMLSERIALIZE
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLTABLE
- non-reserved (cannot be function or type)
- reserved
- reserved
-
-
-
- XMLTEXT
-
- reserved
- reserved
-
-
-
- XMLVALIDATE
-
- reserved
- reserved
-
-
-
- YEAR
- non-reserved
- reserved
- reserved
- reserved
-
-
- YES
- non-reserved
- non-reserved
- non-reserved
-
-
-
- ZONE
- non-reserved
- non-reserved
- non-reserved
- reserved
-
-
-
-
-
+ &keywords-table;
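
The hand-maintained table above is replaced by the generated &keywords-table; entity, built from the per-standard keyword lists added below (one upper-case keyword per line). The patch's actual generator is not shown in this excerpt; purely as a hedged sketch of how one of these list files can be consumed, where print_list and the fixed status labels are illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* Hedged sketch, not PostgreSQL's actual table generator: print the
     * keywords from one of the per-standard list files (one upper-case
     * keyword per line) together with a fixed status label. */
    static void
    print_list(const char *path, const char *status)
    {
        char    line[80];
        FILE   *fp = fopen(path, "r");

        if (fp == NULL)
            return;
        while (fgets(line, sizeof(line), fp) != NULL)
        {
            line[strcspn(line, "\r\n")] = '\0';     /* strip newline */
            if (line[0] != '\0')
                printf("%-32s %s\n", line, status);
        }
        fclose(fp);
    }

    int
    main(void)
    {
        /* File names are the ones added by this patch. */
        print_list("sql2016-02-reserved.txt", "reserved");
        print_list("sql2016-02-nonreserved.txt", "non-reserved");
        return 0;
    }
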
diff --git a/doc/src/sgml/keywords/sql1992-nonreserved.txt b/doc/src/sgml/keywords/sql1992-nonreserved.txt
new file mode 100644
index 00000000000..00abedaac79
--- /dev/null
+++ b/doc/src/sgml/keywords/sql1992-nonreserved.txt
@@ -0,0 +1,50 @@
+ADA
+C
+CATALOG_NAME
+CHARACTER_SET_CATALOG
+CHARACTER_SET_NAME
+CHARACTER_SET_SCHEMA
+CLASS_ORIGIN
+COBOL
+COLLATION_CATALOG
+COLLATION_NAME
+COLLATION_SCHEMA
+COLUMN_NAME
+COMMAND_FUNCTION
+COMMITTED
+CONDITION_NUMBER
+CONNECTION_NAME
+CONSTRAINT_CATALOG
+CONSTRAINT_NAME
+CONSTRAINT_SCHEMA
+CURSOR_NAME
+DATA
+DATETIME_INTERVAL_CODE
+DATETIME_INTERVAL_PRECISION
+DYNAMIC_FUNCTION
+FORTRAN
+LENGTH
+MESSAGE_LENGTH
+MESSAGE_OCTET_LENGTH
+MESSAGE_TEXT
+MORE
+MUMPS
+NAME
+NULLABLE
+NUMBER
+PASCAL
+PLI
+REPEATABLE
+RETURNED_LENGTH
+RETURNED_OCTET_LENGTH
+RETURNED_SQLSTATE
+ROW_COUNT
+SCALE
+SCHEMA_NAME
+SERIALIZABLE
+SERVER_NAME
+SUBCLASS_ORIGIN
+TABLE_NAME
+TYPE
+UNCOMMITTED
+UNNAMED
diff --git a/doc/src/sgml/keywords/sql1992-reserved.txt b/doc/src/sgml/keywords/sql1992-reserved.txt
new file mode 100644
index 00000000000..5bda21315f8
--- /dev/null
+++ b/doc/src/sgml/keywords/sql1992-reserved.txt
@@ -0,0 +1,227 @@
+ABSOLUTE
+ACTION
+ADD
+ALL
+ALLOCATE
+ALTER
+AND
+ANY
+ARE
+AS
+ASC
+ASSERTION
+AT
+AUTHORIZATION
+AVG
+BEGIN
+BETWEEN
+BIT
+BIT_LENGTH
+BOTH
+BY
+CASCADE
+CASCADED
+CASE
+CAST
+CATALOG
+CHAR
+CHARACTER
+CHAR_LENGTH
+CHARACTER_LENGTH
+CHECK
+CLOSE
+COALESCE
+COLLATE
+COLLATION
+COLUMN
+COMMIT
+CONNECT
+CONNECTION
+CONSTRAINT
+CONSTRAINTS
+CONTINUE
+CONVERT
+CORRESPONDING
+COUNT
+CREATE
+CROSS
+CURRENT
+CURRENT_DATE
+CURRENT_TIME
+CURRENT_TIMESTAMP
+CURRENT_USER
+CURSOR
+DATE
+DAY
+DEALLOCATE
+DEC
+DECIMAL
+DECLARE
+DEFAULT
+DEFERRABLE
+DEFERRED
+DELETE
+DESC
+DESCRIBE
+DESCRIPTOR
+DIAGNOSTICS
+DISCONNECT
+DISTINCT
+DOMAIN
+DOUBLE
+DROP
+ELSE
+END
+END-EXEC
+ESCAPE
+EXCEPT
+EXCEPTION
+EXEC
+EXECUTE
+EXISTS
+EXTERNAL
+EXTRACT
+FALSE
+FETCH
+FIRST
+FLOAT
+FOR
+FOREIGN
+FOUND
+FROM
+FULL
+GET
+GLOBAL
+GO
+GOTO
+GRANT
+GROUP
+HAVING
+HOUR
+IDENTITY
+IMMEDIATE
+IN
+INDICATOR
+INITIALLY
+INNER
+INPUT
+INSENSITIVE
+INSERT
+INT
+INTEGER
+INTERSECT
+INTERVAL
+INTO
+IS
+ISOLATION
+JOIN
+KEY
+LANGUAGE
+LAST
+LEADING
+LEFT
+LEVEL
+LIKE
+LOCAL
+LOWER
+MATCH
+MAX
+MIN
+MINUTE
+MODULE
+MONTH
+NAMES
+NATIONAL
+NATURAL
+NCHAR
+NEXT
+NO
+NOT
+NULL
+NULLIF
+NUMERIC
+OCTET_LENGTH
+OF
+ON
+ONLY
+OPEN
+OPTION
+OR
+ORDER
+OUTER
+OUTPUT
+OVERLAPS
+PAD
+PARTIAL
+POSITION
+PRECISION
+PREPARE
+PRESERVE
+PRIMARY
+PRIOR
+PRIVILEGES
+PROCEDURE
+PUBLIC
+READ
+REAL
+REFERENCES
+RELATIVE
+RESTRICT
+REVOKE
+RIGHT
+ROLLBACK
+ROWS
+SCHEMA
+SCROLL
+SECOND
+SECTION
+SELECT
+SESSION
+SESSION_USER
+SET
+SIZE
+SMALLINT
+SOME
+SPACE
+SQL
+SQLCODE
+SQLERROR
+SQLSTATE
+SUBSTRING
+SUM
+SYSTEM_USER
+TABLE
+TEMPORARY
+THEN
+TIME
+TIMESTAMP
+TIMEZONE_HOUR
+TIMEZONE_MINUTE
+TO
+TRAILING
+TRANSACTION
+TRANSLATE
+TRANSLATION
+TRIM
+TRUE
+UNION
+UNIQUE
+UNKNOWN
+UPDATE
+UPPER
+USAGE
+USER
+USING
+VALUE
+VALUES
+VARCHAR
+VARYING
+VIEW
+WHEN
+WHENEVER
+WHERE
+WITH
+WORK
+WRITE
+YEAR
+ZONE
diff --git a/doc/src/sgml/keywords/sql2011-02-nonreserved.txt b/doc/src/sgml/keywords/sql2011-02-nonreserved.txt
new file mode 100644
index 00000000000..b28a180f0bd
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-02-nonreserved.txt
@@ -0,0 +1,219 @@
+A
+ABSOLUTE
+ACTION
+ADA
+ADD
+ADMIN
+AFTER
+ALWAYS
+ASC
+ASSERTION
+ASSIGNMENT
+ATTRIBUTE
+ATTRIBUTES
+BEFORE
+BERNOULLI
+BREADTH
+C
+CASCADE
+CATALOG
+CATALOG_NAME
+CHAIN
+CHARACTER_SET_CATALOG
+CHARACTER_SET_NAME
+CHARACTER_SET_SCHEMA
+CHARACTERISTICS
+CHARACTERS
+CLASS_ORIGIN
+COBOL
+COLLATION
+COLLATION_CATALOG
+COLLATION_NAME
+COLLATION_SCHEMA
+COLUMN_NAME
+COMMAND_FUNCTION
+COMMAND_FUNCTION_CODE
+COMMITTED
+CONDITION_NUMBER
+CONNECTION
+CONNECTION_NAME
+CONSTRAINT_CATALOG
+CONSTRAINT_NAME
+CONSTRAINT_SCHEMA
+CONSTRAINTS
+CONSTRUCTOR
+CONTINUE
+CURSOR_NAME
+DATA
+DATETIME_INTERVAL_CODE
+DATETIME_INTERVAL_PRECISION
+DEFAULTS
+DEFERRABLE
+DEFERRED
+DEFINED
+DEFINER
+DEGREE
+DEPTH
+DERIVED
+DESC
+DESCRIPTOR
+DIAGNOSTICS
+DISPATCH
+DOMAIN
+DYNAMIC_FUNCTION
+DYNAMIC_FUNCTION_CODE
+ENFORCED
+EXCLUDE
+EXCLUDING
+EXPRESSION
+FINAL
+FIRST
+FLAG
+FOLLOWING
+FORTRAN
+FOUND
+G
+GENERAL
+GENERATED
+GO
+GOTO
+GRANTED
+HIERARCHY
+IGNORE
+IMMEDIATE
+IMMEDIATELY
+IMPLEMENTATION
+INCLUDING
+INCREMENT
+INITIALLY
+INPUT
+INSTANCE
+INSTANTIABLE
+INSTEAD
+INVOKER
+ISOLATION
+K
+KEY
+KEY_MEMBER
+KEY_TYPE
+LAST
+LENGTH
+LEVEL
+LOCATOR
+M
+MAP
+MATCHED
+MAXVALUE
+MESSAGE_LENGTH
+MESSAGE_OCTET_LENGTH
+MESSAGE_TEXT
+MINVALUE
+MORE
+MUMPS
+NAME
+NAMES
+NESTING
+NEXT
+NFC
+NFD
+NFKC
+NFKD
+NORMALIZED
+NULLABLE
+NULLS
+NUMBER
+OBJECT
+OCTETS
+OPTION
+OPTIONS
+ORDERING
+ORDINALITY
+OTHERS
+OUTPUT
+OVERRIDING
+P
+PAD
+PARAMETER_MODE
+PARAMETER_NAME
+PARAMETER_ORDINAL_POSITION
+PARAMETER_SPECIFIC_CATALOG
+PARAMETER_SPECIFIC_NAME
+PARAMETER_SPECIFIC_SCHEMA
+PARTIAL
+PASCAL
+PATH
+PLACING
+PLI
+PRECEDING
+PRESERVE
+PRIOR
+PRIVILEGES
+PUBLIC
+READ
+RELATIVE
+REPEATABLE
+RESPECT
+RESTART
+RESTRICT
+RETURNED_CARDINALITY
+RETURNED_LENGTH
+RETURNED_OCTET_LENGTH
+RETURNED_SQLSTATE
+ROLE
+ROUTINE
+ROUTINE_CATALOG
+ROUTINE_NAME
+ROUTINE_SCHEMA
+ROW_COUNT
+SCALE
+SCHEMA
+SCHEMA_NAME
+SCOPE_CATALOG
+SCOPE_NAME
+SCOPE_SCHEMA
+SECTION
+SECURITY
+SELF
+SEQUENCE
+SERIALIZABLE
+SERVER_NAME
+SESSION
+SETS
+SIMPLE
+SIZE
+SOURCE
+SPACE
+SPECIFIC_NAME
+STATE
+STATEMENT
+STRUCTURE
+STYLE
+SUBCLASS_ORIGIN
+T
+TABLE_NAME
+TEMPORARY
+TIES
+TOP_LEVEL_COUNT
+TRANSACTION
+TRANSACTION_ACTIVE
+TRANSACTIONS_COMMITTED
+TRANSACTIONS_ROLLED_BACK
+TRANSFORM
+TRANSFORMS
+TRIGGER_CATALOG
+TRIGGER_NAME
+TRIGGER_SCHEMA
+TYPE
+UNBOUNDED
+UNCOMMITTED
+UNDER
+UNNAMED
+USAGE
+USER_DEFINED_TYPE_CATALOG
+USER_DEFINED_TYPE_CODE
+USER_DEFINED_TYPE_NAME
+USER_DEFINED_TYPE_SCHEMA
+VIEW
+WORK
+WRITE
+ZONE
diff --git a/doc/src/sgml/keywords/sql2011-02-reserved.txt b/doc/src/sgml/keywords/sql2011-02-reserved.txt
new file mode 100644
index 00000000000..95f99e7df20
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-02-reserved.txt
@@ -0,0 +1,324 @@
+ABS
+ALL
+ALLOCATE
+ALTER
+AND
+ANY
+ARE
+ARRAY
+ARRAY_AGG
+ARRAY_MAX_CARDINALITY
+AS
+ASENSITIVE
+ASYMMETRIC
+AT
+ATOMIC
+AUTHORIZATION
+AVG
+BEGIN
+BEGIN_FRAME
+BEGIN_PARTITION
+BETWEEN
+BIGINT
+BINARY
+BLOB
+BOOLEAN
+BOTH
+BY
+CALL
+CALLED
+CARDINALITY
+CASCADED
+CASE
+CAST
+CEIL
+CEILING
+CHAR
+CHAR_LENGTH
+CHARACTER
+CHARACTER_LENGTH
+CHECK
+CLOB
+CLOSE
+COALESCE
+COLLATE
+COLLECT
+COLUMN
+COMMIT
+CONDITION
+CONNECT
+CONSTRAINT
+CONTAINS
+CONVERT
+CORR
+CORRESPONDING
+COUNT
+COVAR_POP
+COVAR_SAMP
+CREATE
+CROSS
+CUBE
+CUME_DIST
+CURRENT
+CURRENT_CATALOG
+CURRENT_DATE
+CURRENT_DEFAULT_TRANSFORM_GROUP
+CURRENT_PATH
+CURRENT_ROLE
+CURRENT_ROW
+CURRENT_SCHEMA
+CURRENT_TIME
+CURRENT_TIMESTAMP
+CURRENT_TRANSFORM_GROUP_FOR_TYPE
+CURRENT_USER
+CURSOR
+CYCLE
+DATE
+DAY
+DEALLOCATE
+DEC
+DECIMAL
+DECLARE
+DEFAULT
+DELETE
+DENSE_RANK
+DEREF
+DESCRIBE
+DETERMINISTIC
+DISCONNECT
+DISTINCT
+DOUBLE
+DROP
+DYNAMIC
+EACH
+ELEMENT
+ELSE
+END
+END_FRAME
+END_PARTITION
+END-EXEC
+EQUALS
+ESCAPE
+EVERY
+EXCEPT
+EXEC
+EXECUTE
+EXISTS
+EXP
+EXTERNAL
+EXTRACT
+FALSE
+FETCH
+FILTER
+FIRST_VALUE
+FLOAT
+FLOOR
+FOR
+FOREIGN
+FRAME_ROW
+FREE
+FROM
+FULL
+FUNCTION
+FUSION
+GET
+GLOBAL
+GRANT
+GROUP
+GROUPING
+GROUPS
+HAVING
+HOLD
+HOUR
+IDENTITY
+IN
+INDICATOR
+INNER
+INOUT
+INSENSITIVE
+INSERT
+INT
+INTEGER
+INTERSECT
+INTERSECTION
+INTERVAL
+INTO
+IS
+JOIN
+LAG
+LANGUAGE
+LARGE
+LAST_VALUE
+LATERAL
+LEAD
+LEADING
+LEFT
+LIKE
+LIKE_REGEX
+LN
+LOCAL
+LOCALTIME
+LOCALTIMESTAMP
+LOWER
+MATCH
+MAX
+MEMBER
+MERGE
+METHOD
+MIN
+MINUTE
+MOD
+MODIFIES
+MODULE
+MONTH
+MULTISET
+NATIONAL
+NATURAL
+NCHAR
+NCLOB
+NEW
+NO
+NONE
+NORMALIZE
+NOT
+NTH_VALUE
+NTILE
+NULL
+NULLIF
+NUMERIC
+OCTET_LENGTH
+OCCURRENCES_REGEX
+OF
+OFFSET
+OLD
+ON
+ONLY
+OPEN
+OR
+ORDER
+OUT
+OUTER
+OVER
+OVERLAPS
+OVERLAY
+PARAMETER
+PARTITION
+PERCENT
+PERCENT_RANK
+PERCENTILE_CONT
+PERCENTILE_DISC
+PERIOD
+PORTION
+POSITION
+POSITION_REGEX
+POWER
+PRECEDES
+PRECISION
+PREPARE
+PRIMARY
+PROCEDURE
+RANGE
+RANK
+READS
+REAL
+RECURSIVE
+REF
+REFERENCES
+REFERENCING
+REGR_AVGX
+REGR_AVGY
+REGR_COUNT
+REGR_INTERCEPT
+REGR_R2
+REGR_SLOPE
+REGR_SXX
+REGR_SXY
+REGR_SYY
+RELEASE
+RESULT
+RETURN
+RETURNS
+REVOKE
+RIGHT
+ROLLBACK
+ROLLUP
+ROW
+ROW_NUMBER
+ROWS
+SAVEPOINT
+SCOPE
+SCROLL
+SEARCH
+SECOND
+SELECT
+SENSITIVE
+SESSION_USER
+SET
+SIMILAR
+SMALLINT
+SOME
+SPECIFIC
+SPECIFICTYPE
+SQL
+SQLEXCEPTION
+SQLSTATE
+SQLWARNING
+SQRT
+START
+STATIC
+STDDEV_POP
+STDDEV_SAMP
+SUBMULTISET
+SUBSTRING
+SUBSTRING_REGEX
+SUCCEEDS
+SUM
+SYMMETRIC
+SYSTEM
+SYSTEM_TIME
+SYSTEM_USER
+TABLE
+TABLESAMPLE
+THEN
+TIME
+TIMESTAMP
+TIMEZONE_HOUR
+TIMEZONE_MINUTE
+TO
+TRAILING
+TRANSLATE
+TRANSLATE_REGEX
+TRANSLATION
+TREAT
+TRIGGER
+TRUNCATE
+TRIM
+TRIM_ARRAY
+TRUE
+UESCAPE
+UNION
+UNIQUE
+UNKNOWN
+UNNEST
+UPDATE
+UPPER
+USER
+USING
+VALUE
+VALUES
+VALUE_OF
+VAR_POP
+VAR_SAMP
+VARBINARY
+VARCHAR
+VARYING
+VERSIONING
+WHEN
+WHENEVER
+WHERE
+WIDTH_BUCKET
+WINDOW
+WITH
+WITHIN
+WITHOUT
+YEAR
diff --git a/doc/src/sgml/keywords/sql2011-09-nonreserved.txt b/doc/src/sgml/keywords/sql2011-09-nonreserved.txt
new file mode 100644
index 00000000000..b360f0ce861
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-09-nonreserved.txt
@@ -0,0 +1,23 @@
+BLOCKED
+CONTROL
+DB
+FILE
+FS
+INTEGRITY
+LIBRARY
+LIMIT
+LINK
+MAPPING
+OFF
+PASSTHROUGH
+PERMISSION
+RECOVERY
+REQUIRING
+RESTORE
+SELECTIVE
+SERVER
+TOKEN
+UNLINK
+VERSION
+WRAPPER
+YES
diff --git a/doc/src/sgml/keywords/sql2011-09-reserved.txt b/doc/src/sgml/keywords/sql2011-09-reserved.txt
new file mode 100644
index 00000000000..02054857667
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-09-reserved.txt
@@ -0,0 +1,13 @@
+DATALINK
+DLNEWCOPY
+DLPREVIOUSCOPY
+DLURLCOMPLETE
+DLURLCOMPLETEWRITE
+DLURLCOMPLETEONLY
+DLURLPATH
+DLURLPATHWRITE
+DLURLPATHONLY
+DLURLSCHEME
+DLURLSERVER
+DLVALUE
+IMPORT
diff --git a/doc/src/sgml/keywords/sql2011-14-nonreserved.txt b/doc/src/sgml/keywords/sql2011-14-nonreserved.txt
new file mode 100644
index 00000000000..317f651f0ee
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-14-nonreserved.txt
@@ -0,0 +1,29 @@
+ABSENT
+ACCORDING
+BASE64
+BOM
+COLUMNS
+CONTENT
+DOCUMENT
+EMPTY
+ENCODING
+HEX
+ID
+INDENT
+LOCATION
+NAMESPACE
+NIL
+PASSING
+PATH
+PRESERVE
+RETURNING
+SEQUENCE
+STANDALONE
+STRIP
+UNTYPED
+URI
+VALID
+VERSION
+WHITESPACE
+XMLSCHEMA
+XMLDECLARATION
diff --git a/doc/src/sgml/keywords/sql2011-14-reserved.txt b/doc/src/sgml/keywords/sql2011-14-reserved.txt
new file mode 100644
index 00000000000..cf8052946f9
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2011-14-reserved.txt
@@ -0,0 +1,20 @@
+XML
+XMLAGG
+XMLATTRIBUTES
+XMLBINARY
+XMLCAST
+XMLCOMMENT
+XMLCONCAT
+XMLDOCUMENT
+XMLELEMENT
+XMLEXISTS
+XMLFOREST
+XMLITERATE
+XMLNAMESPACES
+XMLPARSE
+XMLPI
+XMLQUERY
+XMLSERIALIZE
+XMLTABLE
+XMLTEXT
+XMLVALIDATE
diff --git a/doc/src/sgml/keywords/sql2016-02-nonreserved.txt b/doc/src/sgml/keywords/sql2016-02-nonreserved.txt
new file mode 100644
index 00000000000..f39e52e475d
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-02-nonreserved.txt
@@ -0,0 +1,248 @@
+A
+ABSOLUTE
+ACTION
+ADA
+ADD
+ADMIN
+AFTER
+ALWAYS
+ASC
+ASSERTION
+ASSIGNMENT
+ATTRIBUTE
+ATTRIBUTES
+BEFORE
+BERNOULLI
+BREADTH
+C
+CASCADE
+CATALOG
+CATALOG_NAME
+CHAIN
+CHAINING
+CHARACTER_SET_CATALOG
+CHARACTER_SET_NAME
+CHARACTER_SET_SCHEMA
+CHARACTERISTICS
+CHARACTERS
+CLASS_ORIGIN
+COBOL
+COLLATION
+COLLATION_CATALOG
+COLLATION_NAME
+COLLATION_SCHEMA
+COLUMNS
+COLUMN_NAME
+COMMAND_FUNCTION
+COMMAND_FUNCTION_CODE
+COMMITTED
+CONDITIONAL
+CONDITION_NUMBER
+CONNECTION
+CONNECTION_NAME
+CONSTRAINT_CATALOG
+CONSTRAINT_NAME
+CONSTRAINT_SCHEMA
+CONSTRAINTS
+CONSTRUCTOR
+CONTINUE
+CURSOR_NAME
+DATA
+DATETIME_INTERVAL_CODE
+DATETIME_INTERVAL_PRECISION
+DEFAULTS
+DEFERRABLE
+DEFERRED
+DEFINED
+DEFINER
+DEGREE
+DEPTH
+DERIVED
+DESC
+DESCRIPTOR
+DIAGNOSTICS
+DISPATCH
+DOMAIN
+DYNAMIC_FUNCTION
+DYNAMIC_FUNCTION_CODE
+ENCODING
+ENFORCED
+ERROR
+EXCLUDE
+EXCLUDING
+EXPRESSION
+FINAL
+FINISH
+FIRST
+FLAG
+FOLLOWING
+FORMAT
+FORTRAN
+FOUND
+FULFILL
+G
+GENERAL
+GENERATED
+GO
+GOTO
+GRANTED
+HIERARCHY
+IGNORE
+IMMEDIATE
+IMMEDIATELY
+IMPLEMENTATION
+INCLUDING
+INCREMENT
+INITIALLY
+INPUT
+INSTANCE
+INSTANTIABLE
+INSTEAD
+INVOKER
+ISOLATION
+JSON
+K
+KEEP
+KEY
+KEYS
+KEY_MEMBER
+KEY_TYPE
+LAST
+LENGTH
+LEVEL
+LOCATOR
+M
+MAP
+MATCHED
+MAXVALUE
+MESSAGE_LENGTH
+MESSAGE_OCTET_LENGTH
+MESSAGE_TEXT
+MINVALUE
+MORE
+MUMPS
+NAME
+NAMES
+NESTED
+NESTING
+NEXT
+NFC
+NFD
+NFKC
+NFKD
+NORMALIZED
+NULLABLE
+NULLS
+NUMBER
+OBJECT
+OCTETS
+OPTION
+OPTIONS
+ORDERING
+ORDINALITY
+OTHERS
+OUTPUT
+OVERFLOW
+OVERRIDING
+P
+PAD
+PARAMETER_MODE
+PARAMETER_NAME
+PARAMETER_ORDINAL_POSITION
+PARAMETER_SPECIFIC_CATALOG
+PARAMETER_SPECIFIC_NAME
+PARAMETER_SPECIFIC_SCHEMA
+PARTIAL
+PASCAL
+PASS
+PASSING
+PAST
+PATH
+PLACING
+PLAN
+PLI
+PRECEDING
+PRESERVE
+PRIOR
+PRIVATE
+PRIVILEGES
+PRUNE
+PUBLIC
+QUOTES
+READ
+RELATIVE
+REPEATABLE
+RESPECT
+RESTART
+RESTRICT
+RETURNED_CARDINALITY
+RETURNED_LENGTH
+RETURNED_OCTET_LENGTH
+RETURNED_SQLSTATE
+RETURNING
+ROLE
+ROUTINE
+ROUTINE_CATALOG
+ROUTINE_NAME
+ROUTINE_SCHEMA
+ROW_COUNT
+SCALAR
+SCALE
+SCHEMA
+SCHEMA_NAME
+SCOPE_CATALOG
+SCOPE_NAME
+SCOPE_SCHEMA
+SECTION
+SECURITY
+SELF
+SEQUENCE
+SERIALIZABLE
+SERVER_NAME
+SESSION
+SETS
+SIMPLE
+SIZE
+SOURCE
+SPACE
+SPECIFIC_NAME
+STATE
+STATEMENT
+STRING
+STRUCTURE
+STYLE
+SUBCLASS_ORIGIN
+T
+TABLE_NAME
+TEMPORARY
+THROUGH
+TIES
+TOP_LEVEL_COUNT
+TRANSACTION
+TRANSACTION_ACTIVE
+TRANSACTIONS_COMMITTED
+TRANSACTIONS_ROLLED_BACK
+TRANSFORM
+TRANSFORMS
+TRIGGER_CATALOG
+TRIGGER_NAME
+TRIGGER_SCHEMA
+TYPE
+UNBOUNDED
+UNCOMMITTED
+UNCONDITIONAL
+UNDER
+UNNAMED
+USAGE
+USER_DEFINED_TYPE_CATALOG
+USER_DEFINED_TYPE_CODE
+USER_DEFINED_TYPE_NAME
+USER_DEFINED_TYPE_SCHEMA
+UTF16
+UTF32
+UTF8
+VIEW
+WORK
+WRAPPER
+WRITE
+ZONE
diff --git a/doc/src/sgml/keywords/sql2016-02-reserved.txt b/doc/src/sgml/keywords/sql2016-02-reserved.txt
new file mode 100644
index 00000000000..ae110123881
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-02-reserved.txt
@@ -0,0 +1,367 @@
+ABS
+ACOS
+ALL
+ALLOCATE
+ALTER
+AND
+ANY
+ARE
+ARRAY
+ARRAY_AGG
+ARRAY_MAX_CARDINALITY
+AS
+ASENSITIVE
+ASIN
+ASYMMETRIC
+AT
+ATAN
+ATOMIC
+AUTHORIZATION
+AVG
+BEGIN
+BEGIN_FRAME
+BEGIN_PARTITION
+BETWEEN
+BIGINT
+BINARY
+BLOB
+BOOLEAN
+BOTH
+BY
+CALL
+CALLED
+CARDINALITY
+CASCADED
+CASE
+CAST
+CEIL
+CEILING
+CHAR
+CHAR_LENGTH
+CHARACTER
+CHARACTER_LENGTH
+CHECK
+CLASSIFIER
+CLOB
+CLOSE
+COALESCE
+COLLATE
+COLLECT
+COLUMN
+COMMIT
+CONDITION
+CONNECT
+CONSTRAINT
+CONTAINS
+CONVERT
+COPY
+CORR
+CORRESPONDING
+COS
+COSH
+COUNT
+COVAR_POP
+COVAR_SAMP
+CREATE
+CROSS
+CUBE
+CUME_DIST
+CURRENT
+CURRENT_CATALOG
+CURRENT_DATE
+CURRENT_DEFAULT_TRANSFORM_GROUP
+CURRENT_PATH
+CURRENT_ROLE
+CURRENT_ROW
+CURRENT_SCHEMA
+CURRENT_TIME
+CURRENT_TIMESTAMP
+CURRENT_TRANSFORM_GROUP_FOR_TYPE
+CURRENT_USER
+CURSOR
+CYCLE
+DATE
+DAY
+DEALLOCATE
+DEC
+DECIMAL
+DECFLOAT
+DECLARE
+DEFAULT
+DEFINE
+DELETE
+DENSE_RANK
+DEREF
+DESCRIBE
+DETERMINISTIC
+DISCONNECT
+DISTINCT
+DOUBLE
+DROP
+DYNAMIC
+EACH
+ELEMENT
+ELSE
+EMPTY
+END
+END_FRAME
+END_PARTITION
+END-EXEC
+EQUALS
+ESCAPE
+EVERY
+EXCEPT
+EXEC
+EXECUTE
+EXISTS
+EXP
+EXTERNAL
+EXTRACT
+FALSE
+FETCH
+FILTER
+FIRST_VALUE
+FLOAT
+FLOOR
+FOR
+FOREIGN
+FRAME_ROW
+FREE
+FROM
+FULL
+FUNCTION
+FUSION
+GET
+GLOBAL
+GRANT
+GROUP
+GROUPING
+GROUPS
+HAVING
+HOLD
+HOUR
+IDENTITY
+IN
+INDICATOR
+INITIAL
+INNER
+INOUT
+INSENSITIVE
+INSERT
+INT
+INTEGER
+INTERSECT
+INTERSECTION
+INTERVAL
+INTO
+IS
+JOIN
+JSON_ARRAY
+JSON_ARRAYAGG
+JSON_EXISTS
+JSON_OBJECT
+JSON_OBJECTAGG
+JSON_QUERY
+JSON_TABLE
+JSON_TABLE_PRIMITIVE
+JSON_VALUE
+LAG
+LANGUAGE
+LARGE
+LAST_VALUE
+LATERAL
+LEAD
+LEADING
+LEFT
+LIKE
+LIKE_REGEX
+LISTAGG
+LN
+LOCAL
+LOCALTIME
+LOCALTIMESTAMP
+LOG
+LOG10
+LOWER
+MATCH
+MATCH_NUMBER
+MATCH_RECOGNIZE
+MATCHES
+MAX
+MEASURES
+MEMBER
+MERGE
+METHOD
+MIN
+MINUTE
+MOD
+MODIFIES
+MODULE
+MONTH
+MULTISET
+NATIONAL
+NATURAL
+NCHAR
+NCLOB
+NEW
+NO
+NONE
+NORMALIZE
+NOT
+NTH_VALUE
+NTILE
+NULL
+NULLIF
+NUMERIC
+OCTET_LENGTH
+OCCURRENCES_REGEX
+OF
+OFFSET
+OLD
+OMIT
+ON
+ONE
+ONLY
+OPEN
+OR
+ORDER
+OUT
+OUTER
+OVER
+OVERLAPS
+OVERLAY
+PARAMETER
+PARTITION
+PATTERN
+PER
+PERCENT
+PERCENT_RANK
+PERCENTILE_CONT
+PERCENTILE_DISC
+PERIOD
+PERMUTE
+PORTION
+POSITION
+POSITION_REGEX
+POWER
+PRECEDES
+PRECISION
+PREPARE
+PRIMARY
+PROCEDURE
+PTF
+RANGE
+RANK
+READS
+REAL
+RECURSIVE
+REF
+REFERENCES
+REFERENCING
+REGR_AVGX
+REGR_AVGY
+REGR_COUNT
+REGR_INTERCEPT
+REGR_R2
+REGR_SLOPE
+REGR_SXX
+REGR_SXY
+REGR_SYY
+RELEASE
+RESULT
+RETURN
+RETURNS
+REVOKE
+RIGHT
+ROLLBACK
+ROLLUP
+ROW
+ROW_NUMBER
+ROWS
+RUNNING
+SAVEPOINT
+SCOPE
+SCROLL
+SEARCH
+SECOND
+SEEK
+SELECT
+SENSITIVE
+SESSION_USER
+SET
+SHOW
+SIMILAR
+SIN
+SINH
+SKIP
+SMALLINT
+SOME
+SPECIFIC
+SPECIFICTYPE
+SQL
+SQLEXCEPTION
+SQLSTATE
+SQLWARNING
+SQRT
+START
+STATIC
+STDDEV_POP
+STDDEV_SAMP
+SUBMULTISET
+SUBSET
+SUBSTRING
+SUBSTRING_REGEX
+SUCCEEDS
+SUM
+SYMMETRIC
+SYSTEM
+SYSTEM_TIME
+SYSTEM_USER
+TABLE
+TABLESAMPLE
+TAN
+TANH
+THEN
+TIME
+TIMESTAMP
+TIMEZONE_HOUR
+TIMEZONE_MINUTE
+TO
+TRAILING
+TRANSLATE
+TRANSLATE_REGEX
+TRANSLATION
+TREAT
+TRIGGER
+TRIM
+TRIM_ARRAY
+TRUE
+TRUNCATE
+UESCAPE
+UNION
+UNIQUE
+UNKNOWN
+UNMATCHED
+UNNEST
+UPDATE
+UPPER
+USER
+USING
+VALUE
+VALUES
+VALUE_OF
+VAR_POP
+VAR_SAMP
+VARBINARY
+VARCHAR
+VARYING
+VERSIONING
+WHEN
+WHENEVER
+WHERE
+WIDTH_BUCKET
+WINDOW
+WITH
+WITHIN
+WITHOUT
+YEAR
diff --git a/doc/src/sgml/keywords/sql2016-09-nonreserved.txt b/doc/src/sgml/keywords/sql2016-09-nonreserved.txt
new file mode 100644
index 00000000000..b360f0ce861
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-09-nonreserved.txt
@@ -0,0 +1,23 @@
+BLOCKED
+CONTROL
+DB
+FILE
+FS
+INTEGRITY
+LIBRARY
+LIMIT
+LINK
+MAPPING
+OFF
+PASSTHROUGH
+PERMISSION
+RECOVERY
+REQUIRING
+RESTORE
+SELECTIVE
+SERVER
+TOKEN
+UNLINK
+VERSION
+WRAPPER
+YES
diff --git a/doc/src/sgml/keywords/sql2016-09-reserved.txt b/doc/src/sgml/keywords/sql2016-09-reserved.txt
new file mode 100644
index 00000000000..02054857667
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-09-reserved.txt
@@ -0,0 +1,13 @@
+DATALINK
+DLNEWCOPY
+DLPREVIOUSCOPY
+DLURLCOMPLETE
+DLURLCOMPLETEWRITE
+DLURLCOMPLETEONLY
+DLURLPATH
+DLURLPATHWRITE
+DLURLPATHONLY
+DLURLSCHEME
+DLURLSERVER
+DLVALUE
+IMPORT
diff --git a/doc/src/sgml/keywords/sql2016-14-nonreserved.txt b/doc/src/sgml/keywords/sql2016-14-nonreserved.txt
new file mode 100644
index 00000000000..00c88ffc57d
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-14-nonreserved.txt
@@ -0,0 +1,27 @@
+ABSENT
+ACCORDING
+BASE64
+BOM
+COLUMNS
+CONTENT
+DOCUMENT
+ENCODING
+HEX
+ID
+INDENT
+LOCATION
+NAMESPACE
+NIL
+PATH
+PRESERVE
+RETURNING
+SEQUENCE
+STANDALONE
+STRIP
+UNTYPED
+URI
+VALID
+VERSION
+WHITESPACE
+XMLSCHEMA
+XMLDECLARATION
diff --git a/doc/src/sgml/keywords/sql2016-14-reserved.txt b/doc/src/sgml/keywords/sql2016-14-reserved.txt
new file mode 100644
index 00000000000..cf8052946f9
--- /dev/null
+++ b/doc/src/sgml/keywords/sql2016-14-reserved.txt
@@ -0,0 +1,20 @@
+XML
+XMLAGG
+XMLATTRIBUTES
+XMLBINARY
+XMLCAST
+XMLCOMMENT
+XMLCONCAT
+XMLDOCUMENT
+XMLELEMENT
+XMLEXISTS
+XMLFOREST
+XMLITERATE
+XMLNAMESPACES
+XMLPARSE
+XMLPI
+XMLQUERY
+XMLSERIALIZE
+XMLTABLE
+XMLTEXT
+XMLVALIDATE
diff --git a/doc/src/sgml/legal.sgml b/doc/src/sgml/legal.sgml
index fd5cda30b74..9bb1d7983be 100644
--- a/doc/src/sgml/legal.sgml
+++ b/doc/src/sgml/legal.sgml
@@ -1,9 +1,9 @@
-2018
+2019
- 1996-2018
+ 1996-2019
The PostgreSQL Global Development Group
@@ -11,7 +11,7 @@
Legal Notice
- PostgreSQL is Copyright © 1996-2018
+ PostgreSQL is Copyright © 1996-2019
by the PostgreSQL Global Development Group.
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 1626999a701..c58527b0c3b 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -56,12 +56,12 @@
one time. (One reason to do that is to access more than one
database.) Each connection is represented by a
PGconn PGconn object, which
- is obtained from the function PQconnectdb ,
- PQconnectdbParams , or
- PQsetdbLogin . Note that these functions will always
+ is obtained from the function PQconnectdb ,
+ PQconnectdbParams , or
+ PQsetdbLogin . Note that these functions will always
return a non-null object pointer, unless perhaps there is too
little memory even to allocate the PGconn object.
- The PQstatus function should be called to check
+ The PQstatus function should be called to check
the return value for a successful connection before queries are sent
via the connection object.
@@ -107,7 +107,7 @@
-
+
PQconnectdbParams PQconnectdbParams
@@ -125,9 +125,9 @@ PGconn *PQconnectdbParams(const char * const *keywords,
from two NULL -terminated arrays. The first,
keywords , is defined as an array of strings, each one
being a key word. The second, values , gives the value
- for each key word. Unlike PQsetdbLogin below, the parameter
+ for each key word. Unlike PQsetdbLogin below, the parameter
set can be extended without changing the function signature, so use of
- this function (or its nonblocking analogs PQconnectStartParams
+ this function (or its nonblocking analogs PQconnectStartParams
and PQconnectPoll ) is preferred for new application
programming.
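
As a hedged illustration of the keywords/values calling convention described above (the host and database names are placeholders, not values taken from this patch):

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        /* Parallel keyword/value arrays; both are terminated by a NULL
         * keyword entry.  All parameter values here are placeholders. */
        const char *keywords[] = {"host", "dbname", "connect_timeout", NULL};
        const char *values[]   = {"localhost", "postgres", "5", NULL};
        PGconn     *conn = PQconnectdbParams(keywords, values, 0);

        if (PQstatus(conn) != CONNECTION_OK)
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        else
            printf("connected to database %s\n", PQdb(conn));

        /* PQfinish() must be called even when the attempt failed. */
        PQfinish(conn);
        return 0;
    }
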
@@ -172,7 +172,7 @@ PGconn *PQconnectdbParams(const char * const *keywords,
-
+
PQconnectdb PQconnectdb
@@ -199,7 +199,7 @@ PGconn *PQconnectdb(const char *conninfo);
-
+
PQsetdbLogin PQsetdbLogin
@@ -216,7 +216,7 @@ PGconn *PQsetdbLogin(const char *pghost,
- This is the predecessor of PQconnectdb with a fixed
+ This is the predecessor of PQconnectdb with a fixed
set of parameters. It has the same functionality except that the
missing parameters will always take on default values. Write NULL or an
empty string for any one of the fixed parameters that is to be defaulted.
@@ -226,13 +226,13 @@ PGconn *PQsetdbLogin(const char *pghost,
If the dbName contains
an = sign or has a valid connection URI prefix, it
is taken as a conninfo string in exactly the same way as
- if it had been passed to PQconnectdb , and the remaining
- parameters are then applied as specified for PQconnectdbParams .
+ if it had been passed to PQconnectdb , and the remaining
+ parameters are then applied as specified for PQconnectdbParams .
-
+
PQsetdb PQsetdb
@@ -247,14 +247,14 @@ PGconn *PQsetdb(char *pghost,
- This is a macro that calls PQsetdbLogin with null pointers
+ This is a macro that calls PQsetdbLogin with null pointers
for the login and pwd parameters. It is provided
for backward compatibility with very old programs.
-
+
PQconnectStartParams PQconnectStartParams
PQconnectStart PQconnectStart
PQconnectPoll PQconnectPoll
@@ -279,46 +279,46 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn);
that your application's thread of execution is not blocked on remote I/O
whilst doing so. The point of this approach is that the waits for I/O to
complete can occur in the application's main loop, rather than down inside
- PQconnectdbParams or PQconnectdb , and so the
+ PQconnectdbParams or PQconnectdb , and so the
application can manage this operation in parallel with other activities.
- With PQconnectStartParams , the database connection is made
+ With PQconnectStartParams , the database connection is made
using the parameters taken from the keywords and
values arrays, and controlled by expand_dbname ,
- as described above for PQconnectdbParams .
+ as described above for PQconnectdbParams .
With PQconnectStart , the database connection is made
using the parameters taken from the string conninfo as
- described above for PQconnectdb .
+ described above for PQconnectdb .
- Neither PQconnectStartParams nor PQconnectStart
+ Neither PQconnectStartParams nor PQconnectStart
nor PQconnectPoll will block, so long as a number of
restrictions are met:
- The hostaddr and host parameters are used appropriately to ensure that
- name and reverse name queries are not made. See the documentation of
- these parameters in for details.
+ The hostaddr parameter must be used appropriately
+ to prevent DNS queries from being made. See the documentation of
+ this parameter in for details.
- If you call PQtrace , ensure that the stream object
+ If you call PQtrace , ensure that the stream object
into which you trace will not block.
- You ensure that the socket is in the appropriate state
+ You must ensure that the socket is in the appropriate state
before calling PQconnectPoll , as described below.
@@ -326,24 +326,27 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn);
- Note: use of PQconnectStartParams is analogous to
- PQconnectStart shown below.
+ To begin a nonblocking connection request,
+ call PQconnectStart
+ or PQconnectStartParams . If the result is null,
+ then libpq has been unable to allocate a
+ new PGconn structure. Otherwise, a
+ valid PGconn pointer is returned (though not
+ yet representing a valid connection to the database). Next
+ call PQstatus(conn) . If the result
+ is CONNECTION_BAD , the connection attempt has already
+ failed, typically because of invalid connection parameters.
- To begin a nonblocking connection request, call conn = PQconnectStart("connection_info_string ") .
- If conn is null, then libpq has been unable to allocate a new PGconn
- structure. Otherwise, a valid PGconn pointer is returned (though not yet
- representing a valid connection to the database). On return from
- PQconnectStart , call status = PQstatus(conn) . If status equals
- CONNECTION_BAD , PQconnectStart has failed.
-
-
-
- If PQconnectStart succeeds, the next stage is to poll
- libpq so that it can proceed with the connection sequence.
+ If PQconnectStart
+ or PQconnectStartParams succeeds, the next stage
+ is to poll libpq so that it can proceed with
+ the connection sequence.
Use PQsocket(conn) to obtain the descriptor of the
socket underlying the database connection.
+ (Caution: do not assume that the socket remains the same
+ across PQconnectPoll calls.)
Loop thus: If PQconnectPoll(conn) last returned
PGRES_POLLING_READING , wait until the socket is ready to
read (as indicated by select() , poll() , or
@@ -352,9 +355,8 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn);
Conversely, if PQconnectPoll(conn) last returned
PGRES_POLLING_WRITING , wait until the socket is ready
to write, then call PQconnectPoll(conn) again.
- If you have yet to call
- PQconnectPoll , i.e., just after the call to
- PQconnectStart , behave as if it last returned
+ On the first iteration, i.e. if you have yet to call
+ PQconnectPoll , behave as if it last returned
PGRES_POLLING_WRITING . Continue this loop until
PQconnectPoll(conn) returns
PGRES_POLLING_FAILED , indicating the connection procedure
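
A minimal sketch of the polling loop just described, assuming a POSIX select() wait and a placeholder conninfo string; a real application would integrate these waits into its own event loop:

    #include <stdio.h>
    #include <sys/select.h>
    #include <libpq-fe.h>

    /* Drive a nonblocking connection attempt to completion.  Returns 1 on
     * success, 0 on failure. */
    static int
    connect_poll_loop(PGconn *conn)
    {
        /* Before the first PQconnectPoll() call, behave as if it had
         * last returned PGRES_POLLING_WRITING. */
        PostgresPollingStatusType st = PGRES_POLLING_WRITING;

        while (st != PGRES_POLLING_OK && st != PGRES_POLLING_FAILED)
        {
            /* Re-fetch the descriptor every iteration: the socket is not
             * guaranteed to stay the same across PQconnectPoll() calls. */
            int     sock = PQsocket(conn);
            fd_set  fds;

            if (sock < 0)
                return 0;
            FD_ZERO(&fds);
            FD_SET(sock, &fds);
            if (select(sock + 1,
                       st == PGRES_POLLING_READING ? &fds : NULL,
                       st == PGRES_POLLING_WRITING ? &fds : NULL,
                       NULL, NULL) < 0)
                return 0;
            st = PQconnectPoll(conn);
        }
        return st == PGRES_POLLING_OK;
    }

    int
    main(void)
    {
        PGconn *conn = PQconnectStart("dbname=postgres");   /* placeholder */

        if (conn == NULL)
            return 1;           /* could not even allocate a PGconn */
        if (PQstatus(conn) == CONNECTION_BAD || !connect_poll_loop(conn))
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        else
            printf("connected\n");
        PQfinish(conn);         /* required even if the attempt failed */
        return 0;
    }
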
@@ -364,7 +366,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn);
At any time during connection, the status of the connection can be
- checked by calling PQstatus . If this call returns CONNECTION_BAD , then the
+ checked by calling PQstatus . If this call returns CONNECTION_BAD , then the
connection procedure has failed; if the call returns CONNECTION_OK , then the
connection is ready. Both of these states are equally detectable
from the return value of PQconnectPoll , described above. Other states might also occur
@@ -475,19 +477,21 @@ switch(PQstatus(conn))
responsibility to decide whether an excessive amount of time has elapsed.
Otherwise, PQconnectStart followed by a
PQconnectPoll loop is equivalent to
- PQconnectdb .
+ PQconnectdb .
- Note that if PQconnectStart returns a non-null pointer, you must call
- PQfinish when you are finished with it, in order to dispose of
- the structure and any associated memory blocks. This must be done even if
- the connection attempt fails or is abandoned.
+ Note that when PQconnectStart
+ or PQconnectStartParams returns a non-null
+ pointer, you must call PQfinish when you are
+ finished with it, in order to dispose of the structure and any
+ associated memory blocks. This must be done even if the connection
+ attempt fails or is abandoned.
-
+
PQconndefaults PQconndefaults
@@ -514,7 +518,7 @@ typedef struct
Returns a connection options array. This can be used to determine
- all possible PQconnectdb options and their
+ all possible PQconnectdb options and their
current default values. The return value points to an array of
PQconninfoOption structures, which ends
with an entry having a null keyword pointer. The
@@ -527,14 +531,14 @@ typedef struct
After processing the options array, free it by passing it to
- PQconninfoFree . If this is not done, a small amount of memory
- is leaked for each call to PQconndefaults .
+ PQconninfoFree . If this is not done, a small amount of memory
+ is leaked for each call to PQconndefaults .
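
A short sketch of walking the returned array (the printf formatting is illustrative):

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        PQconninfoOption *opts = PQconndefaults();
        PQconninfoOption *opt;

        if (opts == NULL)
            return 1;           /* out of memory */

        /* The array ends with an entry whose keyword pointer is NULL. */
        for (opt = opts; opt->keyword != NULL; opt++)
            printf("%-24s default = %s\n", opt->keyword,
                   opt->val ? opt->val : "(none)");

        PQconninfoFree(opts);   /* otherwise a little memory leaks per call */
        return 0;
    }
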
-
+
PQconninfo PQconninfo
@@ -546,19 +550,19 @@ PQconninfoOption *PQconninfo(PGconn *conn);
Returns a connection options array. This can be used to determine
- all possible PQconnectdb options and the
+ all possible PQconnectdb options and the
values that were used to connect to the server. The return
value points to an array of PQconninfoOption
structures, which ends with an entry having a null keyword
- pointer. All notes above for PQconndefaults also
- apply to the result of PQconninfo .
+ pointer. All notes above for PQconndefaults also
+ apply to the result of PQconninfo .
-
+
PQconninfoParse PQconninfoParse
@@ -573,7 +577,7 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg);
Parses a connection string and returns the resulting options as an
array; or returns NULL if there is a problem with the connection
string. This function can be used to extract
- the PQconnectdb options in the provided
+ the PQconnectdb options in the provided
connection string. The return value points to an array of
PQconninfoOption structures, which ends
with an entry having a null keyword pointer.
@@ -596,16 +600,16 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg);
After processing the options array, free it by passing it to
- PQconninfoFree . If this is not done, some memory
- is leaked for each call to PQconninfoParse .
+ PQconninfoFree . If this is not done, some memory
+ is leaked for each call to PQconninfoParse .
Conversely, if an error occurs and errmsg is not NULL ,
- be sure to free the error string using PQfreemem .
+ be sure to free the error string using PQfreemem .
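
A hedged sketch of parsing a connection string and honoring the errmsg contract described above; the connection string itself is a placeholder:

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        char *errmsg = NULL;
        PQconninfoOption *opts =
            PQconninfoParse("host=db.example.com port=5432 dbname=app",
                            &errmsg);
        PQconninfoOption *opt;

        if (opts == NULL)
        {
            /* errmsg may be NULL on out-of-memory; otherwise it must be
             * released with PQfreemem, not free(). */
            fprintf(stderr, "bad connection string: %s\n",
                    errmsg ? errmsg : "out of memory");
            if (errmsg)
                PQfreemem(errmsg);
            return 1;
        }

        for (opt = opts; opt->keyword != NULL; opt++)
            if (opt->val != NULL)
                printf("%s = %s\n", opt->keyword, opt->val);

        PQconninfoFree(opts);
        return 0;
    }
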
-
+
PQfinish PQfinish
@@ -618,15 +622,15 @@ void PQfinish(PGconn *conn);
Note that even if the server connection attempt fails (as
- indicated by PQstatus ), the application should call PQfinish
+ indicated by PQstatus ), the application should call PQfinish
to free the memory used by the PGconn object.
The PGconn pointer must not be used again after
- PQfinish has been called.
+ PQfinish has been called.
-
+
PQreset PQreset
@@ -646,7 +650,7 @@ void PQreset(PGconn *conn);
-
+
PQresetStart PQresetStart
PQresetPoll PQresetPoll
@@ -664,15 +668,15 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn);
These functions will close the connection to the server and attempt to
reestablish a new connection to the same server, using all the same
parameters previously used. This can be useful for error recovery if a
- working connection is lost. They differ from PQreset (above) in that they
+ working connection is lost. They differ from PQreset (above) in that they
act in a nonblocking manner. These functions suffer from the same
- restrictions as PQconnectStartParams , PQconnectStart
+ restrictions as PQconnectStartParams , PQconnectStart
and PQconnectPoll .
To initiate a connection reset, call
- PQresetStart . If it returns 0, the reset has
+ PQresetStart . If it returns 0, the reset has
failed. If it returns 1, poll the reset using
PQresetPoll in exactly the same way as you
would create the connection using PQconnectPoll .
@@ -680,13 +684,13 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn);
-
+
PQpingParams PQpingParams
- PQpingParams reports the status of the
+ PQpingParams reports the status of the
server. It accepts connection parameters identical to those of
- PQconnectdbParams , described above. It is not
+ PQconnectdbParams , described above. It is not
necessary to supply correct user name, password, or database name
values to obtain the server status; however, if incorrect values
are provided, the server will log a failed connection attempt.
@@ -700,7 +704,7 @@ PGPing PQpingParams(const char * const *keywords,
The function returns one of the following values:
-
+
PQPING_OK
@@ -709,7 +713,7 @@ PGPing PQpingParams(const char * const *keywords,
-
+
PQPING_REJECT
@@ -719,7 +723,7 @@ PGPing PQpingParams(const char * const *keywords,
-
+
PQPING_NO_RESPONSE
@@ -732,7 +736,7 @@ PGPing PQpingParams(const char * const *keywords,
-
+
PQPING_NO_ATTEMPT
@@ -749,13 +753,13 @@ PGPing PQpingParams(const char * const *keywords,
-
+
PQping PQping
- PQping reports the status of the
+ PQping reports the status of the
server. It accepts connection parameters identical to those of
- PQconnectdb , described above. It is not
+ PQconnectdb , described above. It is not
necessary to supply correct user name, password, or database name
values to obtain the server status; however, if incorrect values
are provided, the server will log a failed connection attempt.
@@ -766,7 +770,7 @@ PGPing PQping(const char *conninfo);
- The return values are the same as for PQpingParams .
+ The return values are the same as for PQpingParams .
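
A minimal sketch mapping the four return values to messages (the conninfo string is a placeholder):

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        switch (PQping("host=localhost port=5432"))     /* placeholder */
        {
            case PQPING_OK:
                printf("server is accepting connections\n");
                break;
            case PQPING_REJECT:
                printf("server is alive but rejecting connections\n");
                break;
            case PQPING_NO_RESPONSE:
                printf("server could not be contacted\n");
                break;
            case PQPING_NO_ATTEMPT:
                printf("no attempt was made (client-side problem)\n");
                break;
        }
        return 0;
    }
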
@@ -913,7 +917,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
It is possible to specify multiple hosts to connect to, so that they are
   tried in the given order. In the Keyword/Value format, the host,
   hostaddr, and port options accept a comma-separated
- list of values. The same number of elements must be given in each option, such
+ list of values. The same number of elements must be given in each
+ option that is specified, such
that e.g. the first hostaddr corresponds to the first host name,
the second hostaddr corresponds to the second host name, and so
forth. As an exception, if only one port is specified, it
@@ -922,13 +927,17 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
In the connection URI format, you can list multiple host:port pairs
- separated by commas, in the host component of the URI. In either
- format, a single hostname can also translate to multiple network addresses. A
- common example of this is a host that has both an IPv4 and an IPv6 address.
+ separated by commas, in the host component of the URI.
+
+
+
+ In either format, a single host name can translate to multiple network
+ addresses. A common example of this is a host that has both an IPv4 and
+ an IPv6 address.
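   For instance (the host names are made up), the same two-candidate
   connection can be written in either format:

    host=pg1.example.com,pg2.example.com port=5432,5433 dbname=mydb

    postgresql://pg1.example.com:5432,pg2.example.com:5433/mydb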
- When multiple hosts are specified, or when a single hostname is
+ When multiple hosts are specified, or when a single host name is
translated to multiple addresses, all the hosts and addresses will be
tried in order, until one succeeds. If none of the hosts can be reached,
the connection fails. If a connection is established successfully, but
@@ -938,8 +947,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
If a password file is used, you can have different passwords for
different hosts. All the other connection options are the same for every
- host, it is not possible to e.g. specify a different username for
- different hosts.
+ host in the list; it is not possible to e.g. specify different
+ usernames for different hosts.
@@ -958,10 +967,9 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
Name of host to connect to.host name
If a host name begins with a slash, it specifies Unix-domain
communication rather than TCP/IP communication; the value is the
- name of the directory in which the socket file is stored. If
- multiple host names are specified, each will be tried in turn in
- the order given. The default behavior when host is
- not specified is to connect to a Unix-domain
+ name of the directory in which the socket file is stored.
+ The default behavior when host is
+ not specified, or is empty, is to connect to a Unix-domain
socketUnix domain socket in
/tmp (or whatever socket directory was specified
when PostgreSQL was built). On machines without
@@ -969,7 +977,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
A comma-separated list of host names is also accepted, in which case
- each host name in the list is tried in order. See
+ each host name in the list is tried in order; an empty item in the
+ list selects the default behavior as explained above. See
for details.
@@ -984,10 +993,14 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
your machine supports IPv6, you can also use those addresses.
TCP/IP communication is
always used when a nonempty string is specified for this parameter.
+ If this parameter is not specified, the value of host
+ will be looked up to find the corresponding IP address — or, if
+ host specifies an IP address, that value will be
+ used directly.
- Using hostaddr instead of host allows the
+ Using hostaddr allows the
application to avoid a host name look-up, which might be important
in applications with time constraints. However, a host name is
required for GSSAPI or SSPI authentication
@@ -996,8 +1009,12 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
-     If host is specified without hostaddr,
-     a host name lookup occurs.
+     If host is specified
+     without hostaddr, a host name lookup occurs.
+ (When using PQconnectPoll , the lookup occurs
+ when PQconnectPoll first considers this host
+ name, and it may cause PQconnectPoll to block
+ for a significant amount of time.)
@@ -1020,14 +1037,17 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
Note that authentication is likely to fail if host
     is not the name of the server at network address hostaddr.
- Also, note that host rather than hostaddr
+ Also, when both host and hostaddr
+ are specified, host
is used to identify the connection in a password file (see
).
A comma-separated list of hostaddr values is also
- accepted, in which case each host in the list is tried in order. See
+ accepted, in which case each host in the list is tried in order.
+ An empty item in the list causes the corresponding host name to be
+ used, or the default host name if that is empty as well. See
for details.
@@ -1047,9 +1067,12 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
name extension for Unix-domain
connections.port
If multiple hosts were given in the host or
- hostaddr parameters, this parameter may specify a list
- of ports of equal length, or it may specify a single port number to
- be used for all hosts.
+ hostaddr parameters, this parameter may specify a
+ comma-separated list of ports of the same length as the host list, or
+ it may specify a single port number to be used for all hosts.
+ An empty string, or an empty item in a comma-separated list,
+ specifies the default port number established
+ when PostgreSQL was built.
@@ -1099,14 +1122,37 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
+
+ channel_binding
+
+
+ This option controls the client's use of channel binding. A setting
+ of require means that the connection must employ
+ channel binding, prefer means that the client will
+ choose channel binding if available, and disable
+ prevents the use of channel binding. The default
+ is prefer if
+ PostgreSQL is compiled with SSL support;
+ otherwise the default is disable .
+
+
+ Channel binding is a method for the server to authenticate itself to
+ the client. It is only supported over SSL connections
+ with PostgreSQL 11 or later servers using
+ the SCRAM authentication method.
+
+
+
+
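   A sketch of a connection string that insists on channel binding (host
   and database names are hypothetical); with require, the attempt fails
   unless the server offers SCRAM authentication over SSL:

    host=db.example.com dbname=mydb channel_binding=require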
connect_timeout
- Maximum wait for connection, in seconds (write as a decimal integer
- string). Zero or not specified means wait indefinitely. It is not
- recommended to use a timeout of less than 2 seconds.
- This timeout applies separately to each connection attempt.
+ Maximum wait for connection, in seconds (write as a decimal integer,
+       e.g. 10). Zero, negative, or not specified means
+       wait indefinitely. The minimum allowed timeout is 2 seconds; therefore
+       a value of 1 is interpreted as 2.
+ This timeout applies separately to each host name or IP address.
For example, if you specify two hosts and connect_timeout
is 5, each host will time out if no connection is made within 5
seconds, so the total time spent waiting for a connection might be
@@ -1229,6 +1275,20 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
+
+ tcp_user_timeout
+
+
+ Controls the number of milliseconds that transmitted data may
+ remain unacknowledged before a connection is forcibly closed.
+ A value of zero uses the system default. This parameter is
+ ignored for connections made via a Unix-domain socket.
+ It is only supported on systems where TCP_USER_TIMEOUT
+ is available; on other systems, it has no effect.
+
+
+
+
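   As an illustration (the host names are made up), the two timeout
   settings can be combined: each of the two hosts then gets at most
   5 seconds to accept the connection, and once connected, data left
   unacknowledged for 10000 milliseconds forces the connection closed on
   systems that support TCP_USER_TIMEOUT:

    host=pg1.example.com,pg2.example.com connect_timeout=5 tcp_user_timeout=10000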
tty
@@ -1238,30 +1298,6 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
-
- scram_channel_binding
-
-
- Specifies the channel binding type to use with SCRAM authentication.
- The list of channel binding types supported by server are listed in
- . An empty value specifies that
- the client will not use channel binding. The default value is
- tls-unique .
-
-
-
- Channel binding is only supported on SSL connections. If the
- connection is not using SSL, then this setting is ignored.
-
-
-
- This parameter is mainly intended for protocol testing. In normal
- use, there should not be a need to choose a channel binding type other
- than the default one.
-
-
-
-
replication
@@ -1320,6 +1356,63 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
+
+ gssencmode
+
+
+ This option determines whether or with what priority a secure
+ GSS TCP/IP connection will be negotiated with the
+ server. There are three modes:
+
+
+
+ disable
+
+
+          only try a non-GSSAPI-encrypted connection
+
+
+
+
+
+ prefer (default)
+
+
+ if there are GSSAPI credentials present (i.e.,
+ in a credentials cache), first try
+          a GSSAPI-encrypted connection; if that fails or
+          there are no credentials, try a
+          non-GSSAPI-encrypted connection. This is the
+ default when PostgreSQL has been
+ compiled with GSSAPI support.
+
+
+
+
+
+ require
+
+
+          only try a GSSAPI-encrypted connection
+
+
+
+
+
+
+
+ gssencmode is ignored for Unix domain socket
+ communication. If PostgreSQL is compiled
+ without GSSAPI support, using the require option
+ will cause an error, while prefer will be accepted
+ but libpq will not actually attempt
+       a GSSAPI-encrypted
+ connection.GSSAPI with
+ libpq
+
+
+
+
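   A sketch of a connection string that refuses to proceed without
   GSSAPI encryption (the host name is hypothetical); with a libpq built
   without GSSAPI support this fails with an error, as described above:

    host=kdc.example.com dbname=mydb gssencmode=require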
sslmode
@@ -1622,19 +1715,14 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
The following functions return parameter values established at connection.
These values are fixed for the life of the connection. If a multi-host
-    connection string is used, the values of PQhost,
-    PQport, and PQpass can change if a new connection
+    connection string is used, the values of PQhost,
+    PQport, and PQpass can change if a new connection
is established using the same PGconn object. Other values
are fixed for the lifetime of the PGconn object.
-
-
- PQdb
-
- PQdb
-
-
+
+ PQdb PQdb
@@ -1646,13 +1734,8 @@ char *PQdb(const PGconn *conn);
-
-
- PQuser
-
- PQuser
-
-
+
+ PQuser PQuser
@@ -1664,13 +1747,8 @@ char *PQuser(const PGconn *conn);
-
-
- PQpass
-
- PQpass
-
-
+
+ PQpass PQpass
@@ -1679,16 +1757,22 @@ char *PQuser(const PGconn *conn);
char *PQpass(const PGconn *conn);
+
+
+      PQpass will return either the password specified
+      in the connection parameters, or if there was none and the password
+      was obtained from the password
+      file, it will return that. In the latter case,
+      if multiple hosts were specified in the connection parameters, it is
+      not possible to rely on the result of PQpass until
+      the connection is established. The status of the connection can be
+      checked using the function PQstatus.
+
-
-
- PQhost
-
- PQhost
-
-
+
+ PQhost PQhost
@@ -1704,15 +1788,15 @@ char *PQhost(const PGconn *conn);
       If the connection parameters specified both host and
-      hostaddr, then PQhost will
+      hostaddr, then PQhost will
return the host information. If only
hostaddr was specified, then that is returned.
If multiple hosts were specified in the connection parameters,
-      PQhost returns the host actually connected to.
+      PQhost returns the host actually connected to.
-      PQhost returns NULL if the
+      PQhost returns NULL if the
       conn argument is NULL.
Otherwise, if there is an error producing the host information (perhaps
if the connection has not been fully established or there was an
@@ -1721,20 +1805,40 @@ char *PQhost(const PGconn *conn);
If multiple hosts were specified in the connection parameters, it is
-      not possible to rely on the result of PQhost until
+      not possible to rely on the result of PQhost until
       the connection is established. The status of the connection can be
-      checked using the function PQstatus.
+      checked using the function PQstatus.
-
-
- PQport
-
- PQport
-
-
+
+
+ PQhostaddr PQhostaddr
+
+
+
+ Returns the server IP address of the active connection.
+ This can be the address that a host name resolved to,
+ or an IP address provided through the hostaddr
+ parameter.
+
+char *PQhostaddr(const PGconn *conn);
+
+
+
+
+      PQhostaddr returns NULL if the
+      conn argument is NULL.
+ Otherwise, if there is an error producing the host information
+ (perhaps if the connection has not been fully established or
+ there was an error), it returns an empty string.
+
+
+
+
+
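   A sketch (assuming an established PGconn *conn from a multi-host
   connection string) that uses the new function together with PQhost
   and PQport to report which candidate server was actually reached:

    if (PQstatus(conn) == CONNECTION_OK)
        printf("connected to %s (%s), port %s\n",
               PQhost(conn), PQhostaddr(conn), PQport(conn));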
+ PQport PQport
@@ -1747,11 +1851,11 @@ char *PQport(const PGconn *conn);
If multiple ports were specified in the connection parameters,
-      PQport returns the port actually connected to.
+      PQport returns the port actually connected to.
-      PQport returns NULL if the
+      PQport returns NULL if the
       conn argument is NULL.
Otherwise, if there is an error producing the port information (perhaps
if the connection has not been fully established or there was an
@@ -1760,20 +1864,15 @@ char *PQport(const PGconn *conn);
If multiple ports were specified in the connection parameters, it is
-      not possible to rely on the result of PQport until
+      not possible to rely on the result of PQport until
       the connection is established. The status of the connection can be
-      checked using the function PQstatus.
+      checked using the function PQstatus.
-
-
- PQtty
-
- PQtty
-
-
+
+ PQtty PQtty
@@ -1789,13 +1888,8 @@ char *PQtty(const PGconn *conn);
-
-
- PQoptions
-
- PQoptions
-
-
+
+ PQoptions PQoptions
@@ -1814,13 +1908,8 @@ char *PQoptions(const PGconn *conn);
are executed on the PGconn object.
-
-
- PQstatus
-
- PQstatus
-
-
+
+ PQstatus PQstatus
@@ -1838,28 +1927,23 @@ ConnStatusType PQstatus(const PGconn *conn);
       has the status CONNECTION_OK. A failed
       connection attempt is signaled by status
       CONNECTION_BAD. Ordinarily, an OK status will
-      remain so until PQfinish, but a communications
+      remain so until PQfinish, but a communications
failure might result in the status changing to
CONNECTION_BAD prematurely. In that case the
application could try to recover by calling
-      PQreset.
+      PQreset.
-     See the entry for PQconnectStartParams, PQconnectStart
+     See the entry for PQconnectStartParams, PQconnectStart
and PQconnectPoll with regards to other status codes that
might be returned.
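   A sketch of the recovery pattern described above, assuming an
   existing PGconn *conn:

    if (PQstatus(conn) == CONNECTION_BAD)
    {
        PQreset(conn);          /* blocking variant; see also PQresetStart */
        if (PQstatus(conn) != CONNECTION_OK)
            fprintf(stderr, "recovery failed: %s", PQerrorMessage(conn));
    }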
-
-
- PQtransactionStatus
-
- PQtransactionStatus
-
-
+
+ PQtransactionStatus PQtransactionStatus
@@ -1880,13 +1964,8 @@ PGTransactionStatusType PQtransactionStatus(const PGconn *conn);
-
-
- PQparameterStatus
-
- PQparameterStatus
-
-
+
+ PQparameterStatus PQparameterStatus
@@ -1898,7 +1977,7 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName);
Certain parameter values are reported by the server automatically at
connection startup or whenever their values change.
-       PQparameterStatus can be used to interrogate these settings.
+       PQparameterStatus can be used to interrogate these settings.
It returns the current value of a parameter if known, or NULL
if the parameter is not known.
@@ -1933,13 +2012,13 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName);
Pre-3.0-protocol servers do not report parameter settings, but
libpq includes logic to obtain values for
server_version and client_encoding anyway.
-    Applications are encouraged to use PQparameterStatus
+    Applications are encouraged to use PQparameterStatus
rather than ad hoc code to determine these values.
(Beware however that on a pre-3.0 connection, changing
client_encoding via SET after connection
-    startup will not be reflected by PQparameterStatus.)
+    startup will not be reflected by PQparameterStatus.)
    For server_version, see also
-    PQserverVersion, which returns the information in a
+    PQserverVersion, which returns the information in a
numeric form that is much easier to compare against.
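   For example (a sketch assuming an established PGconn *conn):

    const char *encoding = PQparameterStatus(conn, "client_encoding");
    const char *version = PQparameterStatus(conn, "server_version");

    printf("client_encoding=%s, server_version=%s\n",
           encoding ? encoding : "(unknown)",
           version ? version : "(unknown)");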
@@ -1959,13 +2038,8 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName);
-
-
- PQprotocolVersion
-
- PQprotocolVersion
-
-
+
+ PQprotocolVersion PQprotocolVersion
@@ -1987,13 +2061,8 @@ int PQprotocolVersion(const PGconn *conn);
-
-
- PQserverVersion
-
- PQserverVersion
-
-
+
+ PQserverVersion PQserverVersion
@@ -2016,14 +2085,14 @@ int PQserverVersion(const PGconn *conn);
Prior to major version 10, PostgreSQL used
three-part version numbers in which the first two parts together
represented the major version. For those
-      versions, PQserverVersion uses two digits for each
+      versions, PQserverVersion uses two digits for each
part; for example version 9.1.5 will be returned as 90105, and
version 9.2.0 will be returned as 90200.
Therefore, for purposes of determining feature compatibility,
-      applications should divide the result of PQserverVersion
+      applications should divide the result of PQserverVersion
by 100 not 10000 to determine a logical major version number.
In all release series, only the last two digits differ between
minor releases (bug-fix releases).
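   A sketch of the arithmetic described above, assuming an established
   PGconn *conn:

    int v = PQserverVersion(conn);  /* e.g. 90105 for 9.1.5, 100001 for 10.1 */
    int major = (v >= 100000) ? v / 10000 : v / 100;   /* 10, or 901 for 9.1 */
    int minor = v % 100;            /* last two digits: the minor release */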
@@ -2031,13 +2100,8 @@ int PQserverVersion(const PGconn *conn);
-
-
- PQerrorMessage
-
- PQerrorMessage
-
-
+
+ PQerrorMessage PQerrorMessage
@@ -2052,20 +2116,20 @@ char *PQerrorMessage(const PGconn *conn);
Nearly all libpq functions will set a message for
-      PQerrorMessage if they fail. Note that by
+      PQerrorMessage if they fail. Note that by
libpq convention, a nonempty
-      PQerrorMessage result can consist of multiple lines,
+      PQerrorMessage result can consist of multiple lines,
and will include a trailing newline. The caller should not free
the result directly. It will be freed when the associated
PGconn handle is passed to
-      PQfinish. The result string should not be
+      PQfinish. The result string should not be
expected to remain the same across operations on the
PGconn structure.
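   A sketch of the ownership rule above: the string belongs to the
   PGconn, so take a private copy if it must outlive later libpq calls,
   and never free the original:

    char saved[256];

    snprintf(saved, sizeof(saved), "%s", PQerrorMessage(conn));
    /* saved remains valid across further operations on conn */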
-
+
PQsocket PQsocket
@@ -2083,7 +2147,7 @@ int PQsocket(const PGconn *conn);
-
+
PQbackendPID PQbackendPID
@@ -2110,7 +2174,7 @@ int PQbackendPID(const PGconn *conn);
-
+
PQconnectionNeedsPassword PQconnectionNeedsPassword
@@ -2130,7 +2194,7 @@ int PQconnectionNeedsPassword(const PGconn *conn);
-
+
PQconnectionUsedPassword PQconnectionUsedPassword
@@ -2156,7 +2220,7 @@ int PQconnectionUsedPassword(const PGconn *conn);
usually doesn't change after a connection is established.
-
+
PQsslInUse PQsslInUse
@@ -2170,7 +2234,7 @@ int PQsslInUse(const PGconn *conn);
-
+
PQsslAttribute PQsslAttribute
@@ -2243,7 +2307,7 @@ const char *PQsslAttribute(const PGconn *conn, const char *attribute_name);
-
+
PQsslAttributeNames