|
/*-------------------------------------------------------------------------
 *
 * tqueue.c
 *	  Use shm_mq to send and receive tuples between parallel backends
 *
 * A DestReceiver of type DestTupleQueue, which is a TQueueDestReceiver
 * under the hood, writes tuples from the executor to a shm_mq.
 *
 * A TupleQueueFunnel manages the process of reading tuples from one or
 * more shm_mq objects being used as tuple queues.
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/tqueue.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "executor/tqueue.h"
#include "miscadmin.h"

typedef struct
{
	DestReceiver pub;			/* publicly visible DestReceiver fields */
	shm_mq_handle *handle;		/* queue to which tuples are written */
} TQueueDestReceiver;

struct TupleQueueFunnel
{
	int			nqueues;		/* number of queues still attached */
	int			maxqueues;		/* allocated length of queue array */
	int			nextqueue;		/* next queue to poll, for round-robin reads */
	shm_mq_handle **queue;		/* array of queues to read from */
};

/*
 * Receive a tuple from the executor and send it to the tuple queue.
 */
static void
tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
{
	TQueueDestReceiver *tqueue = (TQueueDestReceiver *) self;
	HeapTuple	tuple;

	/* Materialize the tuple, then send it; block if the queue is full. */
	tuple = ExecMaterializeSlot(slot);

	/* If the reader has detached, there's no point in continuing. */
	if (shm_mq_send(tqueue->handle, tuple->t_len, tuple->t_data, false) ==
		SHM_MQ_DETACHED)
		ereport(ERROR,
				(errmsg("could not send tuple to shared-memory queue")));
}

/*
 * Prepare to receive tuples from the executor.
 */
static void
tqueueStartupReceiver(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* do nothing */
}

/*
 * Clean up at end of an executor run.
 */
static void
tqueueShutdownReceiver(DestReceiver *self)
{
	/* do nothing */
}

/*
 * Destroy receiver when done with it.
 */
static void
tqueueDestroyReceiver(DestReceiver *self)
{
	pfree(self);
}

/*
 * Create a DestReceiver that writes tuples to a tuple queue.
 */
DestReceiver *
CreateTupleQueueDestReceiver(shm_mq_handle *handle)
{
	TQueueDestReceiver *self;

	self = (TQueueDestReceiver *) palloc0(sizeof(TQueueDestReceiver));

	self->pub.receiveSlot = tqueueReceiveSlot;
	self->pub.rStartup = tqueueStartupReceiver;
	self->pub.rShutdown = tqueueShutdownReceiver;
	self->pub.rDestroy = tqueueDestroyReceiver;
	self->pub.mydest = DestTupleQueue;
	self->handle = handle;

	return (DestReceiver *) self;
}
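
/*
 * A minimal worker-side usage sketch (hypothetical setup code; it assumes
 * the shm_mq has already been created in a DSM segment and that "mq",
 * "seg", and the QueryDesc are arranged by the caller):
 *
 *		shm_mq_handle *mqh = shm_mq_attach(mq, seg, NULL);
 *		DestReceiver *dest = CreateTupleQueueDestReceiver(mqh);
 *
 * Passing "dest" to the executor (e.g. via CreateQueryDesc) causes every
 * tuple the plan emits to be written into the queue, where a funnel in
 * the leader can pick it up.
 */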

/*
 * Create a tuple queue funnel.
 */
TupleQueueFunnel *
CreateTupleQueueFunnel(void)
{
	TupleQueueFunnel *funnel = palloc0(sizeof(TupleQueueFunnel));

	/* Start small; RegisterTupleQueueOnFunnel doubles the array as needed. */
	funnel->maxqueues = 8;
	funnel->queue = palloc(funnel->maxqueues * sizeof(shm_mq_handle *));

	return funnel;
}

/*
 * Destroy a tuple queue funnel.
 */
void
DestroyTupleQueueFunnel(TupleQueueFunnel *funnel)
{
	int			i;

	for (i = 0; i < funnel->nqueues; i++)
		shm_mq_detach(shm_mq_get_queue(funnel->queue[i]));
	pfree(funnel->queue);
	pfree(funnel);
}

/*
 * Remember the shared memory queue handle in funnel.
 */
void
RegisterTupleQueueOnFunnel(TupleQueueFunnel *funnel, shm_mq_handle *handle)
{
	/* Double the array size whenever it fills up. */
	if (funnel->nqueues >= funnel->maxqueues)
	{
		int			newsize = funnel->nqueues * 2;

		Assert(funnel->nqueues == funnel->maxqueues);

		funnel->queue = repalloc(funnel->queue,
								 newsize * sizeof(shm_mq_handle *));
		funnel->maxqueues = newsize;
	}

	funnel->queue[funnel->nqueues++] = handle;
}
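
/*
 * Leader-side setup sketch (hypothetical; "handles" stands in for the
 * shm_mq_handles obtained by attaching to each worker's queue):
 *
 *		TupleQueueFunnel *funnel = CreateTupleQueueFunnel();
 *
 *		for (i = 0; i < nworkers; ++i)
 *			RegisterTupleQueueOnFunnel(funnel, handles[i]);
 *
 * Tuples can then be drained with TupleQueueFunnelNext, below.
 */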

/*
 * Fetch a tuple from a tuple queue funnel.
 *
 * We read from the queues in round-robin fashion, so as to avoid the
 * situation where some workers' tuples get read promptly while others'
 * are barely ever serviced.
 *
 * Even when nowait = false, we read from the individual queues in
 * non-blocking mode.  A read that returns SHM_MQ_WOULD_BLOCK can still
 * have absorbed bytes of a partially-sent message, so polling all the
 * queues this way should outperform doing a blocking read on each queue
 * in turn.
 *
 * The return value is NULL if there are no remaining queues, or if
 * nowait = true and no queue returned a tuple without blocking.  *done,
 * if not NULL, is set to true when there are no remaining queues, and
 * to false in any other case.
 */
HeapTuple
TupleQueueFunnelNext(TupleQueueFunnel *funnel, bool nowait, bool *done)
{
	int			waitpos = funnel->nextqueue;

	/* Corner case: called before adding any queues, or after all are gone. */
	if (funnel->nqueues == 0)
	{
		if (done != NULL)
			*done = true;
		return NULL;
	}

	if (done != NULL)
		*done = false;

	for (;;)
	{
		shm_mq_handle *mqh = funnel->queue[funnel->nextqueue];
		shm_mq_result result;
		Size		nbytes;
		void	   *data;

		/* Attempt to read a message. */
		result = shm_mq_receive(mqh, &nbytes, &data, true);

		/*
		 * Normally we advance funnel->nextqueue to the next queue at this
		 * point.  But if we've just discovered that the current queue is
		 * detached, we instead remove it from the array and leave the
		 * pointer where it is; if the pointer then points past the last
		 * remaining queue, wrap it around to the first one.
		 */
		if (result != SHM_MQ_DETACHED)
			funnel->nextqueue = (funnel->nextqueue + 1) % funnel->nqueues;
		else
		{
			--funnel->nqueues;
			if (funnel->nqueues == 0)
			{
				if (done != NULL)
					*done = true;
				return NULL;
			}

			/* Close the gap left by the detached queue. */
			memmove(&funnel->queue[funnel->nextqueue],
					&funnel->queue[funnel->nextqueue + 1],
					sizeof(shm_mq_handle *)
					* (funnel->nqueues - funnel->nextqueue));

			if (funnel->nextqueue >= funnel->nqueues)
				funnel->nextqueue = 0;

			/* Keep waitpos in step with the shifted array. */
			if (funnel->nextqueue < waitpos)
				--waitpos;

			continue;
		}

		/* If we got a message, return it. */
		if (result == SHM_MQ_SUCCESS)
		{
			HeapTupleData htup;

			/*
			 * The tuple data we just read from the queue is only valid
			 * until we again attempt to read from it.  Copy the tuple into
			 * a single palloc'd chunk, as callers will expect.
			 */
			ItemPointerSetInvalid(&htup.t_self);
			htup.t_tableOid = InvalidOid;
			htup.t_len = nbytes;
			htup.t_data = data;
			return heap_copytuple(&htup);
		}

		/*
		 * If we've visited all of the queues, either give up and return
		 * NULL (in non-blocking mode) or wait for the process latch to be
		 * set (otherwise).
		 */
		if (funnel->nextqueue == waitpos)
		{
			if (nowait)
				return NULL;
			WaitLatch(MyLatch, WL_LATCH_SET, 0);
			CHECK_FOR_INTERRUPTS();
			ResetLatch(MyLatch);
		}
	}
}
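
/*
 * A leader-side read loop might look like this (hypothetical sketch; it
 * assumes "funnel" was populated as above, and ProcessTuple is a
 * placeholder for whatever the caller does with each tuple):
 *
 *		for (;;)
 *		{
 *			bool		done;
 *			HeapTuple	tup = TupleQueueFunnelNext(funnel, false, &done);
 *
 *			if (done)
 *				break;
 *			if (tup != NULL)
 *				ProcessTuple(tup);
 *		}
 *		DestroyTupleQueueFunnel(funnel);
 *
 * With nowait = false, a NULL result with done = false should not occur,
 * but checking for it is cheap.
 */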