@@ -25,6 +25,10 @@
 #include "common/pg_lzcompress.h"
 #include "replication/origin.h"
 
+#ifndef FRONTEND
+#include "utils/memutils.h"
+#endif
+
 static bool allocate_recordbuf(XLogReaderState *state, uint32 reclength);
 
 static bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
@@ -160,6 +164,25 @@ allocate_recordbuf(XLogReaderState *state, uint32 reclength)
 	newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ);
 	newSize = Max(newSize, 5 * Max(BLCKSZ, XLOG_BLCKSZ));
 
+#ifndef FRONTEND
+
+	/*
+	 * Note that in very unlucky circumstances, the random data read from a
+	 * recycled segment can cause this routine to be called with a size causing
+	 * a hard failure at allocation.  For a standby, this would cause the
+	 * instance to stop suddenly with a hard failure, preventing it from
+	 * retrying to fetch WAL from one of its sources, which could allow it to
+	 * move on with replay without a manual restart.  If the data comes from a
+	 * past recycled segment and is still valid, the allocation may succeed
+	 * but record checks are going to fail, so this would be short-lived.  If
+	 * the allocation fails because of a memory shortage, this is not a hard
+	 * failure either, per the guarantee given by MCXT_ALLOC_NO_OOM.
+	 */
+	if (!AllocSizeIsValid(newSize))
+		return false;
+
+#endif
+
 	if (state->readRecordBuf)
 		pfree(state->readRecordBuf);
 	state->readRecordBuf =
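
Side note on the pattern: the new guard turns a bogus record length, typically random bytes read from a recycled WAL segment, into a soft failure instead of a hard allocation error, and the allocation itself already tolerates out-of-memory thanks to MCXT_ALLOC_NO_OOM. Below is a minimal standalone sketch of the same idea, not PostgreSQL source; MAX_ALLOC_SIZE stands in for the backend's MaxAllocSize and allocate_buf_soft() is a hypothetical name:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-in for the backend's MaxAllocSize (about 1 GB). */
#define MAX_ALLOC_SIZE ((size_t) 0x3fffffff)

/*
 * Hypothetical helper: (re)allocate a record buffer, reporting failure
 * softly so the caller can retry reading WAL from another source.
 */
static bool
allocate_buf_soft(char **buf, size_t newSize)
{
	/* Plays the role of !AllocSizeIsValid(newSize): reject garbage lengths. */
	if (newSize == 0 || newSize > MAX_ALLOC_SIZE)
		return false;

	free(*buf);             /* drop the old buffer, as pfree() does */
	*buf = malloc(newSize); /* returns NULL on OOM, like MCXT_ALLOC_NO_OOM */
	return *buf != NULL;    /* both failure modes are soft: just retry */
}
```

Either failure path returns false rather than aborting the process, which is what lets a standby fall back to another WAL source instead of requiring a manual restart.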