Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 5f2694a

Browse files
committed
Dead code elimination
1 parent 86a97ae commit 5f2694a

File tree

8 files changed

+99
-194
lines changed

8 files changed

+99
-194
lines changed

src/ddd.c

-15
Original file line numberDiff line numberDiff line change
@@ -493,21 +493,6 @@ MtmDetectGlobalDeadLockForXid(TransactionId xid)
493493
hasDeadlock = MtmGraphFindLoop(&graph, &gtid);
494494
mtm_log(DeadlockCheck, "Distributed deadlock check by backend %d for %u:" XID_FMT " = %d",
495495
MyProcPid, gtid.node, gtid.xid, hasDeadlock);
496-
// if (!hasDeadlock) {
497-
// /* There is no deadlock loop in graph, but deadlock can be caused by lack of apply workers: if all of them are busy, then some transactions
498-
// * cannot be applied just because there are no vacant workers and it causes an additional dependency between transactions which is not
499-
// * reflected in the lock graph
500-
// */
501-
// timestamp_t lastPeekTime = minBgwGetLastPeekTime(&Mtm->pool);
502-
// if (lastPeekTime != 0 && MtmGetSystemTime() - lastPeekTime >= MSEC_TO_USEC(DeadlockTimeout)) {
503-
// hasDeadlock = true;
504-
// MTM_ELOG(WARNING, "Apply workers were blocked more than %d msec",
505-
// (int)USEC_TO_MSEC(MtmGetSystemTime() - lastPeekTime));
506-
// } else {
507-
// MTM_LOG1("Enable deadlock timeout in backend %d for transaction %llu", MyProcPid, (long64)xid);
508-
// enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
509-
// }
510-
// }
511496

512497
if (!hasDeadlock)
513498
{

src/ddl.c

-6
Original file line numberDiff line numberDiff line change
@@ -657,7 +657,6 @@ MtmProcessUtilityReceiver(PlannedStmt *pstmt, const char *queryString,
657657
{
658658
case T_CreateTableSpaceStmt:
659659
case T_DropTableSpaceStmt:
660-
// case T_VacuumStmt:
661660
Assert(MtmCapturedDDL == NULL);
662661
MtmCapturedDDL = copyObject(parsetree);
663662
captured = true;
@@ -1205,11 +1204,6 @@ MtmApplyDDLMessage(const char *messageBody, bool transactional)
12051204

12061205
switch (nodeTag(MtmCapturedDDL))
12071206
{
1208-
// case T_VacuumStmt:
1209-
// {
1210-
// ExecVacuum((VacuumStmt *) MtmCapturedDDL, 1);
1211-
// break;
1212-
// }
12131207
case T_IndexStmt:
12141208
{
12151209
IndexStmt *indexStmt = (IndexStmt *) MtmCapturedDDL;

src/dmq.c

-18
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,6 @@ dmq_init(int send_timeout)
269269
PreviousShmemStartupHook = shmem_startup_hook;
270270
shmem_startup_hook = dmq_shmem_startup_hook;
271271

272-
// on_proc_exit(dmq_at_exit, 0);
273272
}
274273

275274
static Size
@@ -292,14 +291,6 @@ dmq_toc_size()
292291
*
293292
*****************************************************************************/
294293

295-
// static void
296-
// fe_close(PGconn *conn)
297-
// {
298-
// PQputCopyEnd(conn, NULL);
299-
// PQflush(conn);
300-
// PQfinish(conn);
301-
// }
302-
303294
static int
304295
fe_send(PGconn *conn, char *msg, size_t len)
305296
{
@@ -448,9 +439,7 @@ dmq_sender_main(Datum main_arg)
448439

449440
if (ret < 0)
450441
{
451-
// Assert(PQstatus(conns[conn_id].pgconn) != CONNECTION_OK);
452442
conns[conn_id].state = Idle;
453-
// DeleteWaitEvent(set, conns[conn_id].pos);
454443

455444
mtm_log(DmqStateFinal,
456445
"[DMQ] failed to send message to %s: %s",
@@ -568,8 +557,6 @@ dmq_sender_main(Datum main_arg)
568557
if (ret < 0)
569558
{
570559
conns[conn_id].state = Idle;
571-
// DeleteWaitEvent(set, conns[conn_id].pos);
572-
// Assert(PQstatus(conns[i].pgconn) != CONNECTION_OK);
573560

574561
mtm_log(DmqStateFinal,
575562
"[DMQ] failed to send heartbeat to %s: %s",
@@ -1205,7 +1192,6 @@ ensure_outq_handle()
12051192
if (dmq_local.mq_outh != NULL)
12061193
return;
12071194

1208-
// Assert(dmq_state->handle != DSM_HANDLE_INVALID);
12091195
seg = dsm_attach(dmq_state->out_dsm);
12101196
if (seg == NULL)
12111197
ereport(ERROR,
@@ -1223,8 +1209,6 @@ ensure_outq_handle()
12231209
outq = shm_toc_lookup(toc, MyProc->pgprocno, false);
12241210
shm_mq_set_sender(outq, MyProc);
12251211

1226-
// elog(LOG, "XXXshm_mq_set_sender %d", MyProc->pgprocno);
1227-
12281212
oldctx = MemoryContextSwitchTo(TopMemoryContext);
12291213
dmq_local.mq_outh = shm_mq_attach(outq, seg, NULL);
12301214
MemoryContextSwitchTo(oldctx);
@@ -1431,7 +1415,6 @@ dmq_detach_receiver(char *sender_name)
14311415

14321416
if (dmq_local.inhandles[handle_id].dsm_seg)
14331417
{
1434-
// dsm_unpin_mapping(dmq_local.inhandles[handle_id].dsm_seg);
14351418
dsm_detach(dmq_local.inhandles[handle_id].dsm_seg);
14361419
dmq_local.inhandles[handle_id].dsm_seg = NULL;
14371420
}
@@ -1542,7 +1525,6 @@ dmq_pop(DmqSenderId *sender_id, StringInfo msg, uint64 mask)
15421525
*sender_id = i;
15431526
return false;
15441527
}
1545-
// mtm_log(ERROR, "[DMQ] dmq_pop: failed to reattach");
15461528
}
15471529
}
15481530

src/multimaster.c

-3
Original file line numberDiff line numberDiff line change
@@ -600,9 +600,6 @@ mtm_init_cluster(PG_FUNCTION_ARGS)
600600
PGconn **peer_conns;
601601
StringInfoData local_query;
602602

603-
// if (!check_config())
604-
// mtm_log(ERROR, "Multimaster config is not ok, refusing to work");
605-
606603
/* parse array with peer connstrings */
607604
Assert(ARR_ELEMTYPE(peers_arr) == TEXTOID);
608605
Assert(ARR_NDIM(peers_arr) == 1);

src/pglogical_apply.c

-14
Original file line numberDiff line numberDiff line change
@@ -487,20 +487,6 @@ process_remote_begin(StringInfo s, GlobalTransactionId *gtid)
487487

488488
suppress_internal_consistency_checks = true;
489489

490-
// AcceptInvalidationMessages();
491-
// if (!receiver_mtm_cfg_valid)
492-
// {
493-
// if (receiver_mtm_cfg)
494-
// pfree(receiver_mtm_cfg);
495-
496-
// receiver_mtm_cfg = MtmLoadConfig();
497-
498-
// if (receiver_mtm_cfg->my_node_id == 0)
499-
// proc_exit(0);
500-
501-
// receiver_mtm_cfg_valid = true;
502-
// }
503-
504490
return true;
505491
}
506492

src/pglogical_receiver.c

-1
Original file line numberDiff line numberDiff line change
@@ -780,7 +780,6 @@ pglogical_receiver_main(Datum main_arg)
780780
{
781781
int64 now = feGetCurrentTimestamp();
782782

783-
// MtmUpdateLsnMapping(nodeId, walEnd);
784783
/* Leave if feedback is not sent properly */
785784
sendFeedback(conn, now, nodeId);
786785
}

0 commit comments

Comments
 (0)