
Commit 2c24811

fix warnings in pythonic tests
1 parent 59cf3d7 commit 2c24811

File tree

3 files changed: +38 -52 lines changed


tests/python/.flake8

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+[flake8]
+ignore = E241, E501
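
For reference, E241 is pycodestyle's "multiple spaces after comma" and E501 is "line too long"; ignoring them lets the tests keep aligned literals and long assert messages. flake8 discovers this file automatically when run from the tests directory, so a typical check is simply:

    cd tests/python
    flake8 partitioning_test.py    # reads .flake8, so E241/E501 are not reported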

tests/python/.style.yapf

Lines changed: 1 addition & 1 deletion
@@ -2,4 +2,4 @@
 based_on_style = pep8
 spaces_before_comment = 4
 split_before_logical_operator = false
-column_limit=90
+column_limit=100
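
The only setting that changes is the column limit, raised from 90 to 100 in line with E501 being ignored above; the other yapf options are untouched. yapf picks up .style.yapf from the directory tree automatically, so a dry run over the test file looks roughly like:

    cd tests/python
    yapf --diff partitioning_test.py    # uses .style.yapf; shows what the 100-column style would reformat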

tests/python/partitioning_test.py

Lines changed: 35 additions & 51 deletions
@@ -13,12 +13,11 @@
 import re
 import subprocess
 import threading
-import json
 import time
 import unittest
 
 from distutils.version import LooseVersion
-from testgres import get_new_node, get_bin_path, get_pg_config
+from testgres import get_new_node, get_bin_path, get_pg_version
 
 # set setup base logging config, it can be turned on by `use_logging`
 # parameter on node setup
@@ -54,7 +53,7 @@
 }
 
 logging.config.dictConfig(LOG_CONFIG)
-version = LooseVersion(get_pg_config().get("VERSION_NUM"))
+version = LooseVersion(get_pg_version())
 
 
 # Helper function for json equality
@@ -106,23 +105,6 @@ def start_new_pathman_cluster(self,
 
         return node
 
-    def catchup_replica(self, master, replica):
-        """ Wait until replica synchronizes with master """
-        if version >= LooseVersion('10'):
-            wait_lsn_query = """
-                SELECT pg_current_wal_lsn() <= replay_lsn
-                    FROM pg_stat_replication
-                    WHERE application_name = '{0}'
-            """
-        else:
-            wait_lsn_query = """
-                SELECT pg_current_xlog_location() <= replay_location
-                    FROM pg_stat_replication
-                    WHERE application_name = '{0}'
-            """
-
-        master.poll_query_until('postgres', wait_lsn_query.format(replica.name))
-
     def test_concurrent(self):
         """ Test concurrent partitioning """
 
@@ -158,8 +140,7 @@ def test_replication(self):
         with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node:
             with node.replicate('node2') as replica:
                 replica.start()
-                # wait until replica catches up
-                self.catchup_replica(node, replica)
+                replica.catchup()
 
                 # check that results are equal
                 self.assertEqual(
@@ -169,7 +150,9 @@ def test_replication(self):
                 # enable parent and see if it is enabled in replica
                 node.psql('postgres', "select enable_parent('abc')")
 
-                self.catchup_replica(node, replica)
+                # wait until replica catches up
+                replica.catchup()
+
                 self.assertEqual(
                     node.psql('postgres', 'explain (costs off) select * from abc'),
                     replica.psql('postgres', 'explain (costs off) select * from abc'))
@@ -182,7 +165,10 @@ def test_replication(self):
                 # check that UPDATE in pathman_config_params invalidates cache
                 node.psql('postgres',
                           'update pathman_config_params set enable_parent = false')
-                self.catchup_replica(node, replica)
+
+                # wait until replica catches up
+                replica.catchup()
+
                 self.assertEqual(
                     node.psql('postgres', 'explain (costs off) select * from abc'),
                     replica.psql('postgres', 'explain (costs off) select * from abc'))
@@ -688,7 +674,7 @@ def con2_thread():
                 explain (analyze, costs off, timing off)
                 select * from drop_test
                 where val = any (select generate_series(1, 40, 34))
-            """) # query selects from drop_test_1 and drop_test_4
+            """)    # query selects from drop_test_1 and drop_test_4
 
             con2.commit()
 
@@ -712,15 +698,14 @@ def con2_thread():
             # return all values in tuple
             queue.put((has_runtime_append, has_drop_test_1, has_drop_test_4))
 
-
         # Step 1: cache partitioned table in con1
         con1.begin()
-        con1.execute('select count(*) from drop_test') # load pathman's cache
+        con1.execute('select count(*) from drop_test')    # load pathman's cache
        con1.commit()
 
         # Step 2: cache partitioned table in con2
         con2.begin()
-        con2.execute('select count(*) from drop_test') # load pathman's cache
+        con2.execute('select count(*) from drop_test')    # load pathman's cache
         con2.commit()
 
         # Step 3: drop first partition of 'drop_test'
@@ -786,12 +771,12 @@ def con2_thread():
 
         # Step 1: lock partitioned table in con1
         con1.begin()
-        con1.execute('select count(*) from ins_test') # load pathman's cache
+        con1.execute('select count(*) from ins_test')    # load pathman's cache
         con1.execute('lock table ins_test in share update exclusive mode')
 
         # Step 2: try inserting new value in con2 (waiting)
         con2.begin()
-        con2.execute('select count(*) from ins_test') # load pathman's cache
+        con2.execute('select count(*) from ins_test')    # load pathman's cache
         t = threading.Thread(target=con2_thread)
         t.start()
 
@@ -853,12 +838,12 @@ def con2_thread():
 
         # Step 1: initilize con1
         con1.begin()
-        con1.execute('select count(*) from ins_test') # load pathman's cache
+        con1.execute('select count(*) from ins_test')    # load pathman's cache
 
         # Step 2: initilize con2
         con2.begin()
-        con2.execute('select count(*) from ins_test') # load pathman's cache
-        con2.commit() # unlock relations
+        con2.execute('select count(*) from ins_test')    # load pathman's cache
+        con2.commit()    # unlock relations
 
         # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success)
         con1.execute(
@@ -1031,12 +1016,12 @@ def turnon_pathman(node):
                 get_bin_path("pg_dump"), "-p {}".format(node.port),
                 "initial"
             ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"],
-             cmp_full), # dump as plain text and restore via COPY
+             cmp_full),    # dump as plain text and restore via COPY
             (turnoff_pathman, turnon_pathman, [
                 get_bin_path("pg_dump"), "-p {}".format(node.port),
                 "--inserts", "initial"
             ], [get_bin_path("psql"), "-p {}".format(node.port), "copy"],
-             cmp_full), # dump as plain text and restore via INSERTs
+             cmp_full),    # dump as plain text and restore via INSERTs
             (None, None, [
                 get_bin_path("pg_dump"), "-p {}".format(node.port),
                 "--format=custom", "initial"
@@ -1052,7 +1037,7 @@ def turnon_pathman(node):
             dump_restore_cmd = " | ".join((' '.join(pg_dump_params),
                                            ' '.join(pg_restore_params)))
 
-            if (preproc != None):
+            if (preproc is not None):
                 preproc(node)
 
             # transfer and restore data
@@ -1065,12 +1050,12 @@ def turnon_pathman(node):
                                   stderr=fnull)
             p2.communicate(input=stdoutdata)
 
-            if (postproc != None):
+            if (postproc is not None):
                 postproc(node)
 
             # validate data
             with node.connect('initial') as con1, \
-                node.connect('copy') as con2:
+                    node.connect('copy') as con2:
 
                 # compare plans and contents of initial and copy
                 cmp_result = cmp_dbs(con1, con2)
@@ -1092,8 +1077,8 @@ def turnon_pathman(node):
                     config_params_initial[row[0]] = row[1:]
                 for row in con2.execute(config_params_query):
                     config_params_copy[row[0]] = row[1:]
-                self.assertEqual(config_params_initial, config_params_copy, \
-                    "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd)
+                self.assertEqual(config_params_initial, config_params_copy,
+                                 "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd)
 
                 # compare constraints on each partition
                 constraints_query = """
@@ -1106,8 +1091,8 @@ def turnon_pathman(node):
                     constraints_initial[row[0]] = row[1:]
                 for row in con2.execute(constraints_query):
                     constraints_copy[row[0]] = row[1:]
-                self.assertEqual(constraints_initial, constraints_copy, \
-                    "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd)
+                self.assertEqual(constraints_initial, constraints_copy,
+                                 "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd)
 
                 # clear copy database
                 node.psql('copy', 'drop schema public cascade')
@@ -1128,9 +1113,9 @@ def test_concurrent_detach(self):
         test_interval = int(math.ceil(detach_timeout * num_detachs))
 
         insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \
-            + "/pgbench_scripts/insert_current_timestamp.pgbench"
+                                + "/pgbench_scripts/insert_current_timestamp.pgbench"
         detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \
-            + "/pgbench_scripts/detachs_in_timeout.pgbench"
+                                + "/pgbench_scripts/detachs_in_timeout.pgbench"
 
         # Check pgbench scripts on existance
         self.assertTrue(
@@ -1202,16 +1187,14 @@ def test_update_node_plan1(self):
         Test scan on all partititions when using update node.
         We can't use regression tests here because 9.5 and 9.6 give
         different plans
-            '''
+        '''
 
         with get_new_node('test_update_node') as node:
             node.init()
-            node.append_conf(
-                'postgresql.conf',
-                """
-                shared_preload_libraries=\'pg_pathman\'
-                pg_pathman.override_copy=false
-                pg_pathman.enable_partitionrouter=on
+            node.append_conf('postgresql.conf', """
+                shared_preload_libraries=\'pg_pathman\'
+                pg_pathman.override_copy=false
+                pg_pathman.enable_partitionrouter=on
             """)
             node.start()
 
@@ -1275,5 +1258,6 @@ def test_update_node_plan1(self):
             node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;')
             node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;')
 
+
 if __name__ == "__main__":
     unittest.main()
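
The removed catchup_replica() helper polled pg_stat_replication with a version-dependent query; the tests now rely on testgres' built-in replica.catchup() instead, and the version check uses get_pg_version() rather than get_pg_config().get("VERSION_NUM"). A minimal sketch of the new pattern, using the testgres calls that appear in this diff (get_new_node, psql, replicate, start, catchup); allow_streaming mirrors the start_new_pathman_cluster(allow_streaming=True) call above, and the node and table names are made up for illustration:

    from distutils.version import LooseVersion

    from testgres import get_new_node, get_pg_version

    # e.g. '10.1'; replaces the old VERSION_NUM lookup via get_pg_config()
    version = LooseVersion(get_pg_version())

    with get_new_node('master') as master:
        master.init(allow_streaming=True)
        master.start()
        master.psql('postgres', 'create table abc (id serial)')

        with master.replicate('replica') as replica:
            replica.start()
            replica.catchup()    # wait until the replica has replayed the master's WAL

            # master and replica should now return the same rows
            print(replica.psql('postgres', 'select count(*) from abc'))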

0 commit comments
