@@ -15,9 +15,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     def test_backup_modes_archive(self):
         """standart backup modes with ARCHIVE WAL method"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
+            pg_options={
+                'wal_level': 'replica',
+                'ptrack_enable': 'on'}
             )
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         self.init_pb(backup_dir)
@@ -26,7 +29,7 @@ def test_backup_modes_archive(self):
         node.start()
 
         # full backup mode
-        #with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+        # with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
         # backup_log.write(self.backup_node(node, options=["--verbose"]))
 
         backup_id = self.backup_node(backup_dir, 'node', node)
@@ -37,15 +40,23 @@ def test_backup_modes_archive(self):
 
         # postmaster.pid and postmaster.opts shouldn't be copied
         excluded = True
-        db_dir = os.path.join(backup_dir, "backups", 'node', backup_id, "database")
+        db_dir = os.path.join(
+            backup_dir, "backups", 'node', backup_id, "database")
+
         for f in os.listdir(db_dir):
-            if os.path.isfile(os.path.join(db_dir, f)) \
-                and (f == "postmaster.pid" or f == "postmaster.opts"):
-                excluded = False
-        self.assertEqual(excluded, True)
+            if (
+                os.path.isfile(os.path.join(db_dir, f)) and
+                (
+                    f == "postmaster.pid" or
+                    f == "postmaster.opts"
+                )
+            ):
+                excluded = False
+        self.assertEqual(excluded, True)
 
         # page backup mode
-        page_backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page")
+        page_backup_id = self.backup_node(
+            backup_dir, 'node', node, backup_type="page")
 
         # print self.show_pb(node)
         show_backup = self.show_pb(backup_dir, 'node')[1]
@@ -55,7 +66,9 @@ def test_backup_modes_archive(self):
         # Check parent backup
         self.assertEqual(
             backup_id,
-            self.show_pb(backup_dir, 'node', backup_id=show_backup['ID'])["parent-backup-id"])
+            self.show_pb(
+                backup_dir, 'node',
+                backup_id=show_backup['ID'])["parent-backup-id"])
 
         # ptrack backup mode
         self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
@@ -67,7 +80,9 @@ def test_backup_modes_archive(self):
         # Check parent backup
         self.assertEqual(
             page_backup_id,
-            self.show_pb(backup_dir, 'node', backup_id=show_backup['ID'])["parent-backup-id"])
+            self.show_pb(
+                backup_dir, 'node',
+                backup_id=show_backup['ID'])["parent-backup-id"])
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -76,17 +91,20 @@ def test_backup_modes_archive(self):
     def test_smooth_checkpoint(self):
         """full backup with smooth checkpoint"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica'}
-        )
+            )
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
         node.start()
 
-        self.backup_node(backup_dir, 'node',node, options=["-C"])
+        self.backup_node(
+            backup_dir, 'node', node,
+            options=["-C"])
         self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
         node.stop()
@@ -97,7 +115,8 @@ def test_smooth_checkpoint(self):
     def test_incremental_backup_without_full(self):
         """page-level backup without validated full backup"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
@@ -110,28 +129,40 @@ def test_incremental_backup_without_full(self):
         try:
             self.backup_node(backup_dir, 'node', node, backup_type="page")
             # we should die here because exception is what we expect to happen
-            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
-                repr(self.output), self.cmd))
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because page backup should not be possible "
+                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                "ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.",
+                "ERROR: Valid backup on current timeline is not found. "
+                "Create new FULL backup before an incremental one.",
                 e.message,
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
 
         sleep(1)
 
         try:
             self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
             # we should die here because exception is what we expect to happen
-            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
-                repr(self.output), self.cmd))
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because page backup should not be possible "
+                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                "ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.",
+                "ERROR: Valid backup on current timeline is not found. "
+                "Create new FULL backup before an incremental one.",
                 e.message,
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
 
-        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR")
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[0]['Status'],
+            "ERROR")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -140,7 +171,8 @@ def test_incremental_backup_without_full(self):
     def test_incremental_backup_corrupt_full(self):
         """page-level backup with corrupted full backup"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
@@ -151,35 +183,50 @@ def test_incremental_backup_corrupt_full(self):
         node.start()
 
         backup_id = self.backup_node(backup_dir, 'node', node)
-        file = os.path.join(backup_dir, "backups", "node", backup_id, "database", "postgresql.conf")
+        file = os.path.join(
+            backup_dir, "backups", "node", backup_id,
+            "database", "postgresql.conf")
         os.remove(file)
 
         try:
             self.validate_pb(backup_dir, 'node')
             # we should die here because exception is what we expect to happen
-            self.assertEqual(1, 0, "Expecting Error because of validation of corrupted backup.\n Output: {0} \n CMD: {1}".format(
-                repr(self.output), self.cmd))
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of validation of corrupted backup.\n"
+                " Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
         except ProbackupException as e:
-            self.assertTrue("INFO: Validate backups of the instance 'node'\n" in e.message
-                and 'WARNING: Backup file "{0}" is not found\n'.format(file) in e.message
-                and "WARNING: Backup {0} data files are corrupted\n".format(backup_id) in e.message
-                and "INFO: Some backups are not valid\n" in e.message,
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
+            self.assertTrue(
+                "INFO: Validate backups of the instance 'node'\n" in e.message and
+                "WARNING: Backup file \"{0}\" is not found\n".format(
+                    file) in e.message and
+                "WARNING: Backup {0} data files are corrupted\n".format(
+                    backup_id) in e.message and
+                "INFO: Some backups are not valid\n" in e.message,
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
 
         try:
             self.backup_node(backup_dir, 'node', node, backup_type="page")
             # we should die here because exception is what we expect to happen
-            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
-                repr(self.output), self.cmd))
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because page backup should not be possible "
+                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                "ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.",
+                "ERROR: Valid backup on current timeline is not found. "
+                "Create new FULL backup before an incremental one.",
                 e.message,
-                "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
+                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                    repr(e.message), self.cmd))
 
-        # sleep(1)
-        self.assertEqual(self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
-        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR")
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -188,7 +235,8 @@ def test_incremental_backup_corrupt_full(self):
     def test_ptrack_threads(self):
        """ptrack multi thread backup mode"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
@@ -198,10 +246,14 @@ def test_ptrack_threads(self):
         self.set_archiving(backup_dir, 'node', node)
         node.start()
 
-        self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4"])
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type="full", options=["-j", "4"])
         self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
 
-        self.backup_node(backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4"])
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type="ptrack", options=["-j", "4"])
         self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
 
         # Clean after yourself
@@ -211,20 +263,28 @@ def test_ptrack_threads(self):
     def test_ptrack_threads_stream(self):
         """ptrack multi thread backup mode and stream"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
+            pg_options={
+                'wal_level': 'replica',
+                'ptrack_enable': 'on',
+                'max_wal_senders': '2'}
             )
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         node.start()
 
-        self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"])
+        self.backup_node(
+            backup_dir, 'node', node, backup_type="full",
+            options=["-j", "4", "--stream"])
 
         self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
-        self.backup_node(backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4", "--stream"])
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type="ptrack", options=["-j", "4", "--stream"])
         self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
 
         # Clean after yourself