blob: d3d8e089bc9dabf1c25e2146a0b84d5acd89e383 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
|
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2004
# Sleepycat Software. All rights reserved.
#
# $Id: rep024.tcl,v 1.7 2004/09/22 18:01:06 bostic Exp $
#
# TEST rep024
# TEST Replication page allocation / verify test
# TEST
# TEST Start a master (site 1) and a client (site 2). Master
# TEST closes (simulating a crash). Site 2 becomes the master
# TEST and site 1 comes back up as a client. Verify database.
proc rep024 { method { niter 1000 } { tnum "024" } args } {
	## Driver for the replication page-allocation / verify test.
	## Runs rep024_sub for every combination of recovery option
	## (none / -recover) and master/client log configuration,
	## skipping the invalid in-memory-logs + -recover combination.
	global fixed_len

	# Use a large fixed record length so a single put can force a
	# page allocation (see rep024_sub); restore the global afterward.
	set saved_fixed_len $fixed_len
	set fixed_len 448

	set args [convert_args $method $args]
	set envargs ""
	set logsets [create_logsets 2]

	# Run all tests with and without recovery.
	foreach recopt { "" "-recover" } {
		foreach logset $logsets {
			# Recovery requires on-disk logs; skip in-memory
			# log configurations when -recover is in effect.
			if { $recopt == "-recover" && \
			    [lsearch -exact $logset "in-memory"] != -1 } {
				puts "Rep$tnum: Skipping for in-memory logs with -recover."
				continue
			}
			puts "Rep$tnum ($method $recopt): Replication page allocation/verify test."
			puts "Rep$tnum: Master logs are [lindex $logset 0]"
			puts "Rep$tnum: Client logs are [lindex $logset 1]"
			rep024_sub $method $niter $tnum $envargs $logset $recopt $args
		}
	}

	set fixed_len $saved_fixed_len
	return
}
proc rep024_sub { method niter tnum envargs logset recargs largs } {
## Body of the rep024 test for one recovery/log configuration.
## Scenario: master (site 1) and client (site 2) run; the master
## closes after forcing a page allocation the client never sees
## (simulating a crash); site 2 then becomes master and site 1
## rejoins as a client; both environments are verified.
source ./include.tcl
global testdir
# Start from a clean test directory and a fresh replication
# message queue for the two simulated sites.
env_cleanup $testdir
replsetup $testdir/MSGQUEUEDIR
set masterdir $testdir/MASTERDIR
set clientdir $testdir/CLIENTDIR
file mkdir $masterdir
file mkdir $clientdir
# Per-site log configuration ("on-disk" or "in-memory").
set m_logtype [lindex $logset 0]
set c_logtype [lindex $logset 1]
# In-memory logs require a large log buffer, and cannot
# be used with -txn nosync. This test requires -txn, so
# we only have to adjust the logargs.
set m_logargs [adjust_logargs $m_logtype]
set c_logargs [adjust_logargs $c_logtype]
# NOTE(review): checkfunc is set but not visibly used in this
# proc — presumably kept for parity with test024; confirm.
if { [is_record_based $method] == 1 } {
set checkfunc test024_recno.check
} else {
set checkfunc test024.check
}
# Open a master.
repladd 1
set env_cmd(1) "berkdb_env_noerr -create -lock_max 2500 \
-log_max 1000000 $envargs $recargs -home $masterdir \
-errpfx MASTER -txn $m_logargs \
-rep_transport \[list 1 replsend\]"
# set env_cmd(1) "berkdb_env_noerr -create -lock_max 2500 \
# -log_max 1000000 $envargs $recargs -home $masterdir \
# -verbose {rep on} -errfile /dev/stderr \
# -errpfx MASTER -txn $m_logargs \
# -rep_transport \[list 1 replsend\]"
set masterenv [eval $env_cmd(1) -rep_master]
error_check_good master_env [is_valid_env $masterenv] TRUE
# Open a client
repladd 2
set env_cmd(2) "berkdb_env_noerr -create -lock_max 2500 \
-log_max 1000000 $envargs $recargs -home $clientdir \
-errpfx CLIENT -txn $c_logargs \
-rep_transport \[list 2 replsend\]"
# set env_cmd(2) "berkdb_env_noerr -create -lock_max 2500 \
# -log_max 1000000 $envargs $recargs -home $clientdir \
# -verbose {rep on} -errfile /dev/stderr \
# -errpfx CLIENT -txn $c_logargs \
# -rep_transport \[list 2 replsend\]"
set clientenv [eval $env_cmd(2) -rep_client]
error_check_good client_env [is_valid_env $clientenv] TRUE
# Bring the client online by processing the startup messages.
set envlist "{$masterenv 1} {$clientenv 2}"
process_msgs $envlist
puts "\tRep$tnum.a: Add data to master, update client."
#
# This test uses a small page size and a large fixed_len
# so it is easy to force a page allocation.
set key [expr $niter + 1]
set data A
set pagesize 512
# Make the record big enough to need an overflow/big page:
# half a page for fixed-length methods (plus record overhead),
# two full pages otherwise.
if { [is_fixed_length $method] == 1 } {
set bigdata [repeat $data [expr $pagesize / 2]]
} else {
set bigdata [repeat $data [expr 2 * $pagesize]]
}
set omethod [convert_method $method]
set testfile "test$tnum.db"
set db [eval "berkdb_open -create $omethod -auto_commit \
-pagesize $pagesize -env $masterenv $largs $testfile"]
# Populate the database and replicate the puts to the client.
eval rep_test $method $masterenv $db $niter 0 0
process_msgs $envlist
# Close client. Force a page allocation on the master.
# An overflow page (or big page, for hash) will do the job.
#
puts "\tRep$tnum.b: Close client, force page allocation on master."
error_check_good client_close [$clientenv close] 0
# error_check_good client_verify \
# [verify_dir $clientdir "\tRep$tnum.b: " 0 0 1] 0
# Snapshot the page count, write one oversized record inside a
# committed txn, then snapshot again to prove pages were added.
set pages1 [r24_check_pages $db $method]
set txn [$masterenv txn]
error_check_good put_bigdata [eval {$db put} \
-txn $txn {$key [chop_data $method $bigdata]}] 0
error_check_good txn_commit [$txn commit] 0
# Verify that we have allocated new pages.
set pages2 [r24_check_pages $db $method]
set newpages [expr $pages2 - $pages1]
# Close master and discard messages for site 2. Now everybody
# is closed and sites 1 and 2 have different contents.
puts "\tRep$tnum.c: Close master."
error_check_good db_close [$db close] 0
error_check_good master_close [$masterenv close] 0
# The test is meaningless if no allocation happened; bail out.
if { $newpages <= 0 } {
puts "FAIL: no new pages allocated."
return
}
error_check_good master_verify \
[verify_dir $masterdir "\tRep$tnum.c: " 0 0 1] 0
# Run a loop, opening the original client as master and the
# original master as client. Test db_verify.
foreach option { "no new data" "add new data" } {
puts "\tRep$tnum.d: Swap master and client ($option)."
# Roles are swapped: env_cmd(2) (old client dir) is now
# the master, env_cmd(1) (old master dir) the client.
set newmasterenv [eval $env_cmd(2) -rep_master]
set newclientenv [eval $env_cmd(1) -rep_client]
set envlist "{$newmasterenv 2} {$newclientenv 1}"
process_msgs $envlist
# Second pass: force another allocation on the new master
# so the swapped pair also exercises the allocation path.
if { $option == "add new data" } {
set key [expr $niter + 2]
set db [eval "berkdb_open -create $omethod \
-auto_commit -pagesize $pagesize \
-env $newmasterenv $largs $testfile"]
set pages1 [r24_check_pages $db $method]
set txn [$newmasterenv txn]
error_check_good put_bigdata [eval {$db put} \
-txn $txn {$key [chop_data $method $bigdata]}] 0
error_check_good txn_commit [$txn commit] 0
set pages2 [r24_check_pages $db $method]
set newpages [expr $pages2 - $pages1]
error_check_good db_close [$db close] 0
process_msgs $envlist
}
puts "\tRep$tnum.e: Close master and client, run verify."
error_check_good newmasterenv_close [$newmasterenv close] 0
error_check_good newclientenv_close [$newclientenv close] 0
if { $newpages <= 0 } {
puts "FAIL: no new pages allocated."
return
}
# This test can leave unreferenced pages on systems without
# FTRUNCATE and that's OK, so set unref to 0.
error_check_good verify \
[verify_dir $masterdir "\tRep$tnum.f: " 0 0 1 0 0] 0
error_check_good verify \
[verify_dir $clientdir "\tRep$tnum.g: " 0 0 1 0 0] 0
}
replclose $testdir/MSGQUEUEDIR
}
proc r24_check_pages { db method } {
	## Return the page-count statistic that a big put is expected to
	## grow for the given access method: "big pages" for hash,
	## total pages for queue, overflow pages for everything else.
	if { [is_hash $method] == 1 } {
		return [stat_field $db stat "Number of big pages"]
	} elseif { [is_queue $method] == 1 } {
		return [stat_field $db stat "Number of pages"]
	} else {
		return [stat_field $db stat "Overflow pages"]
	}
}
|