/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996-2004
* Sleepycat Software. All rights reserved.
*
* $Id: mp_alloc.c,v 11.47 2004/10/15 16:59:42 bostic Exp $
*/
#include "db_config.h"
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <string.h>
#endif
#include "db_int.h"
#include "dbinc/db_shash.h"
#include "dbinc/mp.h"
static void __memp_bad_buffer __P((DB_MPOOL_HASH *));
/*
* __memp_alloc --
* Allocate some space from a cache region.
*
* PUBLIC: int __memp_alloc __P((DB_MPOOL *,
* PUBLIC: REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
*/
int
__memp_alloc(dbmp, infop, mfp, len, offsetp, retp)
DB_MPOOL *dbmp;
REGINFO *infop;
MPOOLFILE *mfp;
size_t len;
roff_t *offsetp;
void *retp;
{
BH *bhp;
DB_ENV *dbenv;
DB_MPOOL_HASH *dbht, *hp, *hp_end, *hp_tmp;
DB_MUTEX *mutexp;
MPOOL *c_mp;
MPOOLFILE *bh_mfp;
size_t freed_space;
u_int32_t buckets, buffers, high_priority, priority, put_counter;
u_int32_t total_buckets;
int aggressive, giveup, ret;
void *p;
dbenv = dbmp->dbenv;
c_mp = infop->primary;
dbht = R_ADDR(infop, c_mp->htab);
hp_end = &dbht[c_mp->htab_buckets];
buckets = buffers = put_counter = total_buckets = 0;
aggressive = giveup = 0;
hp_tmp = NULL;
c_mp->stat.st_alloc++;
/*
* If we're allocating a buffer, and the one we're discarding is the
* same size, we don't want to waste the time to re-integrate it into
* the shared memory free list. If the MPOOLFILE argument isn't
* NULL, we'll compare the underlying page sizes of the two buffers
* before free-ing and re-allocating buffers.
*/
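/*
* A buffer header and its page are allocated as a single chunk; BH
* ends in a one-byte data placeholder, which is why one byte is
* subtracted back out before the page size is added.
*/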
if (mfp != NULL)
len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
R_LOCK(dbenv, infop);
/*
* Anything newer than 1/10th of the buffer pool is ignored during
* allocation (unless allocation starts failing).
*/
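/*
* Buffer priorities are assigned from lru_count, so this treats any
* buffer within st_pages/10 of the current count as too new to evict.
*/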
high_priority = c_mp->lru_count - c_mp->stat.st_pages / 10;
/*
* First we try to allocate from free memory. If that fails, scan the
* buffer pool to find buffers with low priorities. We consider small
* sets of hash buckets each time to limit the amount of work needing
* to be done. This approximates LRU, but not very well. We either
* find a buffer of the same size to use, or we will free 3 times what
* we need in the hopes it will coalesce into a contiguous chunk of the
* right size. In the latter case we branch back here and try again.
*/
alloc: if ((ret = __db_shalloc(infop, len, MUTEX_ALIGN, &p)) == 0) {
if (mfp != NULL)
c_mp->stat.st_pages++;
R_UNLOCK(dbenv, infop);
found: if (offsetp != NULL)
*offsetp = R_OFFSET(infop, p);
*(void **)retp = p;
/*
* Update the search statistics.
*
* We're not holding the region locked here, so these statistics
* can't be trusted.
*/
total_buckets += buckets;
if (total_buckets != 0) {
if (total_buckets > c_mp->stat.st_alloc_max_buckets)
c_mp->stat.st_alloc_max_buckets = total_buckets;
c_mp->stat.st_alloc_buckets += total_buckets;
}
if (buffers != 0) {
if (buffers > c_mp->stat.st_alloc_max_pages)
c_mp->stat.st_alloc_max_pages = buffers;
c_mp->stat.st_alloc_pages += buffers;
}
return (0);
} else if (giveup || c_mp->stat.st_pages == 0) {
R_UNLOCK(dbenv, infop);
__db_err(dbenv,
"unable to allocate space from the buffer cache");
return (ret);
}
/*
* We re-attempt the allocation every time we've freed 3 times what
* we need. Reset our free-space counter.
*/
freed_space = 0;
total_buckets += buckets;
buckets = 0;
/*
* Walk the hash buckets and find the next two with potentially useful
* buffers. Free the buffer with the lowest priority from the buckets'
* chains.
*/
for (;;) {
/* All pages have been freed, make one last try */
if (c_mp->stat.st_pages == 0)
goto alloc;
/* Check for wrap around. */
hp = &dbht[c_mp->last_checked++];
if (hp >= hp_end) {
c_mp->last_checked = 0;
hp = &dbht[c_mp->last_checked++];
}
/*
* Skip empty buckets.
*
* We can check for empty buckets before locking as we
* only care if the pointer is zero or non-zero.
*/
if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
continue;
/*
* The failure mode is when there are too many buffers we can't
* write or there's not enough memory in the system. We can't
* know for certain that an allocation will never succeed, so we
* fail only if no pages were returned to the cache after we've
* been trying for a relatively long time.
*
* Get aggressive if we've looked at as many hash buckets as
* there are in the system without finding any more space.
* Aggressive means:
*
* a: set a flag to attempt to flush high priority buffers as
* well as other buffers.
* b: sync the mpool to force out queue extent pages. While we
* might not have enough space for what we want and flushing
* is expensive, why not?
* c: look at a buffer in every hash bucket rather than choose
* the more preferable of two.
* d: start to think about giving up.
*
* If we get here twice, sleep for a second; hopefully someone
* else will run and free up some memory.
*
* Always try to allocate memory too, in case some other thread
* returns its memory to the region.
*
* !!!
* This test ignores pathological cases like no buffers in the
* system -- that shouldn't be possible.
*/
if ((++buckets % c_mp->htab_buckets) == 0) {
if (freed_space > 0)
goto alloc;
R_UNLOCK(dbenv, infop);
switch (++aggressive) {
case 1:
break;
case 2:
put_counter = c_mp->put_counter;
/* FALLTHROUGH */
case 3:
case 4:
case 5:
case 6:
(void)__memp_sync_int(
dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
__os_sleep(dbenv, 1, 0);
break;
default:
aggressive = 1;
if (put_counter == c_mp->put_counter)
giveup = 1;
break;
}
R_LOCK(dbenv, infop);
goto alloc;
}
if (!aggressive) {
/* Skip high priority buckets. */
if (hp->hash_priority > high_priority)
continue;
/*
* Find two buckets and select the one with the lowest
* priority. Performance testing shows that looking
* at two improves the LRUness and looking at more only
* does a little better.
*/
if (hp_tmp == NULL) {
hp_tmp = hp;
continue;
}
if (hp->hash_priority > hp_tmp->hash_priority)
hp = hp_tmp;
hp_tmp = NULL;
}
/* Remember the priority of the buffer we're looking for. */
priority = hp->hash_priority;
/* Unlock the region and lock the hash bucket. */
R_UNLOCK(dbenv, infop);
mutexp = &hp->hash_mutex;
MUTEX_LOCK(dbenv, mutexp);
#ifdef DIAGNOSTIC
__memp_check_order(hp);
#endif
/*
* The lowest priority page is first in the bucket, as they are
* maintained in sorted order.
*
* The buffer may have been freed or its priority changed while
* we switched from the region lock to the hash lock. If so,
* we have to restart. We will still take the first buffer on
* the bucket's list, though, if it has a low enough priority.
*/
if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL ||
bhp->ref != 0 || bhp->priority > priority)
goto next_hb;
buffers++;
/* Find the associated MPOOLFILE. */
bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
/* If the page is dirty, pin it and write it. */
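/*
* Holding a reference pins the buffer: __memp_bhwrite releases the
* hash bucket lock during the I/O, and the reference keeps the buffer
* from being stolen while the lock is dropped.
*/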
ret = 0;
if (F_ISSET(bhp, BH_DIRTY)) {
++bhp->ref;
ret = __memp_bhwrite(dbmp, hp, bh_mfp, bhp, 0);
--bhp->ref;
if (ret == 0)
++c_mp->stat.st_rw_evict;
} else
++c_mp->stat.st_ro_evict;
/*
* If a write fails for any reason, we can't proceed.
*
* We released the hash bucket lock while doing I/O, so another
* thread may have acquired this buffer and incremented the ref
* count after we wrote it, in which case we can't have it.
*
* If there's a write error and we're having problems finding
* something to allocate, avoid selecting this buffer again
* by making it the bucket's least-desirable buffer.
*/
if (ret != 0 || bhp->ref != 0) {
if (ret != 0 && aggressive)
__memp_bad_buffer(hp);
goto next_hb;
}
/*
* Check to see if the buffer is the size we're looking for.
* If so, we can simply reuse it. Else, free the buffer and
* its space and keep looking.
*/
if (mfp != NULL &&
mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
__memp_bhfree(dbmp, hp, bhp, 0);
p = bhp;
goto found;
}
freed_space += __db_shalloc_sizeof(bhp);
__memp_bhfree(dbmp, hp, bhp, BH_FREE_FREEMEM);
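/* Freeing a buffer is progress; drop back from the more desperate aggressive levels. */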
if (aggressive > 1)
aggressive = 1;
/*
* Unlock this hash bucket and re-acquire the region lock. If
* we're reaching here as a result of calling memp_bhfree, the
* hash bucket lock has already been discarded.
*/
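/* This block is entered only via goto next_hb; straight-line code skips the unlock. */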
if (0) {
next_hb: MUTEX_UNLOCK(dbenv, mutexp);
}
R_LOCK(dbenv, infop);
/*
* Retry the allocation as soon as we've freed up sufficient
* space. We're likely to have to coalesce chunks of free memory
* to satisfy the request, so don't retry until it's likely (possible?)
* we'll succeed.
*/
if (freed_space >= 3 * len)
goto alloc;
}
/* NOTREACHED */
}
/*
* __memp_bad_buffer --
* Make the first buffer in a hash bucket the least desirable buffer.
*/
static void
__memp_bad_buffer(hp)
DB_MPOOL_HASH *hp;
{
BH *bhp;
u_int32_t priority;
/* Remove the first buffer from the bucket. */
bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
/*
* Find the highest priority buffer in the bucket. Buffers are
* sorted by priority, so it's the last one in the bucket.
*/
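/* If the removed buffer was the bucket's only buffer, keep its own priority. */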
priority = bhp->priority;
if (!SH_TAILQ_EMPTY(&hp->hash_bucket))
priority = SH_TAILQ_LAST(&hp->hash_bucket, hq, __bh)->priority;
/*
* Set our buffer's priority to be just as bad, and append it to
* the bucket.
*/
bhp->priority = priority;
SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
/* Reset the hash bucket's priority. */
hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
}
#ifdef DIAGNOSTIC
/*
* __memp_check_order --
* Verify the priority ordering of a hash bucket chain.
*
* PUBLIC: #ifdef DIAGNOSTIC
* PUBLIC: void __memp_check_order __P((DB_MPOOL_HASH *));
* PUBLIC: #endif
*/
void
__memp_check_order(hp)
DB_MPOOL_HASH *hp;
{
BH *bhp;
u_int32_t priority;
/*
* Assumes the hash bucket is locked.
*/
if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL)
return;
DB_ASSERT(bhp->priority == hp->hash_priority);
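/* Walk the chain and verify that priorities never decrease. */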
for (priority = bhp->priority;
(bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) != NULL;
priority = bhp->priority)
DB_ASSERT(priority <= bhp->priority);
}
#endif