MISR Toolkit  1.5.1
H5Cpkg.h
Go to the documentation of this file.
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  * Copyright by The HDF Group. *
3  * Copyright by the Board of Trustees of the University of Illinois. *
4  * All rights reserved. *
5  * *
6  * This file is part of HDF5. The full HDF5 copyright notice, including *
7  * terms governing use, modification, and redistribution, is contained in *
8  * the COPYING file, which can be found at the root of the source code *
9  * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
10  * If you do not have access to either file, you may request a copy from *
11  * help@hdfgroup.org. *
12  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
13 
14 /*
15  * Programmer: John Mainzer -- 10/12/04
16  *
17  * Purpose: This file contains declarations which are normally visible
18  * only within the H5C package.
19  *
20  * Source files outside the H5C package should include
21  * H5Cprivate.h instead.
22  *
23  * The one exception to this rule is test/cache.c. The test
24  * code is easier to write if it can look at the cache's
25  * internal data structures. Indeed, this is the main
26  * reason why this file was created.
27  */
28 
29 #ifndef H5C_PACKAGE
30 #error "Do not include this file outside the H5C package!"
31 #endif
32 
33 #ifndef _H5Cpkg_H
34 #define _H5Cpkg_H
35 
36 
37 /* Get package's private header */
38 #include "H5Cprivate.h"
39 
40 
41 /* Get needed headers */
42 #include "H5SLprivate.h" /* Skip lists */
43 
44 /* With the introduction of the fractal heap, it is now possible for
45  * entries to be dirtied, resized, and/or moved in the flush callbacks.
46  * As a result, on flushes, it may be necessary to make multiple passes
47  * through the slist before it is empty. The H5C__MAX_PASSES_ON_FLUSH
48  * #define is used to set an upper limit on the number of passes.
49  * The current value was obtained via personal communication with
50  * Quincey. I have applied a fudge factor of 2.
51  *
52  * -- JRM
53  */
54 
55 #define H5C__MAX_PASSES_ON_FLUSH 4
56 
57 
58 
59 /****************************************************************************
60  *
61  * structure H5C_t
62  *
63  * Catchall structure for all variables specific to an instance of the cache.
64  *
65  * While the individual fields of the structure are discussed below, the
66  * following overview may be helpful.
67  *
68  * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
69  * the entry's disk address. While the H5TB_TREE is less efficient than
70  * hash table, it keeps the entries in address sorted order. As flushes
71  * in parallel mode are more efficient if they are issued in increasing
72  * address order, this is a significant benefit. Also the H5TB_TREE code
73  * was readily available, which reduced development time.
74  *
75  * While the cache was designed with multiple replacement policies in mind,
76  * at present only a modified form of LRU is supported.
77  *
78  * JRM - 4/26/04
79  *
80  * Profiling has indicated that searches in the instance of H5TB_TREE are
81  * too expensive. To deal with this issue, I have augmented the cache
82  * with a hash table in which all entries will be stored. Given the
83  * advantages of flushing entries in increasing address order, the TBBT
84  * is retained, but only dirty entries are stored in it. At least for
85  * now, we will leave entries in the TBBT after they are flushed.
86  *
87  * Note that index_size and index_len now refer to the total size of
88  * and number of entries in the hash table.
89  *
90  * JRM - 7/19/04
91  *
92  * The TBBT has since been replaced with a skip list. This change
93  * greatly predates this note.
94  *
95  * JRM - 9/26/05
96  *
97  * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
98  * This field is used to validate pointers to instances of
99  * H5C_t.
100  *
101  * flush_in_progress: Boolean flag indicating whether a flush is in
102  * progress.
103  *
104  * trace_file_ptr: File pointer pointing to the trace file, which is used
105  * to record cache operations for use in simulations and design
106  * studies. This field will usually be NULL, indicating that
107  * no trace file should be recorded.
108  *
109  * Since much of the code supporting the parallel metadata
110  * cache is in H5AC, we don't write the trace file from
111  * H5C. Instead, H5AC reads the trace_file_ptr as needed.
112  *
113  * When we get to using H5C in other places, we may add
114  * code to write trace file data at the H5C level as well.
115  *
116  * aux_ptr: Pointer to void used to allow wrapper code to associate
117  * its data with an instance of H5C_t. The H5C cache code
118  * sets this field to NULL, and otherwise leaves it alone.
119  *
120  * max_type_id: Integer field containing the maximum type id number assigned
121  * to a type of entry in the cache. All type ids from 0 to
122  * max_type_id inclusive must be defined. The names of the
123  * types are stored in the type_name_table discussed below, and
124  * indexed by the ids.
125  *
126  * type_name_table_ptr: Pointer to an array of pointer to char of length
127  * max_type_id + 1. The strings pointed to by the entries
128  * in the array are the names of the entry types associated
129  * with the indexing type IDs.
130  *
131  * max_cache_size: Nominal maximum number of bytes that may be stored in the
132  * cache. This value should be viewed as a soft limit, as the
133  * cache can exceed this value under the following circumstances:
134  *
135  * a) All entries in the cache are protected, and the cache is
136  * asked to insert a new entry. In this case the new entry
137  * will be created. If this causes the cache to exceed
138  * max_cache_size, it will do so. The cache will attempt
139  * to reduce its size as entries are unprotected.
140  *
141  * b) When running in parallel mode, the cache may not be
142  * permitted to flush a dirty entry in response to a read.
143  * If there are no clean entries available to evict, the
144  * cache will exceed its maximum size. Again the cache
145  * will attempt to reduce its size to the max_cache_size
146  * limit on the next cache write.
147  *
148  * c) When an entry increases in size, the cache may exceed
149  * the max_cache_size limit until the next time the cache
150  * attempts to load or insert an entry.
151  *
152  * min_clean_size: Nominal minimum number of clean bytes in the cache.
153  * The cache attempts to maintain this number of bytes of
154  * clean data so as to avoid case b) above. Again, this is
155  * a soft limit.
156  *
157  *
158  * In addition to the call back functions required for each entry, the
159  * cache requires the following call back functions for this instance of
160  * the cache as a whole:
161  *
162  * check_write_permitted: In certain applications, the cache may not
163  * be allowed to write to disk at certain time. If specified,
164  * the check_write_permitted function is used to determine if
165  * a write is permissible at any given point in time.
166  *
167  * If no such function is specified (i.e. this field is NULL),
168  * the cache uses the following write_permitted field to
169  * determine whether writes are permitted.
170  *
171  * write_permitted: If check_write_permitted is NULL, this boolean flag
172  * indicates whether writes are permitted.
173  *
174  * log_flush: If provided, this function is called whenever a dirty
175  * entry is flushed to disk.
176  *
177  *
178  * In cases where memory is plentiful, and performance is an issue, it
179  * is useful to disable all cache evictions, and thereby postpone metadata
180  * writes. The following field is used to implement this.
181  *
182  * evictions_enabled: Boolean flag that is initialized to TRUE. When
183  * this flag is set to FALSE, the metadata cache will not
184  * attempt to evict entries to make space for newly protected
185  * entries, and instead the cache will grow without limit.
186  *
187  * Needless to say, this feature must be used with care.
188  *
189  *
190  * The cache requires an index to facilitate searching for entries. The
191  * following fields support that index.
192  *
193  * index_len: Number of entries currently in the hash table used to index
194  * the cache.
195  *
196  * index_size: Number of bytes of cache entries currently stored in the
197  * hash table used to index the cache.
198  *
199  * This value should not be mistaken for footprint of the
200  * cache in memory. The average cache entry is small, and
201  * the cache has a considerable overhead. Multiplying the
202  * index_size by two should yield a conservative estimate
203  * of the cache's memory footprint.
204  *
205  * clean_index_size: Number of bytes of clean entries currently stored in
206  * the hash table. Note that the index_size field (above)
207  * is also the sum of the sizes of all entries in the cache.
208  * Thus we should have the invariant that clean_index_size +
209  * dirty_index_size == index_size.
210  *
211  * WARNING:
212  *
213  * 1) The clean_index_size field is not maintained by the
214  * index macros, as the hash table doesn't care whether
215  * the entry is clean or dirty. Instead the field is
216  * maintained in the H5C__UPDATE_RP macros.
217  *
218  * 2) The value of the clean_index_size must not be mistaken
219  * for the current clean size of the cache. Rather, the
220  * clean size of the cache is the current value of
221  * clean_index_size plus the amount of empty space (if any)
222  * in the cache.
223  *
224  * dirty_index_size: Number of bytes of dirty entries currently stored in
225  * the hash table. Note that the index_size field (above)
226  * is also the sum of the sizes of all entries in the cache.
227  * Thus we should have the invariant that clean_index_size +
228  * dirty_index_size == index_size.
229  *
230  * WARNING:
231  *
232  * 1) The dirty_index_size field is not maintained by the
233  * index macros, as the hash table doesn't care whether
234  * the entry is clean or dirty. Instead the field is
235  * maintained in the H5C__UPDATE_RP macros.
236  *
237  * index: Array of pointer to H5C_cache_entry_t of size
238  * H5C__HASH_TABLE_LEN. At present, this value is a power
239  * of two, not the usual prime number.
240  *
241  * I hope that the variable size of cache elements, the large
242  * hash table size, and the way in which HDF5 allocates space
243  * will combine to avoid problems with periodicity. If so, we
244  * can use a trivial hash function (a bit-and and a 3 bit left
245  * shift) with some small savings.
246  *
247  * If not, it will become evident in the statistics. Changing
248  * to the usual prime number length hash table will require
249  * changing the H5C__HASH_FCN macro and the deletion of the
250  * H5C__HASH_MASK #define. No other changes should be required.
251  *
252  *
253  * When we flush the cache, we need to write entries out in increasing
254  * address order. An instance of a skip list is used to store dirty entries in
255  * sorted order. Whether it is cheaper to sort the dirty entries as needed,
256  * or to maintain the list is an open question. At a guess, it depends
257  * on how frequently the cache is flushed. We will see how it goes.
258  *
259  * For now at least, I will not remove dirty entries from the list as they
260  * are flushed. (this has been changed -- dirty entries are now removed from
261  * the skip list as they are flushed. JRM - 10/25/05)
262  *
263  * slist_len: Number of entries currently in the skip list
264  * used to maintain a sorted list of dirty entries in the
265  * cache.
266  *
267  * slist_size: Number of bytes of cache entries currently stored in the
268  * skip list used to maintain a sorted list of
269  * dirty entries in the cache.
270  *
271  * slist_ptr: pointer to the instance of H5SL_t used to maintain a sorted
272  * list of dirty entries in the cache. This sorted list has
273  * two uses:
274  *
275  * a) It allows us to flush dirty entries in increasing address
276  * order, which results in significant savings.
277  *
278  * b) It facilitates checking for adjacent dirty entries when
279  * attempting to evict entries from the cache. While we
280  * don't use this at present, I hope that this will allow
281  * some optimizations when I get to it.
282  *
283  * With the addition of the fractal heap, the cache must now deal with
284  * the case in which entries may be dirtied, moved, or have their sizes
285  * changed during a flush. To allow sanity checks in this situation, the
286  * following two fields have been added. They are only compiled in when
287  * H5C_DO_SANITY_CHECKS is TRUE.
288  *
289  * slist_len_increase: Number of entries that have been added to the
290  * slist since the last time this field was set to zero.
291  *
292  * slist_size_increase: Total size of all entries that have been added
293  * to the slist since the last time this field was set to
294  * zero.
295  *
296  *
297  * When a cache entry is protected, it must be removed from the LRU
298  * list(s) as it cannot be either flushed or evicted until it is unprotected.
299  * The following fields are used to implement the protected list (pl).
300  *
301  * pl_len: Number of entries currently residing on the protected list.
302  *
303  * pl_size: Number of bytes of cache entries currently residing on the
304  * protected list.
305  *
306  * pl_head_ptr: Pointer to the head of the doubly linked list of protected
307  * entries. Note that cache entries on this list are linked
308  * by their next and prev fields.
309  *
310  * This field is NULL if the list is empty.
311  *
312  * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
313  * entries. Note that cache entries on this list are linked
314  * by their next and prev fields.
315  *
316  * This field is NULL if the list is empty.
317  *
318  *
319  * For very frequently used entries, the protect/unprotect overhead can
320  * become burdensome. To avoid this overhead, I have modified the cache
321  * to allow entries to be "pinned". A pinned entry is similar to a
322  * protected entry, in the sense that it cannot be evicted, and that
323  * the entry can be modified at any time.
324  *
325  * Pinning an entry has the following implications:
326  *
327  * 1) A pinned entry cannot be evicted. Thus unprotected
328  * pinned entries reside in the pinned entry list, instead
329  * of the LRU list(s) (or other lists maintained by the current
330  * replacement policy code).
331  *
332  * 2) A pinned entry can be accessed or modified at any time.
333  * Therefore, the cache must check with the entry owner
334  * before flushing it. If permission is denied, the
335  * cache just skips the entry in the flush.
336  *
337  * 3) A pinned entry can be marked as dirty (and possibly
338  * change size) while it is unprotected.
339  *
340  * 4) The flush-destroy code must allow pinned entries to
341  * be unpinned (and possibly unprotected) during the
342  * flush.
343  *
344  * Since pinned entries cannot be evicted, they must be kept on a pinned
345  * entry list, instead of being entrusted to the replacement policy code.
346  *
347  * Maintaining the pinned entry list requires the following fields:
348  *
349  * pel_len: Number of entries currently residing on the pinned
350  * entry list.
351  *
352  * pel_size: Number of bytes of cache entries currently residing on
353  * the pinned entry list.
354  *
355  * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
356  * but not protected entries. Note that cache entries on
357  * this list are linked by their next and prev fields.
358  *
359  * This field is NULL if the list is empty.
360  *
361  * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
362  * but not protected entries. Note that cache entries on
363  * this list are linked by their next and prev fields.
364  *
365  * This field is NULL if the list is empty.
366  *
367  *
368  * The cache must have a replacement policy, and the fields supporting this
369  * policy must be accessible from this structure.
370  *
371  * While there has been interest in several replacement policies for
372  * this cache, the initial development schedule is tight. Thus I have
373  * elected to support only a modified LRU policy for the first cut.
374  *
375  * To further simplify matters, I have simply included the fields needed
376  * by the modified LRU in this structure. When and if we add support for
377  * other policies, it will probably be easiest to just add the necessary
378  * fields to this structure as well -- we only create one instance of this
379  * structure per file, so the overhead is not excessive.
380  *
381  *
382  * Fields supporting the modified LRU policy:
383  *
384  * See most any OS text for a discussion of the LRU replacement policy.
385  *
386  * When operating in parallel mode, we must ensure that a read does not
387  * cause a write. If it does, the process will hang, as the write will
388  * be collective and the other processes will not know to participate.
389  *
390  * To deal with this issue, I have modified the usual LRU policy by adding
391  * clean and dirty LRU lists to the usual LRU list.
392  *
393  * The clean LRU list is simply the regular LRU list with all dirty cache
394  * entries removed.
395  *
396  * Similarly, the dirty LRU list is the regular LRU list with all the clean
397  * cache entries removed.
398  *
399  * When reading in parallel mode, we evict from the clean LRU list only.
400  * This implies that we must try to ensure that the clean LRU list is
401  * reasonably well stocked at all times.
402  *
403  * We attempt to do this by trying to flush enough entries on each write
404  * to keep the cLRU_list_size >= min_clean_size.
405  *
406  * Even if we start with a completely clean cache, a sequence of protects
407  * without unprotects can empty the clean LRU list. In this case, the
408  * cache must grow temporarily. At the next write, we will attempt to
409  * evict enough entries to reduce index_size to less than max_cache_size.
410  * While this will usually be possible, all bets are off if enough entries
411  * are protected.
412  *
413  * Discussions of the individual fields used by the modified LRU replacement
414  * policy follow:
415  *
416  * LRU_list_len: Number of cache entries currently on the LRU list.
417  *
418  * Observe that LRU_list_len + pl_len must always equal
419  * index_len.
420  *
421  * LRU_list_size: Number of bytes of cache entries currently residing on the
422  * LRU list.
423  *
424  * Observe that LRU_list_size + pl_size must always equal
425  * index_size.
426  *
427  * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
428  * entries on this list are linked by their next and prev fields.
429  *
430  * This field is NULL if the list is empty.
431  *
432  * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
433  * entries on this list are linked by their next and prev fields.
434  *
435  * This field is NULL if the list is empty.
436  *
437  * cLRU_list_len: Number of cache entries currently on the clean LRU list.
438  *
439  * Observe that cLRU_list_len + dLRU_list_len must always
440  * equal LRU_list_len.
441  *
442  * cLRU_list_size: Number of bytes of cache entries currently residing on
443  * the clean LRU list.
444  *
445  * Observe that cLRU_list_size + dLRU_list_size must always
446  * equal LRU_list_size.
447  *
448  * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
449  * Cache entries on this list are linked by their aux_next and
450  * aux_prev fields.
451  *
452  * This field is NULL if the list is empty.
453  *
454  * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
455  * Cache entries on this list are linked by their aux_next and
456  * aux_prev fields.
457  *
458  * This field is NULL if the list is empty.
459  *
460  * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
461  *
462  * Observe that cLRU_list_len + dLRU_list_len must always
463  * equal LRU_list_len.
464  *
465  * dLRU_list_size: Number of cache entries currently on the dirty LRU list.
466  *
467  * Observe that cLRU_list_len + dLRU_list_len must always
468  * equal LRU_list_len.
469  *
470  * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
471  * Cache entries on this list are linked by their aux_next and
472  * aux_prev fields.
473  *
474  * This field is NULL if the list is empty.
475  *
476  * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
477  * Cache entries on this list are linked by their aux_next and
478  * aux_prev fields.
479  *
480  * This field is NULL if the list is empty.
481  *
482  *
483  * Automatic cache size adjustment:
484  *
485  * While the default cache size is adequate for most cases, we can run into
486  * cases where the default is too small. Ideally, we will let the user
487  * adjust the cache size as required. However, this is not possible in all
488  * cases. Thus I have added automatic cache size adjustment code.
489  *
490  * The configuration for the automatic cache size adjustment is stored in
491  * the structure described below:
492  *
493  * size_increase_possible: Depending on the configuration data given
494  * in the resize_ctl field, it may or may not be possible
495  * to increase the size of the cache. Rather than test for
496  * all the ways this can happen, we simply set this flag when
497  * we receive a new configuration.
498  *
499  * flash_size_increase_possible: Depending on the configuration data given
500  * in the resize_ctl field, it may or may not be possible
501  * for a flash size increase to occur. We set this flag
502  * whenever we receive a new configuration so as to avoid
503  * repeated calculations.
504  *
505  * flash_size_increase_threshold: If a flash cache size increase is possible,
506  * this field is used to store the minimum size of a new entry
507  * or size increase needed to trigger a flash cache size
508  * increase. Note that this field must be updated whenever
509  * the size of the cache is changed.
510  *
511  * size_decrease_possible: Depending on the configuration data given
512  * in the resize_ctl field, it may or may not be possible
513  * to decrease the size of the cache. Rather than test for
514  * all the ways this can happen, we simply set this flag when
515  * we receive a new configuration.
516  *
517  * cache_full: Boolean flag used to keep track of whether the cache is
518  * full, so we can refrain from increasing the size of a
519  * cache which hasn't used up the space allotted to it.
520  *
521  * The field is initialized to FALSE, and then set to TRUE
522  * whenever we attempt to make space in the cache.
523  *
524  * resize_enabled: This is another convenience flag which is set whenever
525  * a new set of values for resize_ctl are provided. Very
526  * simply,
527  *
528  * resize_enabled = size_increase_possible ||
529  * size_decrease_possible;
530  *
531  * size_decreased: Boolean flag set to TRUE whenever the maximum cache
532  * size is decreased. The flag triggers a call to
533  * H5C_make_space_in_cache() on the next call to H5C_protect().
534  *
535  * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
536  * data for automatic cache resizing.
537  *
538  * epoch_markers_active: Integer field containing the number of epoch
539  * markers currently in use in the LRU list. This value
540  * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
541  *
542  * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
543  * This array is used to track which epoch markers are currently
544  * in use.
545  *
546  * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
547  *
548  * To manage the epoch marker cache entries, it is necessary
549  * to track their order in the LRU list. This is done with
550  * epoch_marker_ringbuf. When markers are inserted at the
551  * head of the LRU list, the index of the marker in the
552  * epoch_markers array is inserted at the tail of the ring
553  * buffer. When it becomes the epoch_marker_active'th marker
554  * in the LRU list, it will have worked its way to the head
555  * of the ring buffer as well. This allows us to remove it
556  * without scanning the LRU list if such is required.
557  *
558  * epoch_marker_ringbuf_first: Integer field containing the index of the
559  * first entry in the ring buffer.
560  *
561  * epoch_marker_ringbuf_last: Integer field containing the index of the
562  * last entry in the ring buffer.
563  *
564  * epoch_marker_ringbuf_size: Integer field containing the number of entries
565  * in the ring buffer.
566  *
567  * epoch_markers: Array of instances of H5C_cache_entry_t of length
568  * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
569  * in the LRU list to identify cache entries that haven't
570  * been accessed for some (small) specified number of
571  * epochs. These entries (if any) can then be evicted and
572  * the cache size reduced -- ideally without evicting any
573  * of the current working set. Needless to say, the epoch
574  * length and the number of epochs before an unused entry
575  * must be chosen so that all, or almost all, the working
576  * set will be accessed before the limit.
577  *
578  * Epoch markers only appear in the LRU list, never in
579  * the index or slist. While they are of type
580  * H5C__EPOCH_MARKER_TYPE, and have associated class
581  * functions, these functions should never be called.
582  *
583  * The addr fields of these instances of H5C_cache_entry_t
584  * are set to the index of the instance in the epoch_markers
585  * array, the size is set to 0, and the type field points
586  * to the constant structure epoch_marker_class defined
587  * in H5C.c. The next and prev fields are used as usual
588  * to link the entry into the LRU list.
589  *
590  * All other fields are unused.
591  *
592  *
593  * Cache hit rate collection fields:
594  *
595  * We supply the current cache hit rate on request, so we must keep a
596  * simple cache hit rate computation regardless of whether statistics
597  * collection is enabled. The following fields support this capability.
598  *
599  * cache_hits: Number of cache hits since the last time the cache hit
600  * rate statistics were reset. Note that when automatic cache
601  * re-sizing is enabled, this field will be reset every automatic
602  * resize epoch.
603  *
604  * cache_accesses: Number of times the cache has been accessed
605  * since the last time the cache hit rate statistics
606  * were reset. Note that when automatic cache re-sizing is enabled,
607  * this field will be reset every automatic resize epoch.
608  *
609  *
610  * Statistics collection fields:
611  *
612  * When enabled, these fields are used to collect statistics as described
613  * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
614  * is true.
615  *
616  * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
617  * are used to record the number of times an entry with type id
618  * equal to the array index has been in cache when requested in
619  * the current epoch.
620  *
621  * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
622  * are used to record the number of times an entry with type id
623  * equal to the array index has not been in cache when
624  * requested in the current epoch.
625  *
626  * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
627  * cells are used to record the number of times an entry with
628  * type id equal to the array index has been write protected
629  * in the current epoch.
630  *
631  * Observe that (hits + misses) = (write_protects + read_protects).
632  *
633  * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
634  * cells are used to record the number of times an entry with
635  * type id equal to the array index has been read protected in
636  * the current epoch.
637  *
638  * Observe that (hits + misses) = (write_protects + read_protects).
639  *
640  * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
641  * The cells are used to record the maximum number of simultaneous read
642  * protects on any entry with type id equal to the array index
643  * in the current epoch.
644  *
645  * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
646  * are used to record the number of times an entry with type
647  * id equal to the array index has been inserted into the
648  * cache in the current epoch.
649  *
650  * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
651  * The cells are used to record the number of times an entry
652  * with type id equal to the array index has been inserted
653  * pinned into the cache in the current epoch.
654  *
655  * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
656  * are used to record the number of times an entry with type
657  * id equal to the array index has been cleared in the current
658  * epoch.
659  *
660  * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
661  * are used to record the number of times an entry with type id
662  * equal to the array index has been written to disk in the
663  * current epoch.
664  *
665  * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
666  * are used to record the number of times an entry with type id
667  * equal to the array index has been evicted from the cache in
668  * the current epoch.
669  *
670  * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
671  * are used to record the number of times an entry with type
672  * id equal to the array index has been moved in the current
673  * epoch.
674  *
675  * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
676  * The cells are used to record the number of times an entry
677  * with type id equal to the array index has been moved
678  * during its flush callback in the current epoch.
679  *
680  * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
681  * The cells are used to record the number of times an entry
682  * with type id equal to the array index has been moved
683  * during a cache flush in the current epoch.
684  *
685  * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
686  * are used to record the number of times an entry with type
687  * id equal to the array index has been pinned in the current
688  * epoch.
689  *
690  * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
691  * are used to record the number of times an entry with type
692  * id equal to the array index has been unpinned in the current
693  * epoch.
694  *
695  * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
696  * are used to record the number of times an entry with type
697  * id equal to the array index has been marked dirty while pinned
698  * in the current epoch.
699  *
700  * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
701  * cells are used to record the number of times an entry
702  * with type id equal to the array index has been flushed while
703  * pinned in the current epoch.
704  *
705  * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
706  * cells are used to record the number of times an entry
707  * with type id equal to the array index has been cleared while
708  * pinned in the current epoch.
709  *
710  * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
711  * The cells are used to record the number of times an entry
712  * with type id equal to the array index has increased in
713  * size in the current epoch.
714  *
715  * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
716  * The cells are used to record the number of times an entry
717  * with type id equal to the array index has decreased in
718  * size in the current epoch.
719  *
720  * entry_flush_size_changes: Array of int64 of length
721  * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
722  * the number of times an entry with type id equal to the
723  * array index has changed size while in its flush callback.
724  *
725  * cache_flush_size_changes: Array of int64 of length
726  * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
727  * the number of times an entry with type id equal to the
728  * array index has changed size during a cache flush
729  *
730  * total_ht_insertions: Number of times entries have been inserted into the
731  * hash table in the current epoch.
732  *
733  * total_ht_deletions: Number of times entries have been deleted from the
734  * hash table in the current epoch.
735  *
736  * successful_ht_searches: int64 containing the total number of successful
737  * searches of the hash table in the current epoch.
738  *
739  * total_successful_ht_search_depth: int64 containing the total number of
740  * entries other than the targets examined in successful
741  * searches of the hash table in the current epoch.
742  *
743  * failed_ht_searches: int64 containing the total number of unsuccessful
744  * searches of the hash table in the current epoch.
745  *
746  * total_failed_ht_search_depth: int64 containing the total number of
747  * entries examined in unsuccessful searches of the hash
748  * table in the current epoch.
749  *
750  * max_index_len: Largest value attained by the index_len field in the
751  * current epoch.
752  *
753  * max_index_size: Largest value attained by the index_size field in the
754  * current epoch.
755  *
756  * max_clean_index_size: Largest value attained by the clean_index_size field
757  * in the current epoch.
758  *
759  * max_dirty_index_size: Largest value attained by the dirty_index_size field
760  * in the current epoch.
761  *
762  * max_slist_len: Largest value attained by the slist_len field in the
763  * current epoch.
764  *
765  * max_slist_size: Largest value attained by the slist_size field in the
766  * current epoch.
767  *
768  * max_pl_len: Largest value attained by the pl_len field in the
769  * current epoch.
770  *
771  * max_pl_size: Largest value attained by the pl_size field in the
772  * current epoch.
773  *
774  * max_pel_len: Largest value attained by the pel_len field in the
775  * current epoch.
776  *
777  * max_pel_size: Largest value attained by the pel_size field in the
778  * current epoch.
779  *
780  * calls_to_msic: Total number of calls to H5C_make_space_in_cache
781  *
782  * total_entries_skipped_in_msic: Number of clean entries skipped while
783  * enforcing the min_clean_fraction in H5C_make_space_in_cache().
784  *
 * total_entries_scanned_in_msic: Number of entries scanned over while
 * enforcing the min_clean_fraction in H5C_make_space_in_cache().
787  *
788  * max_entries_skipped_in_msic: Maximum number of clean entries skipped
789  * in any one call to H5C_make_space_in_cache().
790  *
791  * max_entries_scanned_in_msic: Maximum number of entries scanned over
792  * in any one call to H5C_make_space_in_cache().
793  *
794  * entries_scanned_to_make_space: Number of entries scanned only when looking
795  * for entries to evict in order to make space in cache.
 *
797  * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
798  * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
799  *
800  * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
801  * are used to record the maximum number of times any single
802  * entry with type id equal to the array index has been
803  * accessed in the current epoch.
804  *
805  * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
806  * are used to record the minimum number of times any single
807  * entry with type id equal to the array index has been
808  * accessed in the current epoch.
809  *
810  * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
811  * are used to record the maximum number of times any single
812  * entry with type id equal to the array index has been cleared
813  * in the current epoch.
814  *
815  * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
816  * are used to record the maximum number of times any single
817  * entry with type id equal to the array index has been
818  * flushed in the current epoch.
819  *
820  * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
821  * are used to record the maximum size of any single entry
822  * with type id equal to the array index that has resided in
823  * the cache in the current epoch.
824  *
825  * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
826  * are used to record the maximum number of times that any single
827  * entry with type id equal to the array index that has been
828  * marked as pinned in the cache in the current epoch.
829  *
830  *
831  * Fields supporting testing:
832  *
833  * prefix Array of char used to prefix debugging output. The
834  * field is intended to allow marking of output of with
835  * the processes mpi rank.
836  *
837  ****************************************************************************/
838 
839 #define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
840 
841 #define H5C__H5C_T_MAGIC 0x005CAC0E
842 #define H5C__MAX_NUM_TYPE_IDS 19
843 #define H5C__PREFIX_LEN 32
844 
845 struct H5C_t
846 {
847  uint32_t magic;
848 
850 
852 
853  void * aux_ptr;
854 
855  int32_t max_type_id;
856  const char * (* type_name_table_ptr);
857 
860 
861  H5C_write_permitted_func_t check_write_permitted;
863 
864  H5C_log_flush_func_t log_flush;
865 
867 
868  int32_t index_len;
869  size_t index_size;
872  H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
873 
874 
875  int32_t slist_len;
876  size_t slist_size;
877  H5SL_t * slist_ptr;
878 #if H5C_DO_SANITY_CHECKS
879  int64_t slist_len_increase;
880  int64_t slist_size_increase;
881 #endif /* H5C_DO_SANITY_CHECKS */
882 
883  int32_t pl_len;
884  size_t pl_size;
885  H5C_cache_entry_t * pl_head_ptr;
886  H5C_cache_entry_t * pl_tail_ptr;
887 
888  int32_t pel_len;
889  size_t pel_size;
890  H5C_cache_entry_t * pel_head_ptr;
891  H5C_cache_entry_t * pel_tail_ptr;
892 
893  int32_t LRU_list_len;
895  H5C_cache_entry_t * LRU_head_ptr;
896  H5C_cache_entry_t * LRU_tail_ptr;
897 
898  int32_t cLRU_list_len;
900  H5C_cache_entry_t * cLRU_head_ptr;
901  H5C_cache_entry_t * cLRU_tail_ptr;
902 
903  int32_t dLRU_list_len;
905  H5C_cache_entry_t * dLRU_head_ptr;
906  H5C_cache_entry_t * dLRU_tail_ptr;
907 
915  H5C_auto_size_ctl_t resize_ctl;
916 
918  hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
919  int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
923  H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
924 
925  int64_t cache_hits;
926  int64_t cache_accesses;
927 
928 #if H5C_COLLECT_CACHE_STATS
929 
930  /* stats fields */
931  int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
932  int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
933  int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
934  int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
935  int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
936  int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
937  int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
938  int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
939  int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
940  int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
941  int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
942  int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
943  int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
944  int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
945  int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
946  int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
947  int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
948  int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
949  int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
950  int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
951  int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
952  int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
953 
954  int64_t total_ht_insertions;
955  int64_t total_ht_deletions;
956  int64_t successful_ht_searches;
957  int64_t total_successful_ht_search_depth;
958  int64_t failed_ht_searches;
959  int64_t total_failed_ht_search_depth;
960 
961  int32_t max_index_len;
962  size_t max_index_size;
963  size_t max_clean_index_size;
964  size_t max_dirty_index_size;
965 
966  int32_t max_slist_len;
967  size_t max_slist_size;
968 
969  int32_t max_pl_len;
970  size_t max_pl_size;
971 
972  int32_t max_pel_len;
973  size_t max_pel_size;
974 
975  int64_t calls_to_msic;
976  int64_t total_entries_skipped_in_msic;
977  int64_t total_entries_scanned_in_msic;
978  int32_t max_entries_skipped_in_msic;
979  int32_t max_entries_scanned_in_msic;
980  int64_t entries_scanned_to_make_space;
981 
982 #if H5C_COLLECT_CACHE_ENTRY_STATS
983 
984  int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
985  int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
986  int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
987  int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
988  size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
989  int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
990 
991 #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
992 
993 #endif /* H5C_COLLECT_CACHE_STATS */
994 
996 };
997 
998 
999 /****************************************************************************/
1000 /***************************** Macro Definitions ****************************/
1001 /****************************************************************************/
1002 
1003 /****************************************************************************
1004  *
1005  * We maintain doubly linked lists of instances of H5C_cache_entry_t for a
1006  * variety of reasons -- protected list, LRU list, and the clean and dirty
1007  * LRU lists at present. The following macros support linking and unlinking
1008  * of instances of H5C_cache_entry_t by both their regular and auxiliary next
1009  * and previous pointers.
1010  *
1011  * The size and length fields are also maintained.
1012  *
1013  * Note that the relevant pair of prev and next pointers are presumed to be
1014  * NULL on entry in the insertion macros.
1015  *
1016  * Finally, observe that the sanity checking macros evaluate to the empty
1017  * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls
1018  * to the HGOTO_ERROR macro, which may not be appropriate in all cases.
1019  * If so, we will need versions of the insertion and deletion macros which
1020  * do not reference the sanity checking macros.
1021  * JRM - 5/5/04
1022  *
1023  * Changes:
1024  *
1025  * - Removed the line:
1026  *
1027  * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) ||
1028  *
1029  * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the
1030  * epoch markers used in the age out based cache size reduction algorithm,
1031  * this invarient need not hold, as the epoch markers are of size 0.
1032  *
1033  * One could argue that I should have given the epoch markers a positive
1034  * size, but this would break the index_size = LRU_list_size + pl_size
1035  * + pel_size invarient.
1036  *
1037  * Alternatively, I could pass the current decr_mode in to the macro,
1038  * and just skip the check whenever epoch markers may be in use.
1039  *
1040  * However, any size errors should be caught when the cache is flushed
1041  * and destroyed. Until we are tracking such an error, this should be
1042  * good enough.
1043  * JRM - 12/9/04
1044  *
1045  *
1046  * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines:
1047  *
1048  * ( ( (len) == 1 ) &&
1049  * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||
1050  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
1051  * )
1052  * ) ||
1053  *
1054  * with:
1055  *
1056  * ( ( (len) == 1 ) &&
1057  * ( ( (head_ptr) != (tail_ptr) ) ||
1058  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
1059  * )
1060  * ) ||
1061  *
1062  * Epoch markers have size 0, so we can now have a non-empty list with
1063  * zero size. Hence the "( (Size) <= 0 )" clause cause false failures
1064  * in the sanity check. Since "Size" is typically a size_t, it can't
1065  * take on negative values, and thus the revised clause "( (Size) < 0 )"
1066  * caused compiler warnings.
1067  * JRM - 12/22/04
1068  *
1069  * - In the H5C__DLL_SC macro, replaced the lines:
1070  *
1071  * ( ( (len) == 1 ) &&
1072  * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) ||
1073  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
1074  * )
1075  * ) ||
1076  *
1077  * with
1078  *
1079  * ( ( (len) == 1 ) &&
1080  * ( ( (head_ptr) != (tail_ptr) ) ||
1081  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
1082  * )
1083  * ) ||
1084  *
1085  * Epoch markers have size 0, so we can now have a non-empty list with
1086  * zero size. Hence the "( (Size) <= 0 )" clause cause false failures
1087  * in the sanity check. Since "Size" is typically a size_t, it can't
1088  * take on negative values, and thus the revised clause "( (Size) < 0 )"
1089  * caused compiler warnings.
1090  * JRM - 1/10/05
1091  *
1092  * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
1093  * sanity checking macros. These macro are used to update the size of
1094  * a DLL when one of its entries changes size.
1095  *
1096  * JRM - 9/8/05
1097  *
1098  ****************************************************************************/
1099 
1100 #if H5C_DO_SANITY_CHECKS
1101 
1102 #define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
1103 if ( ( (head_ptr) == NULL ) || \
1104  ( (tail_ptr) == NULL ) || \
1105  ( (entry_ptr) == NULL ) || \
1106  ( (len) <= 0 ) || \
1107  ( (Size) < (entry_ptr)->size ) || \
1108  ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \
1109  ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
1110  ( ( (len) == 1 ) && \
1111  ( ! ( ( (head_ptr) == (entry_ptr) ) && \
1112  ( (tail_ptr) == (entry_ptr) ) && \
1113  ( (entry_ptr)->next == NULL ) && \
1114  ( (entry_ptr)->prev == NULL ) && \
1115  ( (Size) == (entry_ptr)->size ) \
1116  ) \
1117  ) \
1118  ) \
1119  ) { \
1120  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \
1121 }
1122 
1123 #define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
1124 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
1125  ( (head_ptr) != (tail_ptr) ) \
1126  ) || \
1127  ( (len) < 0 ) || \
1128  ( (Size) < 0 ) || \
1129  ( ( (len) == 1 ) && \
1130  ( ( (head_ptr) != (tail_ptr) ) || \
1131  ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
1132  ) \
1133  ) || \
1134  ( ( (len) >= 1 ) && \
1135  ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
1136  ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
1137  ) \
1138  ) \
1139  ) { \
1140  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \
1141 }
1142 
1143 #define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
1144 if ( ( (entry_ptr) == NULL ) || \
1145  ( (entry_ptr)->next != NULL ) || \
1146  ( (entry_ptr)->prev != NULL ) || \
1147  ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
1148  ( (head_ptr) != (tail_ptr) ) \
1149  ) || \
1150  ( (len) < 0 ) || \
1151  ( ( (len) == 1 ) && \
1152  ( ( (head_ptr) != (tail_ptr) ) || \
1153  ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
1154  ) \
1155  ) || \
1156  ( ( (len) >= 1 ) && \
1157  ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
1158  ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
1159  ) \
1160  ) \
1161  ) { \
1162  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \
1163 }
1164 
1165 #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
1166 if ( ( (dll_len) <= 0 ) || \
1167  ( (dll_size) <= 0 ) || \
1168  ( (old_size) <= 0 ) || \
1169  ( (old_size) > (dll_size) ) || \
1170  ( (new_size) <= 0 ) || \
1171  ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \
1172  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \
1173 }
1174 
1175 #define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
1176 if ( ( (new_size) > (dll_size) ) || \
1177  ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \
1178  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \
1179 }
1180 
1181 #else /* H5C_DO_SANITY_CHECKS */
1182 
1183 #define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
1184 #define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv)
1185 #define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
1186 #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
1187 #define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
1188 
1189 #endif /* H5C_DO_SANITY_CHECKS */
1190 
1191 
1192 #define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
1193  H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1194  fail_val) \
1195  if ( (head_ptr) == NULL ) \
1196  { \
1197  (head_ptr) = (entry_ptr); \
1198  (tail_ptr) = (entry_ptr); \
1199  } \
1200  else \
1201  { \
1202  (tail_ptr)->next = (entry_ptr); \
1203  (entry_ptr)->prev = (tail_ptr); \
1204  (tail_ptr) = (entry_ptr); \
1205  } \
1206  (len)++; \
1207  (Size) += (entry_ptr)->size;
1208 
1209 #define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
1210  H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1211  fail_val) \
1212  if ( (head_ptr) == NULL ) \
1213  { \
1214  (head_ptr) = (entry_ptr); \
1215  (tail_ptr) = (entry_ptr); \
1216  } \
1217  else \
1218  { \
1219  (head_ptr)->prev = (entry_ptr); \
1220  (entry_ptr)->next = (head_ptr); \
1221  (head_ptr) = (entry_ptr); \
1222  } \
1223  (len)++; \
1224  (Size) += entry_ptr->size;
1225 
1226 #define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
1227  H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1228  fail_val) \
1229  { \
1230  if ( (head_ptr) == (entry_ptr) ) \
1231  { \
1232  (head_ptr) = (entry_ptr)->next; \
1233  if ( (head_ptr) != NULL ) \
1234  { \
1235  (head_ptr)->prev = NULL; \
1236  } \
1237  } \
1238  else \
1239  { \
1240  (entry_ptr)->prev->next = (entry_ptr)->next; \
1241  } \
1242  if ( (tail_ptr) == (entry_ptr) ) \
1243  { \
1244  (tail_ptr) = (entry_ptr)->prev; \
1245  if ( (tail_ptr) != NULL ) \
1246  { \
1247  (tail_ptr)->next = NULL; \
1248  } \
1249  } \
1250  else \
1251  { \
1252  (entry_ptr)->next->prev = (entry_ptr)->prev; \
1253  } \
1254  entry_ptr->next = NULL; \
1255  entry_ptr->prev = NULL; \
1256  (len)--; \
1257  (Size) -= entry_ptr->size; \
1258  }
1259 
1260 #define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \
1261  H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \
1262  (dll_size) -= (old_size); \
1263  (dll_size) += (new_size); \
1264  H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
1265 
1266 #if H5C_DO_SANITY_CHECKS
1267 
1268 #define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
1269 if ( ( (hd_ptr) == NULL ) || \
1270  ( (tail_ptr) == NULL ) || \
1271  ( (entry_ptr) == NULL ) || \
1272  ( (len) <= 0 ) || \
1273  ( (Size) < (entry_ptr)->size ) || \
1274  ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
1275  ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
1276  ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
1277  ( ( (len) == 1 ) && \
1278  ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
1279  ( (entry_ptr)->aux_next == NULL ) && \
1280  ( (entry_ptr)->aux_prev == NULL ) && \
1281  ( (Size) == (entry_ptr)->size ) \
1282  ) \
1283  ) \
1284  ) \
1285  ) { \
1286  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \
1287 }
1288 
1289 #define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
1290 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
1291  ( (head_ptr) != (tail_ptr) ) \
1292  ) || \
1293  ( (len) < 0 ) || \
1294  ( (Size) < 0 ) || \
1295  ( ( (len) == 1 ) && \
1296  ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
1297  ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
1298  ) \
1299  ) || \
1300  ( ( (len) >= 1 ) && \
1301  ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \
1302  ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
1303  ) \
1304  ) \
1305  ) { \
1306  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \
1307 }
1308 
1309 #define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
1310 if ( ( (entry_ptr) == NULL ) || \
1311  ( (entry_ptr)->aux_next != NULL ) || \
1312  ( (entry_ptr)->aux_prev != NULL ) || \
1313  ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
1314  ( (hd_ptr) != (tail_ptr) ) \
1315  ) || \
1316  ( (len) < 0 ) || \
1317  ( ( (len) == 1 ) && \
1318  ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
1319  ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
1320  ) \
1321  ) || \
1322  ( ( (len) >= 1 ) && \
1323  ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \
1324  ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
1325  ) \
1326  ) \
1327  ) { \
1328  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \
1329 }
1330 
1331 #else /* H5C_DO_SANITY_CHECKS */
1332 
1333 #define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
1334 #define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv)
1335 #define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
1336 
1337 #endif /* H5C_DO_SANITY_CHECKS */
1338 
1339 
1340 #define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\
1341  H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1342  fail_val) \
1343  if ( (head_ptr) == NULL ) \
1344  { \
1345  (head_ptr) = (entry_ptr); \
1346  (tail_ptr) = (entry_ptr); \
1347  } \
1348  else \
1349  { \
1350  (tail_ptr)->aux_next = (entry_ptr); \
1351  (entry_ptr)->aux_prev = (tail_ptr); \
1352  (tail_ptr) = (entry_ptr); \
1353  } \
1354  (len)++; \
1355  (Size) += entry_ptr->size;
1356 
1357 #define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
1358  H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1359  fv) \
1360  if ( (head_ptr) == NULL ) \
1361  { \
1362  (head_ptr) = (entry_ptr); \
1363  (tail_ptr) = (entry_ptr); \
1364  } \
1365  else \
1366  { \
1367  (head_ptr)->aux_prev = (entry_ptr); \
1368  (entry_ptr)->aux_next = (head_ptr); \
1369  (head_ptr) = (entry_ptr); \
1370  } \
1371  (len)++; \
1372  (Size) += entry_ptr->size;
1373 
1374 #define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
1375  H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
1376  fv) \
1377  { \
1378  if ( (head_ptr) == (entry_ptr) ) \
1379  { \
1380  (head_ptr) = (entry_ptr)->aux_next; \
1381  if ( (head_ptr) != NULL ) \
1382  { \
1383  (head_ptr)->aux_prev = NULL; \
1384  } \
1385  } \
1386  else \
1387  { \
1388  (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \
1389  } \
1390  if ( (tail_ptr) == (entry_ptr) ) \
1391  { \
1392  (tail_ptr) = (entry_ptr)->aux_prev; \
1393  if ( (tail_ptr) != NULL ) \
1394  { \
1395  (tail_ptr)->aux_next = NULL; \
1396  } \
1397  } \
1398  else \
1399  { \
1400  (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \
1401  } \
1402  entry_ptr->aux_next = NULL; \
1403  entry_ptr->aux_prev = NULL; \
1404  (len)--; \
1405  (Size) -= entry_ptr->size; \
1406  }
1407 
1408 
1409 /***********************************************************************
1410  *
1411  * Stats collection macros
1412  *
1413  * The following macros must handle stats collection when this collection
1414  * is enabled, and evaluate to the empty string when it is not.
1415  *
1416  * The sole exception to this rule is
1417  * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as
1418  * the cache hit rate stats are always collected and available.
1419  *
1420  * Changes:
1421  *
1422  * JRM -- 3/21/06
1423  * Added / updated macros for pinned entry related stats.
1424  *
1425  * JRM -- 8/9/06
1426  * More pinned entry stats related updates.
1427  *
1428  * JRM -- 3/31/07
1429  * Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on
1430  * read and write protects.
1431  *
1432  * MAM -- 1/15/09
1433  * Created H5C__UPDATE_MAX_INDEX_SIZE_STATS to contain
1434  * common code within macros that update the maximum
1435  * index, clean_index, and dirty_index statistics fields.
1436  *
1437  ***********************************************************************/
1438 
1439 #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
1440  (cache_ptr->cache_accesses)++; \
1441  if ( hit ) { \
1442  (cache_ptr->cache_hits)++; \
1443  } \
1444 
1445 #if H5C_COLLECT_CACHE_STATS
1446 
1447 #define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1448  if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
1449  (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
1450  if ( (cache_ptr)->clean_index_size > \
1451  (cache_ptr)->max_clean_index_size ) \
1452  (cache_ptr)->max_clean_index_size = \
1453  (cache_ptr)->clean_index_size; \
1454  if ( (cache_ptr)->dirty_index_size > \
1455  (cache_ptr)->max_dirty_index_size ) \
1456  (cache_ptr)->max_dirty_index_size = \
1457  (cache_ptr)->dirty_index_size;
1458 
1459 #define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \
1460  (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++;
1461 
1462 #define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \
1463  if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
1464  (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
1465  if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
1466  (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
1467  if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
1468  (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
1469  if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
1470  (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
1471 
1472 #define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \
1473  if ( cache_ptr->flush_in_progress ) { \
1474  ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \
1475  } \
1476  if ( entry_ptr->flush_in_progress ) { \
1477  ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \
1478  } \
1479  (((cache_ptr)->moves)[(entry_ptr)->type->id])++;
1480 
1481 #define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
1482  if ( cache_ptr->flush_in_progress ) { \
1483  ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \
1484  } \
1485  if ( entry_ptr->flush_in_progress ) { \
1486  ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \
1487  } \
1488  if ( (entry_ptr)->size < (new_size) ) { \
1489  ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \
1490  H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1491  if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
1492  (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
1493  if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
1494  (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
1495  } else if ( (entry_ptr)->size > (new_size) ) { \
1496  ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \
1497  }
1498 
1499 #define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
1500  (cache_ptr)->total_ht_insertions++;
1501 
1502 #define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
1503  (cache_ptr)->total_ht_deletions++;
1504 
1505 #define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \
1506  if ( success ) { \
1507  (cache_ptr)->successful_ht_searches++; \
1508  (cache_ptr)->total_successful_ht_search_depth += depth; \
1509  } else { \
1510  (cache_ptr)->failed_ht_searches++; \
1511  (cache_ptr)->total_failed_ht_search_depth += depth; \
1512  }
1513 
1514 #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
1515  ((cache_ptr)->unpins)[(entry_ptr)->type->id]++;
1516 
1517 #if H5C_COLLECT_CACHE_ENTRY_STATS
1518 
1519 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
1520  (entry_ptr)->accesses = 0; \
1521  (entry_ptr)->clears = 0; \
1522  (entry_ptr)->flushes = 0; \
1523  (entry_ptr)->pins = 0;
1524 
1525 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
1526  (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \
1527  if ( (entry_ptr)->is_pinned ) { \
1528  (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \
1529  } \
1530  ((entry_ptr)->clears)++;
1531 
1532 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
1533  (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
1534  if ( (entry_ptr)->is_pinned ) { \
1535  (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
1536  } \
1537  ((entry_ptr)->flushes)++;
1538 
1539 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
1540  (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
1541  if ( (entry_ptr)->accesses > \
1542  ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) { \
1543  ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] \
1544  = (entry_ptr)->accesses; \
1545  } \
1546  if ( (entry_ptr)->accesses < \
1547  ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) { \
1548  ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] \
1549  = (entry_ptr)->accesses; \
1550  } \
1551  if ( (entry_ptr)->clears > \
1552  ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) { \
1553  ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \
1554  = (entry_ptr)->clears; \
1555  } \
1556  if ( (entry_ptr)->flushes > \
1557  ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) { \
1558  ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \
1559  = (entry_ptr)->flushes; \
1560  } \
1561  if ( (entry_ptr)->size > \
1562  ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
1563  ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
1564  = (entry_ptr)->size; \
1565  } \
1566  if ( (entry_ptr)->pins > \
1567  ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) { \
1568  ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \
1569  = (entry_ptr)->pins; \
1570  }
1571 
1572 #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
1573  (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
1574  if ( (entry_ptr)->is_pinned ) { \
1575  (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
1576  ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
1577  (entry_ptr)->pins++; \
1578  if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
1579  (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
1580  if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
1581  (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
1582  } \
1583  if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
1584  (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
1585  H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1586  if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
1587  (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
1588  if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
1589  (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
1590  if ( (entry_ptr)->size > \
1591  ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
1592  ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
1593  = (entry_ptr)->size; \
1594  }
1595 
1596 #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
1597  if ( hit ) \
1598  ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
1599  else \
1600  ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
1601  if ( ! ((entry_ptr)->is_read_only) ) { \
1602  ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
1603  } else { \
1604  ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
1605  if ( ((entry_ptr)->ro_ref_count) > \
1606  ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \
1607  ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
1608  ((entry_ptr)->ro_ref_count); \
1609  } \
1610  } \
1611  if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
1612  (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
1613  H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1614  if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
1615  (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
1616  if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
1617  (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
1618  if ( (entry_ptr)->size > \
1619  ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) { \
1620  ((cache_ptr)->max_size)[(entry_ptr)->type->id] \
1621  = (entry_ptr)->size; \
1622  } \
1623  ((entry_ptr)->accesses)++;
1624 
1625 #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
1626  ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
1627  (entry_ptr)->pins++; \
1628  if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
1629  (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
1630  if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
1631  (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
1632 
1633 #else /* H5C_COLLECT_CACHE_ENTRY_STATS */
1634 
/* Statistics macros -- cache-wide stats only (per-entry stats disabled).
 * These mirror the H5C_COLLECT_CACHE_ENTRY_STATS versions above but omit
 * all per-entry counters (accesses, clears, flushes, pins, max size).
 * Same caveat applies: multi-statement, not do-while(0) wrapped.
 */
1635 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
1636 
/* Record a clear; pinned clears are counted separately as well. */
1637 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
1638  if ( (entry_ptr)->is_pinned ) { \
1639  (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \
1640  } \
1641  (((cache_ptr)->clears)[(entry_ptr)->type->id])++;
1642 
/* Record a flush; pinned flushes are counted separately as well. */
1643 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
1644  (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
1645  if ( (entry_ptr)->is_pinned ) { \
1646  (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
1647  }
1648 
/* Record an eviction for the entry's type. */
1649 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
1650  (((cache_ptr)->evictions)[(entry_ptr)->type->id])++;
1651 
/* Record an insertion and update pin / index / slist high-water marks. */
1652 #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
1653  (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
1654  if ( (entry_ptr)->is_pinned ) { \
1655  (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
1656  ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
1657  if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
1658  (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
1659  if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
1660  (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
1661  } \
1662  if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
1663  (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
1664  H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1665  if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
1666  (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
1667  if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
1668  (cache_ptr)->max_slist_size = (cache_ptr)->slist_size;
1669 
/* Record a protect (hit/miss, read/write) and update list high-water
 * marks.  Leading unbraced if/else on `hit` -- expand only where safe.
 */
1670 #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
1671  if ( hit ) \
1672  ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
1673  else \
1674  ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
1675  if ( ! ((entry_ptr)->is_read_only) ) { \
1676  ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
1677  } else { \
1678  ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
1679  if ( ((entry_ptr)->ro_ref_count) > \
1680  ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) { \
1681  ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
1682  ((entry_ptr)->ro_ref_count); \
1683  } \
1684  } \
1685  if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
1686  (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
1687  H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
1688  if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
1689  (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
1690  if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
1691  (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;
1692 
/* Record a pin and update the pinned-entry-list high-water marks. */
1693 #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
1694  ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
1695  if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
1696  (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
1697  if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
1698  (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
1699 
1700 #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
1701 
1702 #else /* H5C_COLLECT_CACHE_STATS */
1703 
/* Statistics collection disabled: all stats macros compile away to
 * nothing, so the cache code can invoke them unconditionally at no cost.
 */
1704 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
1705 #define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
1706 #define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
1707 #define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
1708 #define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
1709 #define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
1710 #define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
1711 #define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)
1712 #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
1713 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
1714 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
1715 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
1716 #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
1717 #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
1718 #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
1719 
1720 #endif /* H5C_COLLECT_CACHE_STATS */
1721 
1722 
1723 /***********************************************************************
1724  *
1725  * Hash table access and manipulation macros:
1726  *
1727  * The following macros handle searches, insertions, and deletion in
1728  * the hash table.
1729  *
1730  * When modifying these macros, remember to modify the similar macros
1731  * in test/cache.c
1732  *
1733  * Changes:
1734  *
1735  * - Updated existing index macros and sanity check macros to maintain
1736  * the clean_index_size and dirty_index_size fields of H5C_t. Also
1737  * added macros to allow us to track entry cleans and dirties.
1738  *
1739  * JRM -- 11/5/08
1740  *
1741  ***********************************************************************/
1742 
1743 /* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. */
1744 
/* Hash an entry's file address into a bucket index in
 * [0, H5C__HASH_TABLE_LEN).  The low 3 address bits are masked off before
 * hashing (addresses are shifted right by 3), so entries whose addresses
 * differ only in the low 3 bits land in the same bucket.  Requires
 * H5C__HASH_TABLE_LEN to be a power of two for the mask to work.
 */
1745 #define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3)
1746 
1747 #define H5C__HASH_FCN(x) (int)(((x) & H5C__HASH_MASK) >> 3)
1748 
1749 #if H5C_DO_SANITY_CHECKS
1750 
/* Hash-table sanity-check macros (active when H5C_DO_SANITY_CHECKS).
 * Each verifies invariants before/after a hash-table operation and jumps
 * to the function's error exit via HGOTO_ERROR on violation.  A common
 * invariant: index_size == clean_index_size + dirty_index_size.
 */

/* Pre-insert checks: valid cache/entry, entry not already chained, hash
 * index in range.  NOTE: assigns the bucket index to the caller-declared
 * variable `k` as a side effect -- H5C__INSERT_IN_INDEX relies on this.
 */
1751 #define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
1752 if ( ( (cache_ptr) == NULL ) || \
1753  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1754  ( (entry_ptr) == NULL ) || \
1755  ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
1756  ( (entry_ptr)->ht_next != NULL ) || \
1757  ( (entry_ptr)->ht_prev != NULL ) || \
1758  ( (entry_ptr)->size <= 0 ) || \
1759  ( (k = H5C__HASH_FCN((entry_ptr)->addr)) < 0 ) || \
1760  ( k >= H5C__HASH_TABLE_LEN ) || \
1761  ( (cache_ptr)->index_size != \
1762  ((cache_ptr)->clean_index_size + \
1763  (cache_ptr)->dirty_index_size) ) ) { \
1764  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
1765  "Pre HT insert SC failed") \
1766 }
1767 
/* Pre-remove checks: entry must be consistently chained in its bucket
 * (it is the bucket head iff ht_prev is NULL).
 */
1768 #define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
1769 if ( ( (cache_ptr) == NULL ) || \
1770  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1771  ( (cache_ptr)->index_len < 1 ) || \
1772  ( (entry_ptr) == NULL ) || \
1773  ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
1774  ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
1775  ( (entry_ptr)->size <= 0 ) || \
1776  ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
1777  ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
1778  ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
1779  == NULL ) || \
1780  ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
1781  != (entry_ptr) ) && \
1782  ( (entry_ptr)->ht_prev == NULL ) ) || \
1783  ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \
1784  (entry_ptr) ) && \
1785  ( (entry_ptr)->ht_prev != NULL ) ) || \
1786  ( (cache_ptr)->index_size != \
1787  ((cache_ptr)->clean_index_size + \
1788  (cache_ptr)->dirty_index_size) ) ) { \
1789  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
1790 }
1791 
1792 /* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
/* Pre-search checks: valid cache, defined address, hash index in range. */
1793 #define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
1794 if ( ( (cache_ptr) == NULL ) || \
1795  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1796  ( (cache_ptr)->index_size != \
1797  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
1798  ( ! H5F_addr_defined(Addr) ) || \
1799  ( H5C__HASH_FCN(Addr) < 0 ) || \
1800  ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
1801  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \
1802 }
1803 
1804 /* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
/* Post-successful-search checks: found entry matches Addr and its bucket
 * chain links (ht_prev/ht_next) are mutually consistent.
 */
1805 #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
1806 if ( ( (cache_ptr) == NULL ) || \
1807  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1808  ( (cache_ptr)->index_len < 1 ) || \
1809  ( (entry_ptr) == NULL ) || \
1810  ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
1811  ( (cache_ptr)->index_size != \
1812  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
1813  ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \
1814  ( (entry_ptr)->size <= 0 ) || \
1815  ( ((cache_ptr)->index)[k] == NULL ) || \
1816  ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
1817  ( (entry_ptr)->ht_prev == NULL ) ) || \
1818  ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \
1819  ( (entry_ptr)->ht_prev != NULL ) ) || \
1820  ( ( (entry_ptr)->ht_prev != NULL ) && \
1821  ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
1822  ( ( (entry_ptr)->ht_next != NULL ) && \
1823  ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
1824  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
1825  "Post successful HT search SC failed") \
1826 }
1827 
1828 /* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
/* After move-to-front: the entry must now head bucket k. */
1829 #define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
1830 if ( ( (cache_ptr) == NULL ) || \
1831  ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
1832  ( (entry_ptr)->ht_prev != NULL ) ) { \
1833  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
1834  "Post HT shift to front SC failed") \
1835 }
1836 
/* Sanity checks run before an in-index entry size change.
 *
 * Verifies: valid cache with a non-empty index, positive new size, old
 * size no larger than the total index size, index size consistent with
 * clean + dirty sizes, non-NULL entry, and that the clean (if was_clean)
 * or dirty (otherwise) index size can absorb the removal of old_size.
 * On violation, jumps to the error exit via HGOTO_ERROR.
 *
 * Fix over previous revision: the `(new_size) <= 0` and
 * `(entry_ptr) == NULL` tests each appeared twice; the duplicates have
 * been removed.  Behavior is unchanged -- all tests are side-effect free.
 */
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                entry_ptr, was_clean) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->index_len <= 0 ) || \
     ( (cache_ptr)->index_size <= 0 ) || \
     ( (new_size) <= 0 ) || \
     ( (old_size) > (cache_ptr)->index_size ) || \
     ( ( (cache_ptr)->index_len == 1 ) && \
       ( (cache_ptr)->index_size != (old_size) ) ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (entry_ptr) == NULL ) || \
     ( ( !( was_clean ) || \
         ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
       ( ( (was_clean) ) || \
         ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) ) { \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
                "Pre HT entry size change SC failed") \
}
1859 
/* Post-size-change checks: new size fits in the index, clean/dirty split
 * still sums to index_size, and the appropriate (clean or dirty) side can
 * account for the new size.
 */
1860 #define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
1861  entry_ptr) \
1862 if ( ( (cache_ptr) == NULL ) || \
1863  ( (cache_ptr)->index_len <= 0 ) || \
1864  ( (cache_ptr)->index_size <= 0 ) || \
1865  ( (new_size) > (cache_ptr)->index_size ) || \
1866  ( (cache_ptr)->index_size != \
1867  ((cache_ptr)->clean_index_size + \
1868  (cache_ptr)->dirty_index_size) ) || \
1869  ( ( !((entry_ptr)->is_dirty ) || \
1870  ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
1871  ( ( ((entry_ptr)->is_dirty) ) || \
1872  ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
1873  ( ( (cache_ptr)->index_len == 1 ) && \
1874  ( (cache_ptr)->index_size != (new_size) ) ) ) { \
1875  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
1876  "Post HT entry size change SC failed") \
1877 }
1878 
/* Before marking a dirty entry clean: entry must currently be clean-flagged
 * (is_dirty == FALSE) and the dirty index size must cover its size.
 */
1879 #define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
1880 if ( \
1881  ( (cache_ptr) == NULL ) || \
1882  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1883  ( (cache_ptr)->index_len <= 0 ) || \
1884  ( (entry_ptr) == NULL ) || \
1885  ( (entry_ptr)->is_dirty != FALSE ) || \
1886  ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
1887  ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
1888  ( (cache_ptr)->index_size != \
1889  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
1890  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
1891  "Pre HT update for entry clean SC failed") \
1892 }
1893 
/* Before marking a clean entry dirty: entry must be dirty-flagged
 * (is_dirty == TRUE) and the clean index size must cover its size.
 */
1894 #define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
1895 if ( \
1896  ( (cache_ptr) == NULL ) || \
1897  ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
1898  ( (cache_ptr)->index_len <= 0 ) || \
1899  ( (entry_ptr) == NULL ) || \
1900  ( (entry_ptr)->is_dirty != TRUE ) || \
1901  ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
1902  ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
1903  ( (cache_ptr)->index_size != \
1904  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
1905  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
1906  "Pre HT update for entry dirty SC failed") \
1907 }
1908 
/* After a clean/dirty transition: clean + dirty must still equal total. */
1909 #define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
1910 if ( (cache_ptr)->index_size != \
1911  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
1912  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
1913  "Post HT update for entry clean SC failed") \
1914 }
1915 
1916 #define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
1917 if ( (cache_ptr)->index_size != \
1918  ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
1919  HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
1920  "Post HT update for entry dirty SC failed") \
1921 }
1922 
1923 #else /* H5C_DO_SANITY_CHECKS */
1924 
/* Sanity checking disabled: all SC macros compile away to nothing. */
1925 #define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
1926 #define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
1927 #define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
1928 #define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
1929 #define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
1930 #define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
1931 #define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
1932 #define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
1933  entry_ptr, was_clean)
1934 #define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
1935  entry_ptr)
1936 #define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
1937 #define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
1938 
1939 #endif /* H5C_DO_SANITY_CHECKS */
1940 
1941 
/* Insert an entry at the head of its hash bucket and update index_len,
 * index_size, and the clean/dirty index size split.  The local `k` is
 * also assigned inside H5C__PRE_HT_INSERT_SC when sanity checks are on.
 */
1942 #define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
1943 { \
1944  int k; \
1945  H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
1946  k = H5C__HASH_FCN((entry_ptr)->addr); \
1947  if ( ((cache_ptr)->index)[k] == NULL ) \
1948  { \
1949  ((cache_ptr)->index)[k] = (entry_ptr); \
1950  } \
1951  else \
1952  { \
1953  (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
1954  (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
1955  ((cache_ptr)->index)[k] = (entry_ptr); \
1956  } \
1957  (cache_ptr)->index_len++; \
1958  (cache_ptr)->index_size += (entry_ptr)->size; \
1959  if ( (entry_ptr)->is_dirty ) { \
1960  (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
1961  } else { \
1962  (cache_ptr)->clean_index_size += (entry_ptr)->size; \
1963  } \
1964  H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
1965 }
1966 
/* Unlink an entry from its hash bucket chain, NULL its chain pointers,
 * and decrement index_len, index_size, and the clean or dirty index size
 * according to the entry's current is_dirty flag.
 */
1967 #define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
1968 { \
1969  int k; \
1970  H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
1971  k = H5C__HASH_FCN((entry_ptr)->addr); \
1972  if ( (entry_ptr)->ht_next ) \
1973  { \
1974  (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
1975  } \
1976  if ( (entry_ptr)->ht_prev ) \
1977  { \
1978  (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
1979  } \
1980  if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \
1981  { \
1982  ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
1983  } \
1984  (entry_ptr)->ht_next = NULL; \
1985  (entry_ptr)->ht_prev = NULL; \
1986  (cache_ptr)->index_len--; \
1987  (cache_ptr)->index_size -= (entry_ptr)->size; \
1988  if ( (entry_ptr)->is_dirty ) { \
1989  (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
1990  } else { \
1991  (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
1992  } \
1993  H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
1994 }
1995 
/* Search the hash table for Addr, setting entry_ptr to the match or NULL.
 * On a hit that is not already at the head of its bucket, the entry is
 * moved to the front of the chain (move-to-front heuristic).  `depth`
 * counts chain links traversed and feeds the HT search statistics.
 */
1996 #define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
1997 { \
1998  int k; \
1999  int depth = 0; \
2000  H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
2001  k = H5C__HASH_FCN(Addr); \
2002  entry_ptr = ((cache_ptr)->index)[k]; \
2003  while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
2004  { \
2005  (entry_ptr) = (entry_ptr)->ht_next; \
2006  (depth)++; \
2007  } \
2008  if ( entry_ptr ) \
2009  { \
2010  H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
2011  if ( entry_ptr != ((cache_ptr)->index)[k] ) \
2012  { \
2013  if ( (entry_ptr)->ht_next ) \
2014  { \
2015  (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
2016  } \
2017  HDassert( (entry_ptr)->ht_prev != NULL ); \
2018  (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
2019  ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
2020  (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
2021  (entry_ptr)->ht_prev = NULL; \
2022  ((cache_ptr)->index)[k] = (entry_ptr); \
2023  H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
2024  } \
2025  } \
2026  H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
2027 }
2028 
/* Search the hash table for Addr without touching the statistics, setting
 * entry_ptr to the match or NULL.  Like H5C__SEARCH_INDEX, a hit that is
 * not already at the head of its bucket is moved to the front of the
 * chain (move-to-front heuristic).
 *
 * Fix over previous revision: the local `depth` counter was declared and
 * incremented but never consumed (this variant performs no stats update),
 * producing a dead store and a potential unused-variable warning; it has
 * been removed.  Behavior is otherwise identical.
 */
#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
{ \
    int k; \
    H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
    k = H5C__HASH_FCN(Addr); \
    entry_ptr = ((cache_ptr)->index)[k]; \
    while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
    { \
        (entry_ptr) = (entry_ptr)->ht_next; \
    } \
    if ( entry_ptr ) \
    { \
        H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
        if ( entry_ptr != ((cache_ptr)->index)[k] ) \
        { \
            /* splice the entry out of the chain ... */ \
            if ( (entry_ptr)->ht_next ) \
            { \
                (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
            } \
            HDassert( (entry_ptr)->ht_prev != NULL ); \
            (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
            /* ... and re-insert it at the bucket head */ \
            ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
            (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
            (entry_ptr)->ht_prev = NULL; \
            ((cache_ptr)->index)[k] = (entry_ptr); \
            H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
        } \
    } \
}
2060 
/* Move an entry's size from the dirty to the clean side of the index
 * accounting (total index_size is unchanged).
 */
2061 #define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \
2062 { \
2063  H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
2064  (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
2065  (cache_ptr)->clean_index_size += (entry_ptr)->size; \
2066  H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
2067 }
2068 
/* Move an entry's size from the clean to the dirty side of the index
 * accounting (total index_size is unchanged).
 */
2069 #define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \
2070 { \
2071  H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
2072  (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
2073  (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
2074  H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
2075 }
2076 
/* Apply an entry size change to the index accounting: the old size is
 * removed from the side the entry was on (per was_clean) and the new size
 * is added to the side it is on now (per is_dirty) -- the entry may have
 * changed clean/dirty state along with its size.
 */
2077 #define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
2078  entry_ptr, was_clean) \
2079 { \
2080  H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
2081  entry_ptr, was_clean) \
2082  (cache_ptr)->index_size -= (old_size); \
2083  (cache_ptr)->index_size += (new_size); \
2084  if ( was_clean ) { \
2085  (cache_ptr)->clean_index_size -= (old_size); \
2086  } else { \
2087  (cache_ptr)->dirty_index_size -= (old_size); \
2088  } \
2089  if ( (entry_ptr)->is_dirty ) { \
2090  (cache_ptr)->dirty_index_size += (new_size); \
2091  } else { \
2092  (cache_ptr)->clean_index_size += (new_size); \
2093  } \
2094  H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
2095  entry_ptr) \
2096 }
2097 
2098 
2099 /**************************************************************************
2100  *
2101  * Skip list insertion and deletion macros:
2102  *
2103  * These used to be functions, but I converted them to macros to avoid some
2104  * function call overhead.
2105  *
2106  **************************************************************************/
2107 
2108 /*-------------------------------------------------------------------------
2109  *
2110  * Macro: H5C__INSERT_ENTRY_IN_SLIST
2111  *
2112  * Purpose: Insert the specified instance of H5C_cache_entry_t into
2113  * the skip list in the specified instance of H5C_t. Update
2114  * the associated length and size fields.
2115  *
2116  * Return: N/A
2117  *
2118  * Programmer: John Mainzer, 5/10/04
2119  *
2120  * Modifications:
2121  *
2122  * JRM -- 7/21/04
2123  * Updated function to set the in_tree flag when inserting
2124  * an entry into the tree. Also modified the function to
2125  * update the tree size and len fields instead of the similar
2126  * index fields.
2127  *
2128  * All of this is part of the modifications to support the
2129  * hash table.
2130  *
2131  * JRM -- 7/27/04
2132  * Converted the function H5C_insert_entry_in_tree() into
2133  * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of
2134  * wringing a little more speed out of the cache.
2135  *
2136  * Note that we don't bother to check if the entry is already
2137  * in the tree -- if it is, H5SL_insert() will fail.
2138  *
2139  * QAK -- 11/27/04
2140  * Switched over to using skip list routines.
2141  *
2142  * JRM -- 6/27/06
2143  * Added fail_val parameter.
2144  *
2145  * JRM -- 8/25/06
2146  * Added the H5C_DO_SANITY_CHECKS version of the macro.
2147  *
2148  * This version maintains the slist_len_increase and
2149  * slist_size_increase fields that are used in sanity
2150  * checks in the flush routines.
2151  *
2152  * All this is needed as the fractal heap needs to be
2153  * able to dirty, resize and/or move entries during the
2154  * flush.
2155  *
2156  *-------------------------------------------------------------------------
2157  */
2158 
2159 #if H5C_DO_SANITY_CHECKS
2160 
/* Insert an entry into the skip list keyed by its address and update
 * slist_len/slist_size.  This sanity-check variant also maintains the
 * slist_len_increase / slist_size_increase counters used by flush-time
 * consistency checks.  H5SL_insert fails if the key is already present,
 * so no duplicate check is needed here.
 */
2161 #define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
2162 { \
2163  HDassert( (cache_ptr) ); \
2164  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2165  HDassert( (entry_ptr) ); \
2166  HDassert( (entry_ptr)->size > 0 ); \
2167  HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
2168  HDassert( !((entry_ptr)->in_slist) ); \
2169  \
2170  if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
2171  < 0 ) \
2172  HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
2173  "Can't insert entry in skip list") \
2174  \
2175  (entry_ptr)->in_slist = TRUE; \
2176  (cache_ptr)->slist_len++; \
2177  (cache_ptr)->slist_size += (entry_ptr)->size; \
2178  (cache_ptr)->slist_len_increase++; \
2179  (cache_ptr)->slist_size_increase += (entry_ptr)->size; \
2180  \
2181  HDassert( (cache_ptr)->slist_len > 0 ); \
2182  HDassert( (cache_ptr)->slist_size > 0 ); \
2183  \
2184 } /* H5C__INSERT_ENTRY_IN_SLIST */
2185 
2186 #else /* H5C_DO_SANITY_CHECKS */
2187 
/* Production variant: identical to the sanity-check version above except
 * that the slist_len_increase / slist_size_increase bookkeeping is
 * omitted.
 */
2188 #define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
2189 { \
2190  HDassert( (cache_ptr) ); \
2191  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2192  HDassert( (entry_ptr) ); \
2193  HDassert( (entry_ptr)->size > 0 ); \
2194  HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
2195  HDassert( !((entry_ptr)->in_slist) ); \
2196  \
2197  if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
2198  < 0 ) \
2199  HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
2200  "Can't insert entry in skip list") \
2201  \
2202  (entry_ptr)->in_slist = TRUE; \
2203  (cache_ptr)->slist_len++; \
2204  (cache_ptr)->slist_size += (entry_ptr)->size; \
2205  \
2206  HDassert( (cache_ptr)->slist_len > 0 ); \
2207  HDassert( (cache_ptr)->slist_size > 0 ); \
2208  \
2209 } /* H5C__INSERT_ENTRY_IN_SLIST */
2210 
2211 #endif /* H5C_DO_SANITY_CHECKS */
2212 
2213 
2214 /*-------------------------------------------------------------------------
2215  *
2216  * Function: H5C__REMOVE_ENTRY_FROM_SLIST
2217  *
2218  * Purpose: Remove the specified instance of H5C_cache_entry_t from the
2219  * index skip list in the specified instance of H5C_t. Update
2220  * the associated length and size fields.
2221  *
2222  * Return: N/A
2223  *
2224  * Programmer: John Mainzer, 5/10/04
2225  *
2226  * Modifications:
2227  *
2228  * JRM -- 7/21/04
2229  * Updated function for the addition of the hash table.
2230  *
2231  * JRM - 7/27/04
2232  * Converted from the function H5C_remove_entry_from_tree()
2233  * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of
2234  * wringing a little more performance out of the cache.
2235  *
2236  * QAK -- 11/27/04
2237  * Switched over to using skip list routines.
2238  *
2239  * JRM -- 3/28/07
2240  * Updated sanity checks for the new is_read_only and
2241  * ro_ref_count fields in H5C_cache_entry_t.
2242  *
2243  *-------------------------------------------------------------------------
2244  */
2245 
/* Remove an entry from the skip list and update slist_len/slist_size.
 * The entry must be unprotected, not read-only, and currently in the
 * slist.  H5SL_remove must return the entry itself or we error out.
 */
2246 #define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \
2247 { \
2248  HDassert( (cache_ptr) ); \
2249  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2250  HDassert( (entry_ptr) ); \
2251  HDassert( !((entry_ptr)->is_protected) ); \
2252  HDassert( !((entry_ptr)->is_read_only) ); \
2253  HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
2254  HDassert( (entry_ptr)->size > 0 ); \
2255  HDassert( (entry_ptr)->in_slist ); \
2256  HDassert( (cache_ptr)->slist_ptr ); \
2257  \
2258  if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
2259  != (entry_ptr) ) \
2260  \
2261  HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
2262  "Can't delete entry from skip list.") \
2263  \
2264  HDassert( (cache_ptr)->slist_len > 0 ); \
2265  (cache_ptr)->slist_len--; \
2266  HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
2267  (cache_ptr)->slist_size -= (entry_ptr)->size; \
2268  (entry_ptr)->in_slist = FALSE; \
2269 } /* H5C__REMOVE_ENTRY_FROM_SLIST */
2270 
2271 
2272 /*-------------------------------------------------------------------------
2273  *
2274  * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE
2275  *
2276  * Purpose: Update cache_ptr->slist_size for a change in the size of
2277  * an entry in the slist.
2278  *
2279  * Return: N/A
2280  *
2281  * Programmer: John Mainzer, 9/07/05
2282  *
2283  * Modifications:
2284  *
2285  * JRM -- 8/27/06
2286  * Added the H5C_DO_SANITY_CHECKS version of the macro.
2287  *
2288  * This version maintains the slist_size_increase field
2289  * that are used in sanity checks in the flush routines.
2290  *
2291  * All this is needed as the fractal heap needs to be
2292  * able to dirty, resize and/or move entries during the
2293  * flush.
2294  *
2295  *-------------------------------------------------------------------------
2296  */
2297 
2298 #if H5C_DO_SANITY_CHECKS
2299 
/* Apply an entry size change to slist_size; this sanity-check variant
 * also maintains the slist_size_increase counter used by flush-time
 * consistency checks.
 */
2300 #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
2301 { \
2302  HDassert( (cache_ptr) ); \
2303  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2304  HDassert( (old_size) > 0 ); \
2305  HDassert( (new_size) > 0 ); \
2306  HDassert( (old_size) <= (cache_ptr)->slist_size ); \
2307  HDassert( (cache_ptr)->slist_len > 0 ); \
2308  HDassert( ((cache_ptr)->slist_len > 1) || \
2309  ( (cache_ptr)->slist_size == (old_size) ) ); \
2310  \
2311  (cache_ptr)->slist_size -= (old_size); \
2312  (cache_ptr)->slist_size += (new_size); \
2313  \
2314  (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
2315  (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
2316  \
2317  HDassert( (new_size) <= (cache_ptr)->slist_size ); \
2318  HDassert( ( (cache_ptr)->slist_len > 1 ) || \
2319  ( (cache_ptr)->slist_size == (new_size) ) ); \
2320 } /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
2321 
2322 #else /* H5C_DO_SANITY_CHECKS */
2323 
/* Production variant: identical to the sanity-check version above except
 * that the slist_size_increase bookkeeping is omitted.
 */
2324 #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
2325 { \
2326  HDassert( (cache_ptr) ); \
2327  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2328  HDassert( (old_size) > 0 ); \
2329  HDassert( (new_size) > 0 ); \
2330  HDassert( (old_size) <= (cache_ptr)->slist_size ); \
2331  HDassert( (cache_ptr)->slist_len > 0 ); \
2332  HDassert( ((cache_ptr)->slist_len > 1) || \
2333  ( (cache_ptr)->slist_size == (old_size) ) ); \
2334  \
2335  (cache_ptr)->slist_size -= (old_size); \
2336  (cache_ptr)->slist_size += (new_size); \
2337  \
2338  HDassert( (new_size) <= (cache_ptr)->slist_size ); \
2339  HDassert( ( (cache_ptr)->slist_len > 1 ) || \
2340  ( (cache_ptr)->slist_size == (new_size) ) ); \
2341 } /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
2342 
2343 #endif /* H5C_DO_SANITY_CHECKS */
2344 
2345 
2346 /**************************************************************************
2347  *
2348  * Replacement policy update macros:
2349  *
2350  * These used to be functions, but I converted them to macros to avoid some
2351  * function call overhead.
2352  *
2353  **************************************************************************/
2354 
2355 /*-------------------------------------------------------------------------
2356  *
2357  * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS
2358  *
2359  * Purpose: For efficiency, we sometimes change the order of flushes --
2360  * but doing so can confuse the replacement policy. This
2361  * macro exists to allow us to specify an entry as the
2362  * most recently touched so we can repair any such
2363  * confusion.
2364  *
2365  * At present, we only support the modified LRU policy, so
2366  * this function deals with that case unconditionally. If
2367  * we ever support other replacement policies, the macro
2368  * should switch on the current policy and act accordingly.
2369  *
2370  * Return: N/A
2371  *
2372  * Programmer: John Mainzer, 10/13/05
2373  *
2374  * Modifications:
2375  *
2376  * JRM -- 3/20/06
2377  * Modified macro to ignore pinned entries. Pinned entries
2378  * do not appear in the data structures maintained by the
2379  * replacement policy code, and thus this macro has nothing
2380  * to do if called for such an entry.
2381  *
2382  * JRM -- 3/28/07
2383  * Added sanity checks using the new is_read_only and
2384  * ro_ref_count fields of struct H5C_cache_entry_t.
2385  *
2386  *-------------------------------------------------------------------------
2387  */
2388 
2389 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
2390 
/* Pretend the (unpinned) entry was just accessed: move it to the head of
 * the main LRU list and of whichever auxiliary (clean or dirty) LRU list
 * its is_dirty flag places it on.  Pinned entries are not on the
 * replacement-policy lists, so the macro is a no-op for them.
 */
2391 #define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \
2392 { \
2393  HDassert( (cache_ptr) ); \
2394  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2395  HDassert( (entry_ptr) ); \
2396  HDassert( !((entry_ptr)->is_protected) ); \
2397  HDassert( !((entry_ptr)->is_read_only) ); \
2398  HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
2399  HDassert( (entry_ptr)->size > 0 ); \
2400  \
2401  if ( ! ((entry_ptr)->is_pinned) ) { \
2402  \
2403  /* modified LRU specific code */ \
2404  \
2405  /* remove the entry from the LRU list, and re-insert it at the head.\
2406  */ \
2407  \
2408  H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
2409  (cache_ptr)->LRU_tail_ptr, \
2410  (cache_ptr)->LRU_list_len, \
2411  (cache_ptr)->LRU_list_size, (fail_val)) \
2412  \
2413  H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
2414  (cache_ptr)->LRU_tail_ptr, \
2415  (cache_ptr)->LRU_list_len, \
2416  (cache_ptr)->LRU_list_size, (fail_val)) \
2417  \
2418  /* Use the dirty flag to infer whether the entry is on the clean or \
2419  * dirty LRU list, and remove it. Then insert it at the head of \
2420  * the same LRU list. \
2421  * \
2422  * At least initially, all entries should be clean. That may \
2423  * change, so we may as well deal with both cases now. \
2424  */ \
2425  \
2426  if ( (entry_ptr)->is_dirty ) { \
2427  H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
2428  (cache_ptr)->dLRU_tail_ptr, \
2429  (cache_ptr)->dLRU_list_len, \
2430  (cache_ptr)->dLRU_list_size, (fail_val)) \
2431  \
2432  H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
2433  (cache_ptr)->dLRU_tail_ptr, \
2434  (cache_ptr)->dLRU_list_len, \
2435  (cache_ptr)->dLRU_list_size, (fail_val)) \
2436  } else { \
2437  H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
2438  (cache_ptr)->cLRU_tail_ptr, \
2439  (cache_ptr)->cLRU_list_len, \
2440  (cache_ptr)->cLRU_list_size, (fail_val)) \
2441  \
2442  H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
2443  (cache_ptr)->cLRU_tail_ptr, \
2444  (cache_ptr)->cLRU_list_len, \
2445  (cache_ptr)->cLRU_list_size, (fail_val)) \
2446  } \
2447  \
2448  /* End modified LRU specific code. */ \
2449  } \
2450 } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */
2451 
2452 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2453 
2454 #define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \
2455 { \
2456  HDassert( (cache_ptr) ); \
2457  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2458  HDassert( (entry_ptr) ); \
2459  HDassert( !((entry_ptr)->is_protected) ); \
2460  HDassert( !((entry_ptr)->is_read_only) ); \
2461  HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
2462  HDassert( (entry_ptr)->size > 0 ); \
2463  \
2464  if ( ! ((entry_ptr)->is_pinned) ) { \
2465  \
2466  /* modified LRU specific code */ \
2467  \
2468  /* remove the entry from the LRU list, and re-insert it at the head \
2469  */ \
2470  \
2471  H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
2472  (cache_ptr)->LRU_tail_ptr, \
2473  (cache_ptr)->LRU_list_len, \
2474  (cache_ptr)->LRU_list_size, (fail_val)) \
2475  \
2476  H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
2477  (cache_ptr)->LRU_tail_ptr, \
2478  (cache_ptr)->LRU_list_len, \
2479  (cache_ptr)->LRU_list_size, (fail_val)) \
2480  \
2481  /* End modified LRU specific code. */ \
2482  } \
2483 } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */
2484 
2485 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2486 
2487 
2488 /*-------------------------------------------------------------------------
2489  *
2490  * Macro: H5C__UPDATE_RP_FOR_EVICTION
2491  *
2492  * Purpose: Update the replacement policy data structures for an
2493  * eviction of the specified cache entry.
2494  *
2495  * At present, we only support the modified LRU policy, so
2496  * this function deals with that case unconditionally. If
2497  * we ever support other replacement policies, the function
2498  * should switch on the current policy and act accordingly.
2499  *
2500  * Return: Non-negative on success/Negative on failure.
2501  *
2502  * Programmer: John Mainzer, 5/10/04
2503  *
2504  * Modifications:
2505  *
2506  * JRM - 7/27/04
2507  * Converted the function H5C_update_rp_for_eviction() to the
2508  * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze
2509  * a bit more performance out of the cache.
2510  *
2511  * At least for the first cut, I am leaving the comments and
2512  * white space in the macro. If they cause difficulties with
2513  * the pre-processor, I'll have to remove them.
2514  *
2515  * JRM - 7/28/04
 * Split macro into two versions, one supporting the clean and
2517  * dirty LRU lists, and the other not. Yet another attempt
2518  * at optimization.
2519  *
2520  * JRM - 3/20/06
2521  * Pinned entries can't be evicted, so this entry should never
2522  * be called on a pinned entry. Added assert to verify this.
2523  *
2524  * JRM -- 3/28/07
2525  * Added sanity checks for the new is_read_only and
2526  * ro_ref_count fields of struct H5C_cache_entry_t.
2527  *
2528  *-------------------------------------------------------------------------
2529  */
2530 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: pinned entries can never be evicted, and the \
     * entry must be unprotected and not open read-only. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( !((entry_ptr)->is_pinned) ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* modified LRU specific code */ \
 \
    /* Unlink the entry from the LRU list. */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                    (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
                    (cache_ptr)->LRU_list_size, (fail_val)) \
 \
    /* The entry also resides on either the clean or the dirty LRU \
     * list, as selected by its dirty flag -- unlink it from that list \
     * as well. \
     */ \
    if ( (entry_ptr)->is_dirty ) { \
 \
        H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                            (cache_ptr)->dLRU_tail_ptr, \
                            (cache_ptr)->dLRU_list_len, \
                            (cache_ptr)->dLRU_list_size, (fail_val)) \
 \
    } else { \
 \
        H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                            (cache_ptr)->cLRU_tail_ptr, \
                            (cache_ptr)->cLRU_list_len, \
                            (cache_ptr)->cLRU_list_size, (fail_val)) \
    } \
 \
} /* H5C__UPDATE_RP_FOR_EVICTION */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: pinned entries can never be evicted, and the \
     * entry must be unprotected and not open read-only. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( !((entry_ptr)->is_pinned) ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* modified LRU specific code */ \
 \
    /* Unlink the entry from the LRU list. */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                    (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
                    (cache_ptr)->LRU_list_size, (fail_val)) \
 \
} /* H5C__UPDATE_RP_FOR_EVICTION */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2597 
2598 
2599 /*-------------------------------------------------------------------------
2600  *
2601  * Macro: H5C__UPDATE_RP_FOR_FLUSH
2602  *
2603  * Purpose: Update the replacement policy data structures for a flush
2604  * of the specified cache entry.
2605  *
2606  * At present, we only support the modified LRU policy, so
2607  * this function deals with that case unconditionally. If
2608  * we ever support other replacement policies, the function
2609  * should switch on the current policy and act accordingly.
2610  *
2611  * Return: N/A
2612  *
2613  * Programmer: John Mainzer, 5/6/04
2614  *
2615  * Modifications:
2616  *
2617  * JRM - 7/27/04
2618  * Converted the function H5C_update_rp_for_flush() to the
2619  * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze
2620  * a bit more performance out of the cache.
2621  *
2622  * At least for the first cut, I am leaving the comments and
2623  * white space in the macro. If they cause difficulties with
2624  * pre-processor, I'll have to remove them.
2625  *
2626  * JRM - 7/28/04
2627  * Split macro into two versions, one supporting the clean and
2628  * dirty LRU lists, and the other not. Yet another attempt
2629  * at optimization.
2630  *
2631  * JRM - 3/20/06
2632  * While pinned entries can be flushed, they don't reside in
2633  * the replacement policy data structures when unprotected.
2634  * Thus I modified this macro to do nothing if the entry is
2635  * pinned.
2636  *
2637  * JRM - 3/28/07
2638  * Added sanity checks based on the new is_read_only and
2639  * ro_ref_count fields of struct H5C_cache_entry_t.
2640  *
2641  *-------------------------------------------------------------------------
2642  */
2643 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Pinned entries do not reside in the replacement policy data \
     * structures, so there is nothing to do for them. \
     */ \
    if ( ! ((entry_ptr)->is_pinned) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* Move the entry to the head of the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* Although the entry is being flushed or cleared, it need not \
         * be dirty.  Use the dirty flag to find which of the clean / \
         * dirty LRU lists it is on, and unlink it from that list.  It \
         * is then inserted at the head of the clean LRU list below, on \
         * the presumption that a dirty entry will be cleared or \
         * flushed shortly -- so placing a dirty entry on the clean LRU \
         * list is acceptable here. \
         */ \
        if ( (entry_ptr)->is_dirty ) { \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                                (cache_ptr)->dLRU_tail_ptr, \
                                (cache_ptr)->dLRU_list_len, \
                                (cache_ptr)->dLRU_list_size, (fail_val)) \
        } else { \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                                (cache_ptr)->cLRU_tail_ptr, \
                                (cache_ptr)->cLRU_list_len, \
                                (cache_ptr)->cLRU_list_size, (fail_val)) \
        } \
 \
        H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                             (cache_ptr)->cLRU_tail_ptr, \
                             (cache_ptr)->cLRU_list_len, \
                             (cache_ptr)->cLRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_FLUSH */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Pinned entries are not in the replacement policy data structures. */ \
    if ( ! ((entry_ptr)->is_pinned) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* Move the entry to the head of the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_FLUSH */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2741 
2742 
2743 /*-------------------------------------------------------------------------
2744  *
2745  * Macro: H5C__UPDATE_RP_FOR_INSERTION
2746  *
2747  * Purpose: Update the replacement policy data structures for an
2748  * insertion of the specified cache entry.
2749  *
2750  * At present, we only support the modified LRU policy, so
2751  * this function deals with that case unconditionally. If
2752  * we ever support other replacement policies, the function
2753  * should switch on the current policy and act accordingly.
2754  *
2755  * Return: N/A
2756  *
2757  * Programmer: John Mainzer, 5/17/04
2758  *
2759  * Modifications:
2760  *
2761  * JRM - 7/27/04
2762  * Converted the function H5C_update_rp_for_insertion() to the
2763  * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze
2764  * a bit more performance out of the cache.
2765  *
2766  * At least for the first cut, I am leaving the comments and
2767  * white space in the macro. If they cause difficulties with
2768  * pre-processor, I'll have to remove them.
2769  *
2770  * JRM - 7/28/04
 * Split macro into two versions, one supporting the clean and
2772  * dirty LRU lists, and the other not. Yet another attempt
2773  * at optimization.
2774  *
2775  * JRM - 3/10/06
2776  * This macro should never be called on a pinned entry.
2777  * Inserted an assert to verify this.
2778  *
2779  * JRM - 8/9/06
2780  * Not any more. We must now allow insertion of pinned
2781  * entries. Updated macro to support this.
2782  *
2783  * JRM - 3/28/07
2784  * Added sanity checks using the new is_read_only and
2785  * ro_ref_count fields of struct H5C_cache_entry_t.
2786  *
2787  *-------------------------------------------------------------------------
2788  */
2789 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists.
 *
 * Fix: the dirty-flag test previously read "entry_ptr->is_dirty" with the
 * macro argument unparenthesized; it now reads "(entry_ptr)->is_dirty",
 * consistent with every other use, so callers may pass any pointer
 * expression for entry_ptr.
 */
#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Insertion of pinned entries is allowed -- they go on the pinned \
     * entry list instead of the replacement policy data structures. \
     */ \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
                         (cache_ptr)->pel_tail_ptr, \
                         (cache_ptr)->pel_len, \
                         (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Insert the entry at the head of the LRU list. */ \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* Also insert it at the head of the clean or the dirty LRU \
         * list, as selected by its dirty flag. \
         */ \
        if ( (entry_ptr)->is_dirty ) { \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                                 (cache_ptr)->dLRU_tail_ptr, \
                                 (cache_ptr)->dLRU_list_len, \
                                 (cache_ptr)->dLRU_list_size, (fail_val)) \
        } else { \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                                 (cache_ptr)->cLRU_tail_ptr, \
                                 (cache_ptr)->cLRU_list_len, \
                                 (cache_ptr)->cLRU_list_size, (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
}

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Pinned entries go on the pinned entry list instead of the \
     * replacement policy data structures. \
     */ \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
                         (cache_ptr)->pel_tail_ptr, \
                         (cache_ptr)->pel_len, \
                         (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Insert the entry at the head of the LRU list. */ \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
}

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2875 
2876 
2877 /*-------------------------------------------------------------------------
2878  *
2879  * Macro: H5C__UPDATE_RP_FOR_PROTECT
2880  *
2881  * Purpose: Update the replacement policy data structures for a
2882  * protect of the specified cache entry.
2883  *
2884  * To do this, unlink the specified entry from any data
2885  * structures used by the replacement policy, and add the
2886  * entry to the protected list.
2887  *
2888  * At present, we only support the modified LRU policy, so
2889  * this function deals with that case unconditionally. If
2890  * we ever support other replacement policies, the function
2891  * should switch on the current policy and act accordingly.
2892  *
2893  * Return: N/A
2894  *
2895  * Programmer: John Mainzer, 5/17/04
2896  *
2897  * Modifications:
2898  *
2899  * JRM - 7/27/04
2900  * Converted the function H5C_update_rp_for_protect() to the
2901  * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze
2902  * a bit more performance out of the cache.
2903  *
2904  * At least for the first cut, I am leaving the comments and
2905  * white space in the macro. If they cause difficulties with
2906  * pre-processor, I'll have to remove them.
2907  *
2908  * JRM - 7/28/04
 * Split macro into two versions, one supporting the clean and
2910  * dirty LRU lists, and the other not. Yet another attempt
2911  * at optimization.
2912  *
2913  * JRM - 3/17/06
 * Modified macro to attempt to remove pinned entries from
2915  * the pinned entry list instead of from the data structures
2916  * maintained by the replacement policy.
2917  *
2918  * JRM - 3/28/07
2919  * Added sanity checks based on the new is_read_only and
2920  * ro_ref_count fields of struct H5C_cache_entry_t.
2921  *
2922  *-------------------------------------------------------------------------
2923  */
2924 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, not already protected, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        /* Pinned entries live on the pinned entry list, not in the \
         * replacement policy data structures -- unlink from there. \
         */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                        (cache_ptr)->pel_tail_ptr, \
                        (cache_ptr)->pel_len, \
                        (cache_ptr)->pel_size, (fail_val)) \
        HDassert( (cache_ptr)->pel_len >= 0 ); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Unlink the entry from the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* Also unlink it from the clean or the dirty LRU list, as \
         * selected by its dirty flag. \
         */ \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                                (cache_ptr)->dLRU_tail_ptr, \
                                (cache_ptr)->dLRU_list_len, \
                                (cache_ptr)->dLRU_list_size, (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                                (cache_ptr)->cLRU_tail_ptr, \
                                (cache_ptr)->cLRU_list_len, \
                                (cache_ptr)->cLRU_list_size, (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
 \
    /* Whatever the replacement policy, and whether or not the entry \
     * was pinned, it now goes on the protected list. \
     */ \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, \
                    (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, not already protected, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        /* Pinned entries live on the pinned entry list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                        (cache_ptr)->pel_tail_ptr, \
                        (cache_ptr)->pel_len, \
                        (cache_ptr)->pel_size, (fail_val)) \
        HDassert( (cache_ptr)->pel_len >= 0 ); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Unlink the entry from the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
 \
    /* Whatever the replacement policy, and whether or not the entry \
     * was pinned, it now goes on the protected list. \
     */ \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, \
                    (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3033 
3034 
3035 /*-------------------------------------------------------------------------
3036  *
3037  * Macro: H5C__UPDATE_RP_FOR_MOVE
3038  *
3039  * Purpose: Update the replacement policy data structures for a
3040  * move of the specified cache entry.
3041  *
3042  * At present, we only support the modified LRU policy, so
3043  * this function deals with that case unconditionally. If
3044  * we ever support other replacement policies, the function
3045  * should switch on the current policy and act accordingly.
3046  *
3047  * Return: N/A
3048  *
3049  * Programmer: John Mainzer, 5/17/04
3050  *
3051  *-------------------------------------------------------------------------
3052  */
3053 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists.
 *
 * Fix: the was_dirty macro argument was previously used unparenthesized
 * in the if condition; it is now written "(was_dirty)" so callers may
 * pass any boolean expression (e.g. "a && b") safely.
 */
#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Pinned entries do not appear in the replacement policy data \
     * structures, so there is nothing to do for them. \
     */ \
    if ( ! ( (entry_ptr)->is_pinned ) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* Move the entry to the head of the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* The was_dirty parameter tells us which of the clean / dirty \
         * LRU lists the entry was on before the move -- unlink it from \
         * that list. \
         */ \
        if ( (was_dirty) ) { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), \
                                (cache_ptr)->dLRU_head_ptr, \
                                (cache_ptr)->dLRU_tail_ptr, \
                                (cache_ptr)->dLRU_list_len, \
                                (cache_ptr)->dLRU_list_size, \
                                (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), \
                                (cache_ptr)->cLRU_head_ptr, \
                                (cache_ptr)->cLRU_tail_ptr, \
                                (cache_ptr)->cLRU_list_len, \
                                (cache_ptr)->cLRU_list_size, \
                                (fail_val)) \
        } \
 \
        /* Re-insert it at the head of the clean or the dirty LRU list, \
         * as selected by its current dirty flag. \
         */ \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), \
                                 (cache_ptr)->dLRU_head_ptr, \
                                 (cache_ptr)->dLRU_tail_ptr, \
                                 (cache_ptr)->dLRU_list_len, \
                                 (cache_ptr)->dLRU_list_size, \
                                 (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), \
                                 (cache_ptr)->cLRU_head_ptr, \
                                 (cache_ptr)->cLRU_tail_ptr, \
                                 (cache_ptr)->cLRU_list_len, \
                                 (cache_ptr)->cLRU_list_size, \
                                 (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_MOVE */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists.  The \
 * was_dirty parameter is unused here but kept for a uniform signature. \
 */
#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
    /* Sanity checks: cache must be valid, and the entry must be \
     * present, unprotected, writable, and of non-zero size. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Pinned entries are not in the replacement policy data structures. */ \
    if ( ! ( (entry_ptr)->is_pinned ) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* Move the entry to the head of the LRU list. */ \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_MOVE */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3166 
3167 
3168 /*-------------------------------------------------------------------------
3169  *
3170  * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE
3171  *
3172  * Purpose: Update the replacement policy data structures for a
3173  * size change of the specified cache entry.
3174  *
3175  * To do this, determine if the entry is pinned. If it is,
3176  * update the size of the pinned entry list.
3177  *
 * If it isn't pinned, the entry must be handled by the
3179  * replacement policy. Update the appropriate replacement
3180  * policy data structures.
3181  *
3182  * At present, we only support the modified LRU policy, so
3183  * this function deals with that case unconditionally. If
3184  * we ever support other replacement policies, the function
3185  * should switch on the current policy and act accordingly.
3186  *
3187  * Return: N/A
3188  *
3189  * Programmer: John Mainzer, 8/23/06
3190  *
3191  * Modifications:
3192  *
3193  * JRM -- 3/28/07
3194  * Added sanity checks based on the new is_read_only and
3195  * ro_ref_count fields of struct H5C_cache_entry_t.
3196  *
3197  *-------------------------------------------------------------------------
3198  */
3199 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Variant that also maintains the auxiliary clean / dirty LRU lists.
 *
 * Fix: the new_size macro argument is now parenthesized in the sanity
 * check ("(new_size) > 0"), consistent with every other use, so callers
 * may pass an arbitrary expression for new_size.
 */
#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
{ \
    /* Sanity checks: cache must be valid, the entry must be present, \
     * unprotected, and writable, and both sizes must be non-zero. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
    HDassert( (new_size) > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        /* Pinned entries are tracked on the pinned entry list -- just \
         * adjust that list's size statistics. \
         */ \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
                                        (cache_ptr)->pel_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Adjust the size of the LRU list. */ \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
                                        (cache_ptr)->LRU_list_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
        /* Also adjust the size of the clean or the dirty LRU list, as \
         * selected by the entry's dirty flag. \
         */ \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
                                            (cache_ptr)->dLRU_list_size, \
                                            (entry_ptr)->size, \
                                            (new_size)); \
 \
        } else { \
 \
            H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
                                            (cache_ptr)->cLRU_list_size, \
                                            (entry_ptr)->size, \
                                            (new_size)); \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
 \
} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Variant without the auxiliary clean / dirty LRU lists. */
#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
{ \
    /* Sanity checks: cache must be valid, the entry must be present, \
     * unprotected, and writable, and both sizes must be non-zero. \
     */ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
    HDassert( (new_size) > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        /* Pinned entries are tracked on the pinned entry list. */ \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
                                        (cache_ptr)->pel_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Adjust the size of the LRU list. */ \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
                                        (cache_ptr)->LRU_list_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
        /* End modified LRU specific code. */ \
    } \
 \
} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3293 
3294 
/*-------------------------------------------------------------------------
 *
 * Macro:       H5C__UPDATE_RP_FOR_UNPIN
 *
 * Purpose:     Update the replacement policy data structures for an
 *              unpin of the specified cache entry.
 *
 *              To do this, unlink the specified entry from the protected
 *              entry list, and re-insert it in the data structures used
 *              by the current replacement policy.
 *
 *              At present, we only support the modified LRU policy, so
 *              this function deals with that case unconditionally.  If
 *              we ever support other replacement policies, the macro
 *              should switch on the current policy and act accordingly.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 3/22/06
 *
 * Modifications:
 *
 *              JRM -- 3/28/07
 *              Added sanity checks based on the new is_read_only and
 *              ro_ref_count fields of struct H5C_cache_entry_t.
 *
 *-------------------------------------------------------------------------
 */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Unpin *entry_ptr: remove it from the pinned entry list and re-insert it
 * at the head of the LRU list, and also at the head of either the clean or
 * the dirty auxiliary LRU list depending on (entry_ptr)->is_dirty.
 * (Variant compiled when the clean/dirty LRU lists ARE maintained.)
 * fail_val is returned by the enclosing function if a list operation fails.
 */
#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val)       \
{                                                                      \
    HDassert( (cache_ptr) );                                           \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                \
    HDassert( (entry_ptr) );                                           \
    HDassert( !((entry_ptr)->is_protected) );                          \
    HDassert( !((entry_ptr)->is_read_only) );                          \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                      \
    HDassert( (entry_ptr)->is_pinned);                                 \
    HDassert( (entry_ptr)->size > 0 );                                 \
                                                                       \
    /* Regardless of the replacement policy, remove the entry from the \
     * pinned entry list.                                              \
     */                                                                \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,            \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len,   \
                    (cache_ptr)->pel_size, (fail_val))                 \
    HDassert( (cache_ptr)->pel_len >= 0 );                             \
                                                                       \
    /* modified LRU specific code */                                   \
                                                                       \
    /* insert the entry at the head of the LRU list. */                \
                                                                       \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,           \
                     (cache_ptr)->LRU_tail_ptr,                        \
                     (cache_ptr)->LRU_list_len,                        \
                     (cache_ptr)->LRU_list_size, (fail_val))           \
                                                                       \
    /* Similarly, insert the entry at the head of either the clean     \
     * or dirty LRU list as appropriate.                               \
     */                                                                \
                                                                       \
    if ( (entry_ptr)->is_dirty ) {                                     \
                                                                       \
        H5C__AUX_DLL_PREPEND((entry_ptr),                              \
                             (cache_ptr)->dLRU_head_ptr,               \
                             (cache_ptr)->dLRU_tail_ptr,               \
                             (cache_ptr)->dLRU_list_len,               \
                             (cache_ptr)->dLRU_list_size,              \
                             (fail_val))                               \
                                                                       \
    } else {                                                           \
                                                                       \
        H5C__AUX_DLL_PREPEND((entry_ptr),                              \
                             (cache_ptr)->cLRU_head_ptr,               \
                             (cache_ptr)->cLRU_tail_ptr,               \
                             (cache_ptr)->cLRU_list_len,               \
                             (cache_ptr)->cLRU_list_size,              \
                             (fail_val))                               \
    }                                                                  \
                                                                       \
    /* End modified LRU specific code. */                              \
                                                                       \
} /* H5C__UPDATE_RP_FOR_UNPIN */
3380 
#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3382 
/* Unpin *entry_ptr: remove it from the pinned entry list and re-insert it
 * at the head of the LRU list.  (Variant compiled when the clean/dirty
 * LRU lists are NOT maintained -- no auxiliary-list bookkeeping.)
 * fail_val is returned by the enclosing function if a list operation fails.
 */
#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val)       \
{                                                                      \
    HDassert( (cache_ptr) );                                           \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                \
    HDassert( (entry_ptr) );                                           \
    HDassert( !((entry_ptr)->is_protected) );                          \
    HDassert( !((entry_ptr)->is_read_only) );                          \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                      \
    HDassert( (entry_ptr)->is_pinned);                                 \
    HDassert( (entry_ptr)->size > 0 );                                 \
                                                                       \
    /* Regardless of the replacement policy, remove the entry from the \
     * pinned entry list.                                              \
     */                                                                \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,            \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len,   \
                    (cache_ptr)->pel_size, (fail_val))                 \
    HDassert( (cache_ptr)->pel_len >= 0 );                             \
                                                                       \
    /* modified LRU specific code */                                   \
                                                                       \
    /* insert the entry at the head of the LRU list. */                \
                                                                       \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,           \
                     (cache_ptr)->LRU_tail_ptr,                        \
                     (cache_ptr)->LRU_list_len,                        \
                     (cache_ptr)->LRU_list_size, (fail_val))           \
                                                                       \
    /* End modified LRU specific code. */                              \
                                                                       \
} /* H5C__UPDATE_RP_FOR_UNPIN */
3414 
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3416 
3417 
/*-------------------------------------------------------------------------
 *
 * Macro:       H5C__UPDATE_RP_FOR_UNPROTECT
 *
 * Purpose:     Update the replacement policy data structures for an
 *              unprotect of the specified cache entry.
 *
 *              To do this, unlink the specified entry from the protected
 *              list, and re-insert it in the data structures used by the
 *              current replacement policy.
 *
 *              At present, we only support the modified LRU policy, so
 *              this function deals with that case unconditionally.  If
 *              we ever support other replacement policies, the function
 *              should switch on the current policy and act accordingly.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 5/19/04
 *
 * Modifications:
 *
 *              JRM - 7/27/04
 *              Converted the function H5C_update_rp_for_unprotect() to
 *              the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to
 *              squeeze a bit more performance out of the cache.
 *
 *              At least for the first cut, I am leaving the comments and
 *              white space in the macro.  If they cause difficulties with
 *              pre-processor, I'll have to remove them.
 *
 *              JRM - 7/28/04
 *              Split macro into two version, one supporting the clean and
 *              dirty LRU lists, and the other not.  Yet another attempt
 *              at optimization.
 *
 *              JRM - 3/17/06
 *              Modified macro to put pinned entries on the pinned entry
 *              list instead of inserting them in the data structures
 *              maintained by the replacement policy.
 *
 *-------------------------------------------------------------------------
 */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
3463 
/* Unprotect *entry_ptr: remove it from the protected list, then re-insert
 * it either on the pinned entry list (if it is pinned) or at the head of
 * the LRU list plus the head of the clean or dirty auxiliary LRU list,
 * depending on (entry_ptr)->is_dirty.  (Variant compiled when the
 * clean/dirty LRU lists ARE maintained.)  fail_val is returned by the
 * enclosing function if a list operation fails.
 */
#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val)       \
{                                                                          \
    HDassert( (cache_ptr) );                                               \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                    \
    HDassert( (entry_ptr) );                                               \
    HDassert( (entry_ptr)->is_protected);                                  \
    HDassert( (entry_ptr)->size > 0 );                                     \
                                                                           \
    /* Regardless of the replacement policy, remove the entry from the     \
     * protected list.                                                     \
     */                                                                    \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr,                 \
                    (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len,         \
                    (cache_ptr)->pl_size, (fail_val))                      \
                                                                           \
    if ( (entry_ptr)->is_pinned ) {                                        \
                                                                           \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr,           \
                         (cache_ptr)->pel_tail_ptr,                        \
                         (cache_ptr)->pel_len,                             \
                         (cache_ptr)->pel_size, (fail_val))                \
                                                                           \
    } else {                                                               \
                                                                           \
        /* modified LRU specific code */                                   \
                                                                           \
        /* insert the entry at the head of the LRU list. */                \
                                                                           \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,           \
                         (cache_ptr)->LRU_tail_ptr,                        \
                         (cache_ptr)->LRU_list_len,                        \
                         (cache_ptr)->LRU_list_size, (fail_val))           \
                                                                           \
        /* Similarly, insert the entry at the head of either the clean or  \
         * dirty LRU list as appropriate.                                  \
         */                                                                \
                                                                           \
        if ( (entry_ptr)->is_dirty ) {                                     \
                                                                           \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr,  \
                                 (cache_ptr)->dLRU_tail_ptr,               \
                                 (cache_ptr)->dLRU_list_len,               \
                                 (cache_ptr)->dLRU_list_size, (fail_val))  \
                                                                           \
        } else {                                                           \
                                                                           \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr,  \
                                 (cache_ptr)->cLRU_tail_ptr,               \
                                 (cache_ptr)->cLRU_list_len,               \
                                 (cache_ptr)->cLRU_list_size, (fail_val))  \
        }                                                                  \
                                                                           \
        /* End modified LRU specific code. */                              \
    }                                                                      \
                                                                           \
} /* H5C__UPDATE_RP_FOR_UNPROTECT */
3520 
#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3522 
/* Unprotect *entry_ptr: remove it from the protected list, then re-insert
 * it either on the pinned entry list (if it is pinned) or at the head of
 * the LRU list.  (Variant compiled when the clean/dirty LRU lists are NOT
 * maintained -- no auxiliary-list bookkeeping.)  fail_val is returned by
 * the enclosing function if a list operation fails.
 */
#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val)   \
{                                                                      \
    HDassert( (cache_ptr) );                                           \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                \
    HDassert( (entry_ptr) );                                           \
    HDassert( (entry_ptr)->is_protected);                              \
    HDassert( (entry_ptr)->size > 0 );                                 \
                                                                       \
    /* Regardless of the replacement policy, remove the entry from the \
     * protected list.                                                 \
     */                                                                \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr,             \
                    (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len,     \
                    (cache_ptr)->pl_size, (fail_val))                  \
                                                                       \
    if ( (entry_ptr)->is_pinned ) {                                    \
                                                                       \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr,       \
                         (cache_ptr)->pel_tail_ptr,                    \
                         (cache_ptr)->pel_len,                         \
                         (cache_ptr)->pel_size, (fail_val))            \
                                                                       \
    } else {                                                           \
                                                                       \
        /* modified LRU specific code */                               \
                                                                       \
        /* insert the entry at the head of the LRU list. */            \
                                                                       \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,       \
                         (cache_ptr)->LRU_tail_ptr,                    \
                         (cache_ptr)->LRU_list_len,                    \
                         (cache_ptr)->LRU_list_size, (fail_val))       \
                                                                       \
        /* End modified LRU specific code. */                          \
    }                                                                  \
} /* H5C__UPDATE_RP_FOR_UNPROTECT */
3559 
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3561 
3562 
#endif /* _H5Cpkg_H */
3564 
H5C_cache_entry_t * pl_tail_ptr
Definition: H5Cpkg.h:886
Definition: H5Cpkg.h:845
int64_t cache_accesses
Definition: H5Cpkg.h:926
unsigned int hbool_t
Definition: H5public.h:142
size_t min_clean_size
Definition: H5Cpkg.h:859
#define H5C__HASH_TABLE_LEN
Definition: H5Cpkg.h:839
uint32_t magic
Definition: H5Cpkg.h:847
hbool_t evictions_enabled
Definition: H5Cpkg.h:866
size_t cLRU_list_size
Definition: H5Cpkg.h:899
int32_t epoch_markers_active
Definition: H5Cpkg.h:917
H5C_cache_entry_t * cLRU_tail_ptr
Definition: H5Cpkg.h:901
size_t pl_size
Definition: H5Cpkg.h:884
H5SL_t * slist_ptr
Definition: H5Cpkg.h:877
H5C_cache_entry_t * pel_head_ptr
Definition: H5Cpkg.h:890
H5C_cache_entry_t * dLRU_tail_ptr
Definition: H5Cpkg.h:906
H5C_cache_entry_t * cLRU_head_ptr
Definition: H5Cpkg.h:900
H5C_auto_size_ctl_t resize_ctl
Definition: H5Cpkg.h:915
hbool_t cache_full
Definition: H5Cpkg.h:913
size_t LRU_list_size
Definition: H5Cpkg.h:894
hbool_t size_increase_possible
Definition: H5Cpkg.h:908
#define H5C__PREFIX_LEN
Definition: H5Cpkg.h:843
int32_t epoch_marker_ringbuf_first
Definition: H5Cpkg.h:920
int32_t cLRU_list_len
Definition: H5Cpkg.h:898
H5C_cache_entry_t * pl_head_ptr
Definition: H5Cpkg.h:885
size_t slist_size
Definition: H5Cpkg.h:876
H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]
Definition: H5Cpkg.h:923
H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN]
Definition: H5Cpkg.h:872
hbool_t flush_in_progress
Definition: H5Cpkg.h:849
hbool_t flash_size_increase_possible
Definition: H5Cpkg.h:909
hbool_t size_decreased
Definition: H5Cpkg.h:914
size_t max_cache_size
Definition: H5Cpkg.h:858
int32_t index_len
Definition: H5Cpkg.h:868
hbool_t size_decrease_possible
Definition: H5Cpkg.h:911
int32_t dLRU_list_len
Definition: H5Cpkg.h:903
size_t pel_size
Definition: H5Cpkg.h:889
H5C_cache_entry_t * pel_tail_ptr
Definition: H5Cpkg.h:891
int32_t pel_len
Definition: H5Cpkg.h:888
int64_t cache_hits
Definition: H5Cpkg.h:925
int32_t epoch_marker_ringbuf_last
Definition: H5Cpkg.h:921
hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]
Definition: H5Cpkg.h:918
int32_t epoch_marker_ringbuf_size
Definition: H5Cpkg.h:922
H5C_write_permitted_func_t check_write_permitted
Definition: H5Cpkg.h:861
size_t dirty_index_size
Definition: H5Cpkg.h:871
size_t index_size
Definition: H5Cpkg.h:869
hbool_t resize_enabled
Definition: H5Cpkg.h:912
H5C_cache_entry_t * LRU_tail_ptr
Definition: H5Cpkg.h:896
FILE * trace_file_ptr
Definition: H5Cpkg.h:851
H5C_log_flush_func_t log_flush
Definition: H5Cpkg.h:864
int32_t pl_len
Definition: H5Cpkg.h:883
hbool_t write_permitted
Definition: H5Cpkg.h:862
size_t flash_size_increase_threshold
Definition: H5Cpkg.h:910
int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]
Definition: H5Cpkg.h:919
char prefix[H5C__PREFIX_LEN]
Definition: H5Cpkg.h:995
#define H5C__MAX_NUM_TYPE_IDS
Definition: H5Cpkg.h:842
H5C_cache_entry_t * LRU_head_ptr
Definition: H5Cpkg.h:895
int32_t LRU_list_len
Definition: H5Cpkg.h:893
int32_t slist_len
Definition: H5Cpkg.h:875
int32_t max_type_id
Definition: H5Cpkg.h:855
size_t dLRU_list_size
Definition: H5Cpkg.h:904
size_t clean_index_size
Definition: H5Cpkg.h:870
void * aux_ptr
Definition: H5Cpkg.h:853
H5C_cache_entry_t * dLRU_head_ptr
Definition: H5Cpkg.h:905

MISR Toolkit - Copyright © 2005 - 2020 Jet Propulsion Laboratory
Generated on Fri Jun 19 2020 22:49:52