/* cache.c */
#include <stdio.h>
#include <stddef.h>		/* offsetof() */
#include <string.h>

#include "cache.h"

/*--------------------------------------------------------- */
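/*
 * Return the largest usable block size for which the cache still has a
 * free block available. The per-block bucket back-pointer is excluded,
 * so the value reflects what a caller could actually request.
 */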
size_t lub_heap_cache__get_max_free(lub_heap_cache_t * this)
{
	size_t size = 0;
	/* get the size from the cache */
	lub_heap_cache_bucket_t **ptr;

	for (ptr = this->m_bucket_start; ptr < this->m_bucket_end; ++ptr) {
		lub_blockpool_stats_t stats;
		lub_blockpool__get_stats(&(*ptr)->m_blockpool, &stats);
		if (stats.free_blocks
		    && ((stats.block_size - sizeof(lub_heap_cache_bucket_t *))
			> size)) {
			size = stats.block_size
			    - sizeof(lub_heap_cache_bucket_t *);
		}
	}
	return size;
}

/*--------------------------------------------------------- */
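/*
 * Map a requested size to the bucket whose (power of two) block size can
 * hold it, after reserving room for the bucket back-pointer stored at the
 * front of every cache block. Bucket 0 holds 8 byte blocks, bucket 1 holds
 * 16 byte blocks and so on; 0 is returned if the size exceeds the largest
 * bucket.
 */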
static lub_heap_cache_bucket_t
    *_lub_heap_cache_find_bucket_from_size(lub_heap_cache_t * this, size_t size)
{
	lub_heap_cache_bucket_t **ptr = this->m_bucket_start;

	/* leave space to put the bucket reference into place */
	size += sizeof(lub_heap_cache_bucket_t *);
	/* place powers of two into the correct bucket */
	--size;
	/* ignore sizes of 1,2 and 4 bytes */
	size >>= 3;
	for (; (ptr < this->m_bucket_end) && size; ++ptr) {
		size >>= 1;
	}
	return (ptr == this->m_bucket_end) ? 0 : *ptr;
}

/*--------------------------------------------------------- */
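/*
 * Count the power-of-two buckets needed to cover block sizes from
 * 8 bytes up to max_block_size.
 */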
static size_t lub_heap_cache_num_buckets(lub_heap_align_t max_block_size)
{
	unsigned num_buckets = 0;

	/* count the number of buckets to be created */
	max_block_size >>= 3;	/* ignore buckets of size 1,2 and 4 bytes */
	while (max_block_size) {
		++num_buckets;
		max_block_size >>= 1;
	}
	return num_buckets;
}

/*--------------------------------------------------------- */
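/*
 * Work out the housekeeping memory consumed by the cache itself: the
 * control block, its array of bucket pointers, the size-to-bucket lookup
 * table and the bucket headers. The bucket payloads are accounted for
 * separately in lub_heap_overhead_size() below.
 */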
size_t
lub_heap_cache_overhead_size(lub_heap_align_t max_block_size,
			     size_t num_max_blocks)
{
	size_t overhead = 0;

	if (num_max_blocks && max_block_size) {
		/*
		 * account for the cache overheads
		 */
		size_t num_buckets = lub_heap_cache_num_buckets(max_block_size);

		/* cache control block */
		overhead += sizeof(lub_heap_cache_t);
		/* array of bucket pointers (-1 because one is contained in control block) */
		overhead +=
		    sizeof(lub_heap_cache_bucket_t *) * (num_buckets - 1);
		/* fast size lookup array */
		overhead += sizeof(lub_heap_cache_bucket_t *) * max_block_size;
		/* buckets themselves */
		overhead += num_buckets * offsetof(lub_heap_cache_bucket_t,
						   m_memory_start);
	}
	return overhead;
}

/*--------------------------------------------------------- */
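/*
 * Estimate the total memory needed to create a heap with an optional
 * cache: the heap control block, space for at least one free block, plus
 * the cache overheads and the bucket payloads when a cache is requested.
 */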
size_t
lub_heap_overhead_size(lub_heap_align_t max_block_size, size_t num_max_blocks)
{
	/* heap control block */
	size_t overhead = sizeof(lub_heap_t);

	/* need at least one free block's worth */
	overhead += sizeof(lub_heap_free_block_t);

	if (max_block_size && num_max_blocks) {
		size_t num_buckets = lub_heap_cache_num_buckets(max_block_size);
		size_t bucket_size = max_block_size * num_max_blocks;

		/* now add any cache overhead contribution */
		overhead += lub_heap_cache_overhead_size(max_block_size,
							 num_max_blocks);
		/* add the bucket contents */
		overhead += (num_buckets * bucket_size);
	}
	return overhead;
}

/*--------------------------------------------------------- */
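/*
 * Create the cache for a heap using static allocations: the control block
 * (with its trailing array of bucket pointers), one bucket per power-of-two
 * block size from 8 bytes up to max_block_size, and finally a lookup table
 * mapping every size from 1 to max_block_size directly to its bucket.
 * Fails if the heap already has a cache or if any allocation fails.
 */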
lub_heap_status_t
lub_heap_cache_init(lub_heap_t * this,
		    lub_heap_align_t max_block_size, size_t num_max_blocks)
{
	lub_heap_status_t status = LUB_HEAP_FAILED;

	do {
		size_t bucket_size = (max_block_size * num_max_blocks);
		unsigned num_buckets =
		    lub_heap_cache_num_buckets(max_block_size);
		size_t block_size = 8;
		lub_heap_cache_t *cache;
		lub_heap_cache_bucket_t **ptr;
		int i;

		/* cannot call this multiple times */
		if (this->cache)
			break;

		/* allocate a cache control block */
		cache = this->cache =
		    lub_heap_static_alloc(this,
					  sizeof(lub_heap_cache_t) +
					  sizeof(lub_heap_cache_bucket_t *) *
					  (num_buckets - 1));
		if (!cache)
			break;

		cache->m_max_block_size = max_block_size;
		cache->m_num_max_blocks = num_max_blocks;
		cache->m_num_buckets = num_buckets;
		cache->m_bucket_size = bucket_size;
		cache->m_misses = 0;
		cache->m_bucket_end = &cache->m_bucket_start[num_buckets];

		/* allocate each bucket for the cache */
		for (ptr = cache->m_bucket_start;
		     ptr < cache->m_bucket_end; ++ptr) {
			*ptr = lub_heap_static_alloc(this,
						     bucket_size +
						     offsetof(lub_heap_cache_bucket_t,
							      m_memory_start));
			if (!*ptr)
				break;
			/* set up each bucket */
			lub_heap_cache_bucket_init(*ptr, cache,
						   block_size, bucket_size);
			block_size <<= 1;
		}
		if (ptr != cache->m_bucket_end)
			break;

		/* now create a fast lookup for resolving size to bucket */
		cache->m_bucket_lookup =
		    lub_heap_static_alloc(this,
					  sizeof(lub_heap_cache_bucket_t *) *
					  max_block_size);
		if (!cache->m_bucket_lookup)
			break;

		for (i = 0; i < cache->m_max_block_size; ++i) {
			cache->m_bucket_lookup[i] =
			    _lub_heap_cache_find_bucket_from_size(cache, i + 1);
		}
		status = LUB_HEAP_OK;
	} while (0);

	return status;
}

/*--------------------------------------------------------- */
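/*
 * Recover the owning bucket from a client address: every cache block
 * stores a pointer to its bucket in the word immediately before the
 * address handed out. If that stored value does not lie within the range
 * of this cache's buckets the address is not a cache block and 0 is
 * returned.
 */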
lub_heap_cache_bucket_t
    *lub_heap_cache_find_bucket_from_address(lub_heap_cache_t * this,
					      const void *address)
{
	lub_heap_cache_bucket_t **bucket = (void *)address;

	/* get the bucket reference */
	--bucket;
	if ((*bucket > this->m_bucket_start[0])
	    || (*bucket < this->m_bucket_end[-1])) {
		bucket = 0;
	}
	return bucket ? *bucket : 0;
}

/*--------------------------------------------------------- */
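/*
 * Resolve a size to its bucket via the precomputed lookup table; requests
 * larger than the maximum cached block size are counted as misses.
 */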
lub_heap_cache_bucket_t *lub_heap_cache_find_bucket_from_size(lub_heap_cache_t *
							       this, size_t size)
{
	lub_heap_cache_bucket_t *bucket = 0;

	/* simply get the result from the fast lookup table */
	if (size > 0) {
		if (size <= this->m_max_block_size) {
			bucket = this->m_bucket_lookup[size - 1];
		} else {
			++this->m_misses;
		}
	}
	return bucket;
}

/*--------------------------------------------------------- */
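/*
 * realloc() workhorse which moves blocks between the cache and the dynamic
 * heap as needed. Four combinations are handled: cache-to-cache (reuse the
 * block if the bucket is unchanged, otherwise allocate, copy and free),
 * cache-to-heap (fall back to lub_heap_raw_realloc() when the cache cannot
 * satisfy the request), heap-to-cache and heap-to-heap. On success *ptr is
 * updated with the new address (0 when size is 0).
 */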
lub_heap_status_t
lub_heap_cache_realloc(lub_heap_t * this, char **ptr, size_t size)
{
	lub_heap_status_t status = LUB_HEAP_FAILED;
	lub_heap_cache_bucket_t *old_bucket = 0;
	lub_heap_cache_bucket_t *new_bucket =
	    size ? lub_heap_cache_find_bucket_from_size(this->cache, size) : 0;
	char *new_ptr = 0;

	do {
		char *old_ptr = *ptr;

		if (old_ptr) {
			old_bucket =
			    lub_heap_cache_find_bucket_from_address(this->cache,
								    old_ptr);
		} else if (size == 0) {
			/* nothing to do */
			status = LUB_HEAP_OK;
			break;
		}
		if (old_bucket) {
			/********************************
			 * old from CACHE, new from CACHE
			 ******************************** */
			if (new_bucket) {
				if (old_bucket == new_bucket) {
					/* keep the existing cache block which is big enough */
					new_ptr = old_ptr;
					status = LUB_HEAP_OK;
					break;
				}
				/* try and get memory from the cache */
				new_ptr =
				    lub_heap_cache_bucket_alloc(new_bucket);
				if (new_ptr) {
					if (old_ptr) {
						/* copy the old details across */
						memcpy(new_ptr, old_ptr, size);
						/* release the old cache block */
						status =
						    lub_heap_cache_bucket_free
						    (old_bucket, old_ptr);
						if (LUB_HEAP_OK != status) {
							break;
						}
					}
					/* all done */
					status = LUB_HEAP_OK;
					break;
				}
			}
			/********************************
			 * old from CACHE, new from HEAP
			 ******************************** */
			if (size) {
				/* get new memory from the dynamic heap */
				status =
				    lub_heap_raw_realloc(this, &new_ptr, size,
							 LUB_HEAP_ALIGN_NATIVE);
				if (LUB_HEAP_OK != status) {
					break;
				}
				/* copy the old details across */
				memcpy(new_ptr, old_ptr, size);
			}
			/* release the old cache block */
			status = lub_heap_cache_bucket_free(old_bucket, old_ptr);
		} else {
			/********************************
			 * old from HEAP, new from CACHE
			 ******************************** */
			if (size) {
				if (new_bucket) {
					/* try and get memory from the cache */
					new_ptr =
					    lub_heap_cache_bucket_alloc
					    (new_bucket);
					if (new_ptr) {
						if (old_ptr) {
							/* copy the old details across */
							memcpy(new_ptr, old_ptr,
							       size);
							size = 0;
						} else {
							/* all done */
							status = LUB_HEAP_OK;
							break;
						}
					}
				}
			} else if (!old_ptr) {
				/* nothing to do */
				status = LUB_HEAP_OK;
				break;
			}
			/********************************
			 * old from HEAP, new from HEAP
			 ******************************** */
			/* release old memory and potentially get new memory */
			status =
			    lub_heap_raw_realloc(this, &old_ptr, size,
						 LUB_HEAP_ALIGN_NATIVE);
			if (LUB_HEAP_OK != status) {
				break;
			} else if (size) {
				new_ptr = old_ptr;
			}
		}
	} while (0);

	if (LUB_HEAP_OK == status) {
		/* set up the new pointer value */
		*ptr = new_ptr;
	}
	return status;
}

/*--------------------------------------------------------- */
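/*
 * Print a usage table with one row per bucket (block size, number of
 * blocks, current, high-tide and cumulative allocations, failed
 * allocations) followed by a summary row showing requests too large for
 * the cache.
 */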
void lub_heap_cache_show(lub_heap_cache_t * this)
{
	lub_heap_cache_bucket_t **ptr;

	printf("       size num blocks    current  high-tide cumulative     misses\n");
	printf(" ---------- ---------- ---------- ---------- ---------- ----------\n");

	for (ptr = this->m_bucket_start; ptr < this->m_bucket_end; ++ptr) {
		lub_blockpool_t *blockpool = &(*ptr)->m_blockpool;
		lub_blockpool_stats_t stats;

		lub_blockpool__get_stats(blockpool, &stats);
		printf(" %10" SIZE_FMT " %10" SIZE_FMT " %10" SIZE_FMT " %10"
		       SIZE_FMT " %10" SIZE_FMT " %10" SIZE_FMT "\n",
		       stats.block_size, stats.num_blocks, stats.alloc_blocks,
		       stats.alloc_hightide_blocks, stats.alloc_total_blocks,
		       stats.alloc_failures);
	}
	printf(" %10u %10s %10s %10s %10s %10" SIZE_FMT "\n",
	       this->m_max_block_size, "-", "-", "-", "-", this->m_misses);
}

/*--------------------------------------------------------- */
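/*
 * Aggregate blockpool statistics from every bucket into a lub_heap_stats_t.
 * The per-block back-pointer is counted as overhead rather than client
 * bytes, and the cache's own fixed overhead is charged to free_overhead.
 */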
void
lub_heap_cache__get_stats(lub_heap_cache_t * this, lub_heap_stats_t * stats)
{
	lub_heap_cache_bucket_t **ptr;

	memset(stats, 0, sizeof(lub_heap_stats_t));
	stats->free_overhead =
	    lub_heap_cache_overhead_size(this->m_max_block_size,
					 this->m_num_max_blocks);

	for (ptr = this->m_bucket_start; ptr < this->m_bucket_end; ++ptr) {
		lub_blockpool_t *blockpool = &(*ptr)->m_blockpool;
		lub_blockpool_stats_t bucket_stats;
		size_t block_size;
		size_t block_overhead = sizeof(lub_heap_cache_bucket_t *);	/* used for fast lookup from address */

		lub_blockpool__get_stats(blockpool, &bucket_stats);
		block_size = (bucket_stats.block_size - block_overhead);

		stats->free_blocks += bucket_stats.free_blocks;
		stats->free_bytes += block_size * bucket_stats.free_blocks;
		stats->free_overhead +=
		    block_overhead * bucket_stats.free_blocks;

		stats->alloc_blocks += bucket_stats.alloc_blocks;
		stats->alloc_bytes += block_size * bucket_stats.alloc_blocks;
		stats->alloc_overhead +=
		    block_overhead * bucket_stats.alloc_blocks;

		stats->alloc_total_blocks += bucket_stats.alloc_total_blocks;
		stats->alloc_total_bytes +=
		    block_size * bucket_stats.alloc_total_blocks;
	}
}

/*--------------------------------------------------------- */