/* buf.c — dynamic buffer implementation (faux library) */
  1. /** @file buf.c
  2. * @brief Dynamic buffer.
  3. *
  4. */
  5. #include <stdlib.h>
  6. #include <stdint.h>
  7. #include <stdio.h>
  8. #include <string.h>
  9. #include <assert.h>
  10. #include "faux/faux.h"
  11. #include "faux/str.h"
  12. #include "faux/buf.h"
  13. // Default chunk size
  14. #define DATA_CHUNK 4096
// Dynamic buffer: data is stored as a FIFO list of fixed-size chunks.
// Reading consumes from the head (rpos), writing appends at the tail (wpos).
struct faux_buf_s {
	faux_list_t *list; // List of chunks
	faux_list_node_t *wchunk; // Chunk to write to
	size_t rpos; // Read position within first chunk
	size_t wpos; // Write position within wchunk (can be non-last chunk)
	size_t chunk_size; // Size of chunk
	size_t len; // Whole data length
	size_t limit; // Overflow limit
	size_t rblocked; // Bytes blocked for direct read (0 = unblocked)
	size_t wblocked; // Bytes blocked for direct write (0 = unblocked)
};
  26. /** @brief Create new dynamic buffer object.
  27. *
  28. * @param [in] chunk_size Chunk size. If "0" then default size will be used.
  29. * @return Allocated object or NULL on error.
  30. */
  31. faux_buf_t *faux_buf_new(size_t chunk_size)
  32. {
  33. faux_buf_t *buf = NULL;
  34. buf = faux_zmalloc(sizeof(*buf));
  35. assert(buf);
  36. if (!buf)
  37. return NULL;
  38. // Init
  39. buf->chunk_size = (chunk_size != 0) ? chunk_size : DATA_CHUNK;
  40. buf->limit = FAUX_BUF_UNLIMITED;
  41. buf->list = faux_list_new(FAUX_LIST_UNSORTED, FAUX_LIST_NONUNIQUE,
  42. NULL, NULL, faux_free);
  43. buf->rpos = 0;
  44. buf->wpos = buf->chunk_size;
  45. buf->len = 0;
  46. buf->wchunk = NULL;
  47. buf->rblocked = 0; // Unblocked
  48. buf->wblocked = 0; // Unblocked
  49. return buf;
  50. }
  51. /** @brief Free dynamic buffer object.
  52. *
  53. * @param [in] buf Buffer object.
  54. */
  55. void faux_buf_free(faux_buf_t *buf)
  56. {
  57. if (!buf)
  58. return;
  59. faux_list_free(buf->list);
  60. faux_free(buf);
  61. }
  62. ssize_t faux_buf_len(const faux_buf_t *buf)
  63. {
  64. assert(buf);
  65. if (!buf)
  66. return -1;
  67. return buf->len;
  68. }
  69. static ssize_t faux_buf_chunk_num(const faux_buf_t *buf)
  70. {
  71. assert(buf);
  72. if (!buf)
  73. return -1;
  74. assert(buf->list);
  75. if (!buf->list)
  76. return -1;
  77. return faux_list_len(buf->list);
  78. }
  79. ssize_t faux_buf_limit(const faux_buf_t *buf)
  80. {
  81. assert(buf);
  82. if (!buf)
  83. return -1;
  84. return buf->limit;
  85. }
/** @brief Set size limit.
 *
 * The buffer will refuse to store more than "limit" bytes: write operations
 * check the limit via the overflow test. Use FAUX_BUF_UNLIMITED to disable
 * the limit.
 *
 * @param [in] buf Allocated and initialized buf object.
 * @param [in] limit Maximal buffer length in bytes.
 * @return BOOL_TRUE - success, BOOL_FALSE - error.
 */
bool_t faux_buf_set_limit(faux_buf_t *buf, size_t limit)
{
	assert(buf);
	if (!buf)
		return BOOL_FALSE;

	buf->limit = limit;

	return BOOL_TRUE;
}
  107. /** @brief Get amount of unused space within current data chunk.
  108. *
  109. * Inernal static function.
  110. *
  111. * @param [in] list Internal buffer (list of chunks) to inspect.
  112. * @param [in] pos Current write position within last chunk
  113. * @return Size of unused space or < 0 on error.
  114. */
  115. static ssize_t faux_buf_wavail(const faux_buf_t *buf)
  116. {
  117. assert(buf);
  118. if (!buf)
  119. return -1;
  120. if (faux_buf_chunk_num(buf) == 0)
  121. return 0; // Empty list
  122. return (buf->chunk_size - buf->wpos);
  123. }
  124. static ssize_t faux_buf_ravail(const faux_buf_t *buf)
  125. {
  126. ssize_t num = 0;
  127. assert(buf);
  128. if (!buf)
  129. return -1;
  130. num = faux_buf_chunk_num(buf);
  131. if (num == 0)
  132. return 0; // Empty list
  133. if (num > 1)
  134. return (buf->chunk_size - buf->rpos);
  135. // Single chunk
  136. return (buf->wpos - buf->rpos);
  137. }
  138. size_t faux_buf_is_wblocked(const faux_buf_t *buf)
  139. {
  140. assert(buf);
  141. if (!buf)
  142. return BOOL_FALSE;
  143. return buf->wblocked;
  144. }
  145. size_t faux_buf_is_rblocked(const faux_buf_t *buf)
  146. {
  147. assert(buf);
  148. if (!buf)
  149. return BOOL_FALSE;
  150. return buf->rblocked;
  151. }
  152. static faux_list_node_t *faux_buf_alloc_chunk(faux_buf_t *buf)
  153. {
  154. char *chunk = NULL;
  155. assert(buf);
  156. if (!buf)
  157. return NULL;
  158. assert(buf->list);
  159. if (!buf->list)
  160. return NULL;
  161. chunk = faux_malloc(buf->chunk_size);
  162. assert(chunk);
  163. if (!chunk)
  164. return NULL;
  165. return faux_list_add(buf->list, chunk);
  166. }
  167. static bool_t faux_buf_will_be_overflow(const faux_buf_t *buf, size_t add_len)
  168. {
  169. assert(buf);
  170. if (!buf)
  171. return BOOL_FALSE;
  172. if (FAUX_BUF_UNLIMITED == buf->limit)
  173. return BOOL_FALSE;
  174. if ((buf->len + add_len) > buf->limit)
  175. return BOOL_TRUE;
  176. return BOOL_FALSE;
  177. }
/** @brief Check if buffer is overflowed right now (len exceeds limit).
 *
 * @param [in] buf Buffer object.
 * @return BOOL_TRUE - overflowed, BOOL_FALSE - not overflowed (or error).
 */
bool_t faux_buf_is_overflow(const faux_buf_t *buf)
{
	return faux_buf_will_be_overflow(buf, 0);
}
  182. /** @brief buf data write.
  183. *
  184. * All given data will be stored to internal buffer (list of data chunks).
  185. * Then function will try to write stored data to file descriptor in
  186. * non-blocking mode. Note some data can be left within buffer. In this case
  187. * the "stall" callback will be executed to inform about it. To try to write
  188. * the rest of the data user can be call faux_buf_out() function. Both
  189. * functions will not block.
  190. *
  191. * @param [in] buf Allocated and initialized buf I/O object.
  192. * @param [in] data Data buffer to write.
  193. * @param [in] len Data length to write.
  194. * @return Length of stored/writed data or < 0 on error.
  195. */
  196. ssize_t faux_buf_write(faux_buf_t *buf, const void *data, size_t len)
  197. {
  198. size_t data_left = len;
  199. assert(buf);
  200. if (!buf)
  201. return -1;
  202. assert(data);
  203. if (!data)
  204. return -1;
  205. // It will be overflow after writing
  206. if (faux_buf_will_be_overflow(buf, len))
  207. return -1;
  208. // Don't write to the space reserved for direct write
  209. if (faux_buf_is_wblocked(buf))
  210. return -1;
  211. while (data_left > 0) {
  212. ssize_t bytes_free = 0;
  213. size_t copy_len = 0;
  214. char *chunk_ptr = NULL;
  215. // Allocate new chunk if necessary
  216. bytes_free = faux_buf_wavail(buf);
  217. if (bytes_free < 0)
  218. return -1;
  219. if (0 == bytes_free) {
  220. faux_list_node_t *node = faux_buf_alloc_chunk(buf);
  221. assert(node);
  222. if (!node) // Something went wrong. Strange.
  223. return -1;
  224. buf->wpos = 0;
  225. bytes_free = faux_buf_wavail(buf);
  226. }
  227. // Copy data
  228. chunk_ptr = faux_list_data(faux_list_tail(buf->list));
  229. copy_len = (data_left < (size_t)bytes_free) ? data_left : (size_t)bytes_free;
  230. memcpy(chunk_ptr + buf->wpos, data + len - data_left, copy_len);
  231. buf->wpos += copy_len;
  232. data_left -= copy_len;
  233. buf->len += copy_len;
  234. }
  235. return len;
  236. }
/** @brief Read data from buffer (copying variant).
 *
 * Blocks up to "len" bytes of buffered data for direct reading, copies the
 * blocked data to the user-supplied storage and then unblocks the buffer,
 * which removes the consumed bytes. The function doesn't block execution.
 *
 * @param [in] buf Allocated and initialized buf object.
 * @param [out] data Storage for read data (at least "len" bytes).
 * @param [in] len Maximal length of data to read.
 * @return Length of data actually read or < 0 on error.
 */
ssize_t faux_buf_read(faux_buf_t *buf, void *data, size_t len)
{
	struct iovec *iov = NULL;
	size_t iov_num;
	ssize_t total = 0;
	char *dst = (char *)data;
	size_t i = 0;

	assert(data);
	if (!data)
		return -1;

	// Block data for direct reading; iov points straight into the chunks
	total = faux_buf_dread_block(buf, len, &iov, &iov_num);
	if (total <= 0)
		return total;

	// Copy chunk fragments to user storage
	for (i = 0; i < iov_num; i++) {
		memcpy(dst, iov[i].iov_base, iov[i].iov_len);
		dst += iov[i].iov_len;
	}

	// Unblock buffer and remove consumed data (also frees iov)
	if (faux_buf_dread_unblock(buf, total, iov) != total)
		return -1;

	return total;
}
  270. ssize_t faux_buf_dread_block(faux_buf_t *buf, size_t len,
  271. struct iovec **iov_out, size_t *iov_num_out)
  272. {
  273. size_t vec_entries_num = 0;
  274. struct iovec *iov = NULL;
  275. unsigned int i = 0;
  276. faux_list_node_t *iter = NULL;
  277. size_t len_to_block = 0;
  278. size_t avail = 0;
  279. size_t must_be_read = 0;
  280. assert(buf);
  281. if (!buf)
  282. return -1;
  283. assert(iov_out);
  284. if (!iov_out)
  285. return -1;
  286. assert(iov_num_out);
  287. if (!iov_num_out)
  288. return -1;
  289. // Don't use already blocked buffer
  290. if (faux_buf_is_rblocked(buf))
  291. return -1;
  292. len_to_block = (len < buf->len) ? len : buf->len;
  293. // Nothing to block
  294. if (0 == len_to_block) {
  295. *iov_out = NULL;
  296. *iov_num_out = 0;
  297. return 0;
  298. }
  299. // Calculate number of struct iovec entries
  300. avail = faux_buf_ravail(buf);
  301. vec_entries_num = 1; // Guaranteed
  302. if (avail < len_to_block) {
  303. size_t l = buf->len - avail; // length wo first chunk
  304. vec_entries_num += l / buf->chunk_size;
  305. if ((l % buf->chunk_size) > 0)
  306. vec_entries_num++;
  307. }
  308. iov = faux_zmalloc(vec_entries_num * sizeof(*iov));
  309. // Iterate chunks
  310. must_be_read = len_to_block;
  311. iter = faux_list_head(buf->list);
  312. while ((must_be_read > 0) && (iter)) {
  313. char *p = (char *)faux_list_data(iter);
  314. size_t l = buf->chunk_size;
  315. size_t p_len = 0;
  316. if (iter == faux_list_head(buf->list)) { // First chunk
  317. p += buf->rpos;
  318. l = avail;
  319. }
  320. p_len = (must_be_read < l) ? must_be_read : l;
  321. iov[i].iov_base = p;
  322. iov[i].iov_len = p_len;
  323. i++;
  324. must_be_read -= p_len;
  325. iter = faux_list_next_node(iter);
  326. }
  327. *iov_out = iov;
  328. *iov_num_out = vec_entries_num;
  329. buf->rblocked = len_to_block;
  330. return len_to_block;
  331. }
/** @brief Unblock buffer after direct reading.
 *
 * Removes "really_readed" bytes from the head of the buffer, frees the
 * iovec array allocated by faux_buf_dread_block() and unblocks the buffer.
 *
 * @param [in] buf Allocated and initialized buf object.
 * @param [in] really_readed Length of data actually consumed by the caller.
 * @param [in] iov The iovec array returned by faux_buf_dread_block().
 * @return Length of removed data or < 0 on error.
 */
ssize_t faux_buf_dread_unblock(faux_buf_t *buf, size_t really_readed,
	struct iovec *iov)
{
	size_t must_be_read = 0;

	assert(buf);
	if (!buf)
		return -1;
	// Can't unblock non-blocked buffer
	if (!faux_buf_is_rblocked(buf))
		return -1;
	// Caller can't have consumed more than was blocked
	if (buf->rblocked < really_readed)
		return -1; // Something went wrong
	if (buf->len < really_readed)
		return -1; // Something went wrong

	// Unblock whole buffer. Not 'really readed' bytes only
	buf->rblocked = 0;
	faux_free(iov);

	if (0 == really_readed)
		return really_readed;

	// Advance read position, dropping fully consumed chunks
	must_be_read = really_readed;
	while (must_be_read > 0) {
		size_t avail = faux_buf_ravail(buf);
		ssize_t data_to_rm = (must_be_read < avail) ? must_be_read : avail;

		buf->len -= data_to_rm;
		buf->rpos += data_to_rm;
		must_be_read -= data_to_rm;

		// Current chunk was fully readed. So remove it from list.
		if ((buf->rpos == buf->chunk_size) ||
			((faux_buf_chunk_num(buf) == 1) && (buf->rpos == buf->wpos))
			) {
			buf->rpos = 0; // 0 position within next chunk
			faux_list_del(buf->list, faux_list_head(buf->list));
		}
		// Buffer became empty: reset wpos so the next write allocates a chunk
		if (faux_buf_chunk_num(buf) == 0)
			buf->wpos = buf->chunk_size;
	}

	return really_readed;
}
  370. ssize_t faux_buf_dwrite_block(faux_buf_t *buf, size_t len,
  371. struct iovec **iov_out, size_t *iov_num_out)
  372. {
  373. size_t vec_entries_num = 0;
  374. struct iovec *iov = NULL;
  375. unsigned int i = 0;
  376. faux_list_node_t *iter = NULL;
  377. faux_list_node_t *first_node = NULL;
  378. size_t avail = 0;
  379. size_t must_be_write = len;
  380. assert(buf);
  381. if (!buf)
  382. return -1;
  383. assert(iov_out);
  384. if (!iov_out)
  385. return -1;
  386. assert(iov_num_out);
  387. if (!iov_num_out)
  388. return -1;
  389. // Don't use already blocked buffer
  390. if (faux_buf_is_wblocked(buf))
  391. return -1;
  392. // It will be overflow after writing
  393. if (faux_buf_will_be_overflow(buf, len))
  394. return -1;
  395. // Nothing to block
  396. if (0 == len) {
  397. *iov_out = NULL;
  398. *iov_num_out = 0;
  399. return 0;
  400. }
  401. // Save wchunk
  402. buf->wchunk = faux_list_tail(buf->list); // Can be NULL
  403. buf->wblocked = len;
  404. // Calculate number of struct iovec entries
  405. avail = faux_buf_wavail(buf);
  406. if (avail > 0)
  407. vec_entries_num += 1;
  408. else
  409. buf->wpos = 0; // New chunk will be created when avail == 0
  410. if (avail < len) {
  411. size_t i = 0;
  412. size_t new_chunk_num = 0;
  413. size_t l = len - avail; // length wo first chunk
  414. new_chunk_num += l / buf->chunk_size;
  415. if ((l % buf->chunk_size) > 0)
  416. new_chunk_num++;
  417. vec_entries_num += new_chunk_num;
  418. for (i = 0; i < new_chunk_num; i++)
  419. faux_buf_alloc_chunk(buf);
  420. }
  421. iov = faux_zmalloc(vec_entries_num * sizeof(*iov));
  422. // Iterate chunks
  423. if (NULL == buf->wchunk)
  424. buf->wchunk = faux_list_head(buf->list);
  425. iter = buf->wchunk;
  426. first_node = iter;
  427. while ((must_be_write > 0) && (iter)) {
  428. char *p = (char *)faux_list_data(iter);
  429. size_t l = buf->chunk_size;
  430. size_t p_len = 0;
  431. if (iter == first_node) {
  432. p += buf->wpos;
  433. l = faux_buf_wavail(buf);
  434. }
  435. p_len = (must_be_write < l) ? must_be_write : l;
  436. iov[i].iov_base = p;
  437. iov[i].iov_len = p_len;
  438. i++;
  439. must_be_write -= p_len;
  440. iter = faux_list_next_node(iter);
  441. }
  442. *iov_out = iov;
  443. *iov_num_out = vec_entries_num;
  444. return len;
  445. }
/** @brief Remove reserved-but-unused chunks after direct write.
 *
 * Internal static function. Deletes all chunks after the current write
 * chunk (space reserved by faux_buf_dwrite_block() but not filled) and
 * the write chunk itself if it holds no data.
 *
 * @param [in] buf Allocated and initialized buf object.
 * @return BOOL_TRUE - success, BOOL_FALSE - error.
 */
static bool_t faux_buf_rm_trailing_empty_chunks(faux_buf_t *buf)
{
	faux_list_node_t *node = NULL;

	assert(buf);
	if (!buf)
		return BOOL_FALSE;
	assert(buf->list);
	if (!buf->list)
		return BOOL_FALSE;

	if (faux_buf_chunk_num(buf) == 0)
		return BOOL_TRUE; // Empty list

	// Drop every chunk after wchunk
	// NOTE(review): if buf->wchunk is NULL this loop empties the whole
	// list, including chunks that still hold data - callers must ensure
	// wchunk is valid here. TODO confirm intent.
	while ((node = faux_list_tail(buf->list)) != buf->wchunk)
		faux_list_del(buf->list, node);

	// Drop wchunk itself when it contains no data:
	// either nothing was written to it, or (single chunk) read position
	// already caught up with write position.
	if (buf->wchunk &&
		((buf->wpos == 0) || // Empty chunk
		((faux_buf_chunk_num(buf) == 1) && (buf->rpos == buf->wpos)))
		) {
		faux_list_del(buf->list, buf->wchunk);
		buf->wchunk = NULL;
		buf->wpos = buf->chunk_size;
	}

	return BOOL_TRUE;
}
  469. ssize_t faux_buf_dwrite_unblock(faux_buf_t *buf, size_t really_written,
  470. struct iovec *iov)
  471. {
  472. size_t must_be_write = 0;
  473. assert(buf);
  474. if (!buf)
  475. return -1;
  476. // Can't unblock non-blocked buffer
  477. if (!faux_buf_is_wblocked(buf))
  478. return -1;
  479. // Empty wchunk - strange
  480. if (!buf->wchunk)
  481. return -1;
  482. if (0 == really_written)
  483. return really_written;
  484. // Unblock whole buffer. Not 'really written' bytes only
  485. buf->wblocked = 0;
  486. faux_free(iov);
  487. if (buf->wblocked < really_written)
  488. return -1; // Something went wrong
  489. must_be_write = really_written;
  490. while (must_be_write > 0) {
  491. size_t avail = faux_buf_wavail(buf);
  492. ssize_t data_to_add = (must_be_write < avail) ? must_be_write : avail;
  493. buf->len += data_to_add;
  494. buf->wpos += data_to_add;
  495. must_be_write -= data_to_add;
  496. // Current chunk was fully written. So move to next one
  497. if (buf->wpos == buf->chunk_size) {
  498. buf->wpos = 0; // 0 position within next chunk
  499. buf->wchunk = faux_list_next_node(buf->wchunk);
  500. }
  501. }
  502. faux_buf_rm_trailing_empty_chunks(buf);
  503. return really_written;
  504. }