/** @file buf.c
 * @brief Dynamic buffer.
 *
 * The dynamic buffer can be written to and read from. It grows as data is
 * written to it.
 *
 * The user can get direct access to the buffer. Suppose, for example, we
 * need to read some data from a file and store it in the dynamic buffer. We
 * pre-allocate the necessary space within the buffer and lock it. The lock
 * function returns a "struct iovec" array to write to. After that we unlock
 * the buffer. So no additional temporary buffer is needed between the file's
 * read() and the dynamic buffer. The dynamic buffer has the same
 * functionality for reading from it.
 */
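
/* A minimal usage sketch of the workflow described above (illustrative only,
 * not part of the library). The descriptor "fd" and the 4096-byte request are
 * assumptions for the example; readv() needs <sys/uio.h>. We lock space for
 * direct writing, let readv() fill it, then unlock only the bytes that were
 * actually received.
 *
 *	faux_buf_t *buf = faux_buf_new(0); // 0 means default chunk size
 *	struct iovec *iov = NULL;
 *	size_t iov_num = 0;
 *
 *	ssize_t locked = faux_buf_dwrite_lock(buf, 4096, &iov, &iov_num);
 *	if (locked > 0) {
 *		ssize_t received = readv(fd, iov, iov_num);
 *		// Unlocks the whole locked region, keeps only received bytes
 *		faux_buf_dwrite_unlock(buf, (received > 0) ? received : 0, iov);
 *	}
 *	faux_buf_free(buf);
 */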

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <syslog.h>

#include "faux/faux.h"
#include "faux/str.h"
#include "faux/buf.h"

// Default chunk size
#define DATA_CHUNK 4096

struct faux_buf_s {
	faux_list_t *list; // List of chunks
	faux_list_node_t *wchunk; // Chunk to write to. NULL if list is empty
	size_t rpos; // Read position within first chunk
	size_t wpos; // Write position within wchunk (can be non-last chunk)
	size_t chunk_size; // Size of chunk
	size_t len; // Whole data length
	size_t limit; // Overflow limit
	size_t rlocked; // How much space is locked for reading
	size_t wlocked; // How much space is locked for writing
};

/** @brief Create new dynamic buffer object.
 *
 * @param [in] chunk_size Chunk size. If "0" then default size will be used.
 * @return Allocated object or NULL on error.
 */
faux_buf_t *faux_buf_new(size_t chunk_size)
{
	faux_buf_t *buf = NULL;

	buf = faux_zmalloc(sizeof(*buf));
	assert(buf);
	if (!buf)
		return NULL;

	// Init
	buf->chunk_size = (chunk_size != 0) ? chunk_size : DATA_CHUNK;
	buf->limit = FAUX_BUF_UNLIMITED;
	buf->list = faux_list_new(FAUX_LIST_UNSORTED, FAUX_LIST_NONUNIQUE,
		NULL, NULL, faux_free);
	buf->rpos = 0;
	buf->wpos = buf->chunk_size;
	buf->len = 0;
	buf->wchunk = NULL;
	buf->rlocked = 0; // Unlocked
	buf->wlocked = 0; // Unlocked

	return buf;
}

/** @brief Free dynamic buffer object.
 *
 * @param [in] buf Buffer object.
 */
void faux_buf_free(faux_buf_t *buf)
{
	if (!buf)
		return;

	faux_list_free(buf->list);

	faux_free(buf);
}

/** @brief Empties dynamic buffer object.
 *
 * The buffer can't be emptied while it is locked for reading or writing.
 *
 * @param [in] buf Buffer object.
 * @return BOOL_TRUE - success, BOOL_FALSE - error.
 */
bool_t faux_buf_empty(faux_buf_t *buf)
{
	if (!buf)
		return BOOL_FALSE;

	// Don't empty locked buffer
	if (faux_buf_is_rlocked(buf) ||
		faux_buf_is_wlocked(buf))
		return BOOL_FALSE;

	faux_list_del_all(buf->list);
	buf->rpos = 0;
	buf->wpos = buf->chunk_size;
	buf->len = 0;
	buf->wchunk = NULL;

	return BOOL_TRUE;
}

/** @brief Returns length of buffer.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Length of buffer or < 0 on error.
 */
ssize_t faux_buf_len(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return -1;

	return buf->len;
}

/** @brief Returns number of allocated data chunks.
 *
 * Function is not exported to DSO.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Number of allocated chunks or < 0 on error.
 */
FAUX_HIDDEN ssize_t faux_buf_chunk_num(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return -1;
	assert(buf->list);
	if (!buf->list)
		return -1;

	return faux_list_len(buf->list);
}

/** @brief Returns limit of buffer length.
 *
 * The returned "0" means unlimited.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Maximum buffer length or < 0 on error.
 */
ssize_t faux_buf_limit(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return -1;

	return buf->limit;
}

/** @brief Set buffer length limit.
 *
 * Writing more data than this limit will lead to error. The "0" value means
 * unlimited buffer. Default is unlimited.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @param [in] limit Maximum buffer length.
 * @return BOOL_TRUE - success, BOOL_FALSE - error.
 */
bool_t faux_buf_set_limit(faux_buf_t *buf, size_t limit)
{
	assert(buf);
	if (!buf)
		return BOOL_FALSE;

	buf->limit = limit;

	return BOOL_TRUE;
}

/** @brief Get amount of unused space within current data chunk.
 *
 * Internal static function. The current chunk is "wchunk".
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Size of unused space or < 0 on error.
 */
static ssize_t faux_buf_wavail(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return -1;

	if (!buf->wchunk)
		return 0; // Empty list

	return (buf->chunk_size - buf->wpos);
}

/** @brief Get amount of available data within current data chunk.
 *
 * Internal static function. The current chunk is the first chunk of the list.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Size of available data or < 0 on error.
 */
static ssize_t faux_buf_ravail(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return -1;

	// Empty list
	if (buf->len == 0)
		return 0;
	// Read and write within the same chunk
	if (faux_list_head(buf->list) == buf->wchunk)
		return (buf->wpos - buf->rpos);

	// Write pointer is far away from read pointer (more than chunk)
	return (buf->chunk_size - buf->rpos);
}

/** @brief Get amount of locked space for writing.
 *
 * The "0" means that buffer is not locked for writing.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Size of locked space or "0" if unlocked.
 */
size_t faux_buf_is_wlocked(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return BOOL_FALSE;

	return buf->wlocked;
}

/** @brief Get amount of locked space for reading.
 *
 * The "0" means that buffer is not locked for reading.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Size of locked data or "0" if unlocked.
 */
size_t faux_buf_is_rlocked(const faux_buf_t *buf)
{
	assert(buf);
	if (!buf)
		return BOOL_FALSE;

	return buf->rlocked;
}

/** @brief Allocates new chunk and adds it to the end of chunk list.
 *
 * Static internal function.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @return Newly created list node or NULL on error.
 */
static faux_list_node_t *faux_buf_alloc_chunk(faux_buf_t *buf)
{
	char *chunk = NULL;

	assert(buf);
	if (!buf)
		return NULL;
	assert(buf->list);
	if (!buf->list)
		return NULL;

	chunk = faux_malloc(buf->chunk_size);
	assert(chunk);
	if (!chunk)
		return NULL;

	return faux_list_add(buf->list, chunk);
}

/** @brief Checks if writing the given amount of data will overflow the buffer.
 *
 * It uses the previously set "limit" value for calculations.
 *
 * @param [in] buf Allocated and initialized buffer object.
 * @param [in] add_len Length of data we want to write to the buffer.
 * @return BOOL_TRUE - it will overflow, BOOL_FALSE - enough space.
 */
bool_t faux_buf_will_be_overflow(const faux_buf_t *buf, size_t add_len)
{
	assert(buf);
	if (!buf)
		return BOOL_FALSE;

	if (FAUX_BUF_UNLIMITED == buf->limit)
		return BOOL_FALSE;

	if ((buf->len + add_len) > buf->limit)
		return BOOL_TRUE;

	return BOOL_FALSE;
}
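
/* A minimal sketch (illustrative only) of guarding writes with a length
 * limit. The 65536-byte limit and the "data"/"len" variables are assumptions
 * for the example.
 *
 *	faux_buf_set_limit(buf, 65536);
 *	if (faux_buf_will_be_overflow(buf, len)) {
 *		// Writing "len" bytes would exceed the limit: drop or postpone
 *	} else {
 *		faux_buf_write(buf, data, len);
 *	}
 */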

/** @brief Reads dynamic buffer data to specified linear buffer.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] data Linear buffer to read data to.
 * @param [in] len Length of data to read.
 * @return Length of data actually read or < 0 on error.
 */
ssize_t faux_buf_read(faux_buf_t *buf, void *data, size_t len)
{
	struct iovec *iov = NULL;
	size_t iov_num = 0;
	ssize_t total = 0;
	char *dst = (char *)data;
	size_t i = 0;

	assert(buf);
	if (!buf)
		return -1;
	assert(data);
	if (!data)
		return -1;

	total = faux_buf_dread_lock(buf, len, &iov, &iov_num);
	if (total <= 0)
		return total;

	for (i = 0; i < iov_num; i++) {
		memcpy(dst, iov[i].iov_base, iov[i].iov_len);
		dst += iov[i].iov_len;
	}

	if (faux_buf_dread_unlock(buf, total, iov) != total)
		return -1;

	return total;
}

/** @brief Gets "struct iovec" array for direct reading and locks data.
 *
 * The length of the actually locked data can differ from the length specified
 * by the user. When the buffer holds less data than specified, the return
 * value will be equal to the buffer length.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] len Length of data to read.
 * @param [out] iov_out "struct iovec" array to direct read from.
 * @param [out] iov_num_out Number of "struct iovec" array elements.
 * @return Length of data actually locked or < 0 on error.
 */
ssize_t faux_buf_dread_lock(faux_buf_t *buf, size_t len,
	struct iovec **iov_out, size_t *iov_num_out)
{
	size_t vec_entries_num = 0;
	struct iovec *iov = NULL;
	unsigned int i = 0;
	faux_list_node_t *iter = NULL;
	size_t len_to_lock = 0;
	size_t avail = 0;
	size_t must_be_read = 0;

	assert(buf);
	if (!buf)
		return -1;
	assert(iov_out);
	if (!iov_out)
		return -1;
	assert(iov_num_out);
	if (!iov_num_out)
		return -1;

	// Don't use already locked buffer
	if (faux_buf_is_rlocked(buf))
		return -1;

	len_to_lock = (len < buf->len) ? len : buf->len;
	// Nothing to lock
	if (0 == len_to_lock) {
		*iov_out = NULL;
		*iov_num_out = 0;
		return 0;
	}

	// Calculate number of struct iovec entries
	avail = faux_buf_ravail(buf);
	if (avail > 0)
		vec_entries_num++;
	if (avail < len_to_lock) {
		size_t l = buf->len - avail; // length w/o first chunk
		vec_entries_num += l / buf->chunk_size;
		if ((l % buf->chunk_size) > 0)
			vec_entries_num++;
	}
	iov = faux_zmalloc(vec_entries_num * sizeof(*iov));

	// Iterate chunks. Suppose list is not empty
	must_be_read = len_to_lock;
	iter = NULL;
	while (must_be_read > 0) {
		char *data = NULL;
		off_t data_offset = 0;
		size_t data_len = buf->chunk_size;
		size_t p_len = 0;

		// First chunk
		if (!iter) {
			iter = faux_list_head(buf->list);
			if (avail > 0) {
				data_offset = buf->rpos;
				data_len = avail; // Calculated earlier
			} else { // Empty chunk. Go to next
				iter = faux_list_next_node(iter);
			}
		// Not-first chunks
		} else {
			iter = faux_list_next_node(iter);
		}

		data = (char *)faux_list_data(iter) + data_offset;
		p_len = (must_be_read < data_len) ? must_be_read : data_len;
		must_be_read -= p_len;
		iov[i].iov_base = data;
		iov[i].iov_len = p_len;
		i++;
	}

	*iov_out = iov;
	*iov_num_out = vec_entries_num;
	buf->rlocked = len_to_lock;

	return len_to_lock;
}
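
/* A minimal sketch (illustrative only): draining buffered data directly to a
 * descriptor with writev(). The descriptor "fd" and the 4096-byte request are
 * assumptions for the example; writev() needs <sys/uio.h>.
 *
 *	struct iovec *iov = NULL;
 *	size_t iov_num = 0;
 *	ssize_t locked = faux_buf_dread_lock(buf, 4096, &iov, &iov_num);
 *	if (locked > 0) {
 *		ssize_t sent = writev(fd, iov, iov_num);
 *		// Unlocks everything but removes only the bytes really sent
 *		faux_buf_dread_unlock(buf, (sent > 0) ? sent : 0, iov);
 *	}
 */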

/** @brief Locks data for reading.
 *
 * The complementary function is faux_buf_dread_unlock_easy().
 * This function has the same functionality as faux_buf_dread_lock() but
 * chooses the length of the locked space itself to return a single continuous
 * buffer.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [out] data Continuous buffer for direct reading.
 * @return Length of data actually locked or < 0 on error.
 */
ssize_t faux_buf_dread_lock_easy(faux_buf_t *buf, void **data)
{
	struct iovec *iov = NULL;
	size_t iov_num = 0;
	ssize_t len_to_lock = 0;
	ssize_t avail = 0;
	ssize_t locked_len = 0;

	assert(buf);
	if (!buf)
		return -1;
	assert(data);
	if (!data)
		return -1;

	// Don't use already locked buffer
	if (faux_buf_is_rlocked(buf))
		return -1;

	avail = faux_buf_ravail(buf);
	if (avail < 0)
		return -1;
	if (0 == avail)
		avail = buf->chunk_size; // Next chunk

	len_to_lock = ((size_t)avail < buf->len) ? (size_t)avail : buf->len;
	// Nothing to lock
	if (0 == len_to_lock) {
		*data = NULL;
		return 0;
	}

	locked_len = faux_buf_dread_lock(buf, len_to_lock, &iov, &iov_num);
	if (locked_len <= 0)
		return -1;
	if (iov_num < 1) {
		faux_free(iov);
		return -1;
	}
	*data = iov[0].iov_base;
	locked_len = iov[0].iov_len;
	faux_free(iov);

	return locked_len;
}
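
/* A minimal sketch (illustrative only): the "easy" variant returns a single
 * continuous region, so plain write() is enough. The descriptor "fd" is an
 * assumption for the example.
 *
 *	void *data = NULL;
 *	ssize_t locked = faux_buf_dread_lock_easy(buf, &data);
 *	if (locked > 0) {
 *		ssize_t sent = write(fd, data, locked);
 *		faux_buf_dread_unlock_easy(buf, (sent > 0) ? sent : 0);
 *	}
 */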

/** @brief Frees "struct iovec" array and unlocks read data.
 *
 * The length of the actually read data can be less than the length of the
 * locked data. In this case all the data will be unlocked but only the
 * actually read length will be removed from the buffer.
 *
 * Function gets "struct iovec" array to free it. It was previously allocated
 * by the faux_buf_dread_lock() function.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] really_readed Length of data actually read.
 * @param [out] iov "struct iovec" array to free.
 * @return Length of data actually unlocked or < 0 on error.
 */
ssize_t faux_buf_dread_unlock(faux_buf_t *buf, size_t really_readed,
	struct iovec *iov)
{
	size_t must_be_read = really_readed;

	assert(buf);
	if (!buf)
		return -1;
	// Can't unlock non-locked buffer
	if (!faux_buf_is_rlocked(buf))
		return -1;

	if (buf->rlocked < really_readed)
		return -1; // Something went wrong
	if (buf->len < really_readed)
		return -1; // Something went wrong

	if (0 == really_readed)
		goto unlock;

	// Suppose list is not empty
	while (must_be_read > 0) {
		size_t avail = faux_buf_ravail(buf);
		ssize_t data_to_rm = (must_be_read < avail) ? must_be_read : avail;
		faux_list_node_t *iter = faux_list_head(buf->list);

		buf->len -= data_to_rm;
		buf->rpos += data_to_rm;
		must_be_read -= data_to_rm;

		// Current chunk was fully read. So remove it from list.
		// Chunk is not wchunk
		if ((iter != buf->wchunk) &&
			(buf->rpos == buf->chunk_size)) {
			buf->rpos = 0; // 0 position within next chunk
			faux_list_del(buf->list, iter);
			if (faux_buf_chunk_num(buf) == 0) { // Empty list w/o locks
				buf->wchunk = NULL;
				buf->wpos = buf->chunk_size;
			}
		// Chunk is wchunk
		} else if ((iter == buf->wchunk) &&
			(buf->rpos == buf->wpos) &&
			(!buf->wlocked || // Chunk can be locked for writing
			(buf->wpos == buf->chunk_size))) { // Chunk can be filled
			buf->rpos = 0; // 0 position within next chunk
			buf->wchunk = NULL;
			buf->wpos = buf->chunk_size;
			faux_list_del(buf->list, iter);
		}
	}

unlock:
	// Unlock whole buffer. Not 'really_readed' bytes only
	buf->rlocked = 0;
	faux_free(iov);

	return really_readed;
}

/** @brief Unlocks read data.
 *
 * It's a function complementary to faux_buf_dread_lock_easy().
 * It has the same functionality as faux_buf_dread_unlock() but doesn't free
 * the "struct iovec" array.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] really_readed Length of data actually read.
 * @return Length of data actually unlocked or < 0 on error.
 */
ssize_t faux_buf_dread_unlock_easy(faux_buf_t *buf, size_t really_readed)
{
	return faux_buf_dread_unlock(buf, really_readed, NULL);
}

/** @brief Write data from linear buffer to dynamic buffer.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] data Linear buffer. Source of data.
 * @param [in] len Length of data to write.
 * @return Length of data actually written or < 0 on error.
 */
ssize_t faux_buf_write(faux_buf_t *buf, const void *data, size_t len)
{
	struct iovec *iov = NULL;
	size_t iov_num = 0;
	ssize_t total = 0;
	char *src = (char *)data;
	size_t i = 0;

	assert(buf);
	if (!buf)
		return -1;
	assert(data);
	if (!data)
		return -1;

	total = faux_buf_dwrite_lock(buf, len, &iov, &iov_num);
	if (total <= 0)
		return total;

	for (i = 0; i < iov_num; i++) {
		memcpy(iov[i].iov_base, src, iov[i].iov_len);
		src += iov[i].iov_len;
	}

	if (faux_buf_dwrite_unlock(buf, total, iov) != total)
		return -1;

	return total;
}

/** @brief Gets "struct iovec" array for direct writing and locks data.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] len Length of data to lock.
 * @param [out] iov_out "struct iovec" array to direct write to.
 * @param [out] iov_num_out Number of "struct iovec" array elements.
 * @return Length of data actually locked or < 0 on error.
 */
ssize_t faux_buf_dwrite_lock(faux_buf_t *buf, size_t len,
	struct iovec **iov_out, size_t *iov_num_out)
{
	size_t vec_entries_num = 0;
	struct iovec *iov = NULL;
	unsigned int i = 0;
	faux_list_node_t *iter = NULL;
	size_t avail = 0;
	size_t must_be_write = len;

	assert(buf);
	if (!buf)
		return -1;
	assert(iov_out);
	if (!iov_out)
		return -1;
	assert(iov_num_out);
	if (!iov_num_out)
		return -1;

	// Don't use already locked buffer
	if (faux_buf_is_wlocked(buf))
		return -1;

	// It will be overflow after writing
	if (faux_buf_will_be_overflow(buf, len))
		return -1;

	// Nothing to lock
	if (0 == len) {
		*iov_out = NULL;
		*iov_num_out = 0;
		return 0;
	}

	// Write lock
	buf->wlocked = len;

	// Calculate number of struct iovec entries
	avail = faux_buf_wavail(buf);
	if (avail > 0)
		vec_entries_num++;
	if (avail < len) {
		size_t i = 0;
		size_t new_chunk_num = 0;
		size_t l = len - avail; // length w/o first chunk
		new_chunk_num += l / buf->chunk_size;
		if ((l % buf->chunk_size) > 0)
			new_chunk_num++;
		vec_entries_num += new_chunk_num;
		for (i = 0; i < new_chunk_num; i++)
			faux_buf_alloc_chunk(buf);
	}
	iov = faux_zmalloc(vec_entries_num * sizeof(*iov));
	assert(iov);

	// Iterate chunks
	iter = buf->wchunk;
	i = 0;
	while (must_be_write > 0) {
		char *data = NULL;
		off_t data_offset = 0;
		size_t data_len = buf->chunk_size;
		size_t p_len = 0;

		// List was empty before writing
		if (!iter) {
			iter = faux_list_head(buf->list);
		// Not empty list. First element
		} else if ((iter == buf->wchunk) && (i == 0)) {
			size_t l = faux_buf_wavail(buf);
			if (0 == l) { // Not enough space within current chunk
				iter = faux_list_next_node(iter);
			} else {
				data_offset = buf->wpos;
				data_len = l;
			}
		// Not empty list. Fully free chunk
		} else {
			iter = faux_list_next_node(iter);
		}

		p_len = (must_be_write < data_len) ? must_be_write : data_len;
		data = (char *)faux_list_data(iter) + data_offset;
		must_be_write -= p_len;
		iov[i].iov_base = data;
		iov[i].iov_len = p_len;
		i++;
	}

	*iov_out = iov;
	*iov_num_out = vec_entries_num;

	return len;
}

/** @brief Gets a data buffer for direct writing and locks it.
 *
 * The complementary function is faux_buf_dwrite_unlock_easy().
 * This function has the same functionality as faux_buf_dwrite_lock() but
 * chooses the length of the locked space itself to return a single continuous
 * buffer.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [out] data Continuous buffer for direct writing.
 * @return Length of data actually locked or < 0 on error.
 */
ssize_t faux_buf_dwrite_lock_easy(faux_buf_t *buf, void **data)
{
	struct iovec *iov = NULL;
	size_t iov_num = 0;
	ssize_t len = 0;
	ssize_t locked_len = 0;

	assert(buf);
	if (!buf)
		return -1;
	assert(data);
	if (!data)
		return -1;

	// Don't use already locked buffer
	if (faux_buf_is_wlocked(buf))
		return -1;

	len = faux_buf_wavail(buf);
	if (len < 0)
		return -1;
	if (0 == len)
		len = buf->chunk_size; // It will use next chunk

	locked_len = faux_buf_dwrite_lock(buf, len, &iov, &iov_num);
	if (locked_len <= 0)
		return -1;
	if (iov_num < 1) {
		faux_free(iov);
		return -1;
	}
	*data = iov[0].iov_base;
	locked_len = iov[0].iov_len;
	faux_free(iov);

	return locked_len;
}
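
/* A minimal sketch (illustrative only): filling the buffer through the "easy"
 * write lock with plain read(). The descriptor "fd" is an assumption for the
 * example.
 *
 *	void *data = NULL;
 *	ssize_t locked = faux_buf_dwrite_lock_easy(buf, &data);
 *	if (locked > 0) {
 *		ssize_t received = read(fd, data, locked);
 *		faux_buf_dwrite_unlock_easy(buf, (received > 0) ? received : 0);
 *	}
 */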

/** @brief Frees "struct iovec" array and unlocks written data.
 *
 * The length of the actually written data can be less than the length of the
 * locked data. In this case all the data will be unlocked but only the
 * actually written length will be stored within the buffer.
 *
 * Function gets "struct iovec" array to free it. It was previously allocated
 * by the faux_buf_dwrite_lock() function.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] really_written Length of data actually written.
 * @param [out] iov "struct iovec" array to free.
 * @return Length of data actually unlocked or < 0 on error.
 */
ssize_t faux_buf_dwrite_unlock(faux_buf_t *buf, size_t really_written,
	struct iovec *iov)
{
	size_t must_be_write = really_written;

	assert(buf);
	if (!buf)
		return -1;
	// Can't unlock non-locked buffer
	if (!faux_buf_is_wlocked(buf))
		return -1;

	if (buf->wlocked < really_written)
		return -1; // Something went wrong

	while (must_be_write > 0) {
		size_t avail = 0;
		ssize_t data_to_add = 0;

		avail = faux_buf_wavail(buf);
		// Current chunk was fully written. So move to next one
		if (0 == avail) {
			buf->wpos = 0; // 0 position within next chunk
			if (buf->wchunk)
				buf->wchunk = faux_list_next_node(buf->wchunk);
			else
				buf->wchunk = faux_list_head(buf->list);
			avail = faux_buf_wavail(buf);
		}
		data_to_add = (must_be_write < avail) ? must_be_write : avail;

		buf->len += data_to_add;
		buf->wpos += data_to_add;
		must_be_write -= data_to_add;
	}

	if (buf->wchunk) {
		faux_list_node_t *iter = NULL;
		// Remove trailing empty chunks after wchunk
		while ((iter = faux_list_next_node(buf->wchunk)))
			faux_list_del(buf->list, iter);
		// When really_written == 0 then all data can be read after
		// dwrite_lock() and dwrite_unlock() so chunk can be empty.
		if ((faux_list_head(buf->list) == buf->wchunk) &&
			(buf->wpos == buf->rpos)) {
			faux_list_del(buf->list, buf->wchunk);
			buf->wchunk = NULL;
			buf->wpos = buf->chunk_size;
			buf->rpos = 0;
		}
	}

	// Unlock whole buffer. Not 'really_written' bytes only
	buf->wlocked = 0;
	faux_free(iov);

	return really_written;
}

/** @brief Unlocks written data.
 *
 * It's a function complementary to faux_buf_dwrite_lock_easy().
 * It has the same functionality as faux_buf_dwrite_unlock() but doesn't free
 * the "struct iovec" array.
 *
 * @param [in] buf Allocated and initialized dynamic buffer object.
 * @param [in] really_written Length of data actually written.
 * @return Length of data actually unlocked or < 0 on error.
 */
ssize_t faux_buf_dwrite_unlock_easy(faux_buf_t *buf, size_t really_written)
{
	return faux_buf_dwrite_unlock(buf, really_written, NULL);
}