codir.c

/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"
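
/*
 * Common pattern for all v9fs_co_* functions in this file: they run as a
 * coroutine on the main IO thread (top half), return early with -EINTR if
 * the client request was cancelled (Tflush), and dispatch the actual fs
 * driver call via v9fs_co_run_in_worker() to a background worker thread
 * (bottom half), yielding the coroutine until the worker has finished.
 */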

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    /*
     * readdir() returns NULL both at the end of the directory and on
     * error; resetting errno first lets a changed errno identify the
     * error case.
     */
    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}

/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int coroutine_fn
do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries,
                off_t offset, int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: There should be a warn_report_once() here if taking the lock
     * is contended.
     *
     * With a well-behaved 9p client we should not run into concurrency
     * here, because such a client would not use the same fid for
     * concurrent requests. We take the lock for safety reasons though.
     * A client that does reuse the fid would suffer performance issues,
     * so that situation is better logged here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);
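
    /*
     * A minimal sketch of what the TODO above might look like, assuming a
     * hypothetical v9fs_readdir_trylock() helper (no such helper exists in
     * this file); warn_report_once() is the regular QEMU error-report API:
     *
     *     if (!v9fs_readdir_trylock(&fidp->fs.dir)) {
     *         warn_report_once("9p: concurrent readdir requests on the "
     *                          "same fid, expect performance issues");
     *         v9fs_readdir_lock(&fidp->fs.dir);
     *     }
     */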

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would need to be discarded by
         * the 9p controller (main thread / top half) anyway
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
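
        /*
         * For reference (protocol background, not stated in this file): a
         * 9p2000.L Rreaddir element is encoded as qid[13] + offset[8] +
         * type[1] + name[s] on the wire, which is why the per-entry
         * response size computed above depends on the name length.
         */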
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_new0(V9fsDirEnt, 1);
        } else {
            e = e->next = g_new0(V9fsDirEnt, 1);
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_new0(struct stat, 1);
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = qemu_dirent_off(dent);
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);

    if (err < 0) {
        return err;
    }
    return size;
}

/**
 * v9fs_co_readdir_many() - Reads multiple directory entries in one rush.
 *
 * @pdu: the causing 9p (T_readdir) client request
 * @fidp: already opened directory on which readdir shall be performed
 * @entries: output for directory entries (must not be NULL)
 * @offset: initial position inside the directory the function shall
 *          seek to before retrieving the directory entries
 * @maxsize: maximum result message body size (in bytes)
 * @dostat: whether a stat() should be performed and returned for
 *          each directory entry
 * Return: resulting response message body size (in bytes) on success,
 *         negative error code otherwise
 *
 * Retrieves (at most) the requested amount of directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * Acquiring multiple directory entries in one rush from the fs driver,
 * instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency; in practice this could add up to overall latencies
 * of several hundred ms for reading all entries of just a single directory
 * if every directory entry was requested individually from the fs driver.
 *
 * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both on success and on error cases of this
 * function, to avoid memory leaks once @entries are no longer needed.
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
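
/*
 * A minimal usage sketch for v9fs_co_readdir_many(), as seen from a caller
 * on the top half (the consume() callback and the max_count parameter below
 * are hypothetical, for illustration only):
 *
 *     struct V9fsDirEnt *entries = NULL;
 *     struct V9fsDirEnt *e;
 *     int err;
 *
 *     err = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
 *                                true);
 *     for (e = entries; err >= 0 && e; e = e->next) {
 *         consume(e->dent, e->st);   // hypothetical consumer
 *     }
 *     v9fs_free_dirents(entries);    // mandatory, on success AND on error
 */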

/* Returns the current position inside the directory, or -errno on failure. */
off_t coroutine_fn v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

/* Seeks the directory to the given offset. */
void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

/* Resets the directory read position to the beginning. */
void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

/*
 * Creates directory @name under @fidp with the given mode and ownership,
 * then stats the newly created directory into @stbuf. Returns 0 on
 * success, -errno on failure.
 */
int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}
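
/*
 * Note on the open fd accounting below (behavior inferred from the wider
 * 9p/fsdev code, summarized here for readability): total_open_fd tracks how
 * many host fds the 9p server currently holds; once it exceeds the
 * open_fd_hw watermark, v9fs_reclaim_fd() closes fds of reclaimable fids to
 * stay within the host's fd limits.
 */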
int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

/* Closes a directory previously opened with v9fs_co_opendir(). */
int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}