Skip to content

Commit 0a7e54c

Browse files
Jia Zhu authored and brauner committed
cachefiles: resend an open request if the read request's object is closed

When an anonymous fd is closed by the user daemon, and a new read request for this file comes up, the anonymous fd should be re-opened to handle that read request rather than failing it directly.

1. Introduce a reopening state for objects that are closed but have inflight/subsequent read requests.
2. No longer flush READ requests, but only CLOSE requests, when the anonymous fd is closed.
3. Enqueue the reopen work to a workqueue, so the user daemon can get out of the daemon_read context and handle the request smoothly. Otherwise, the user daemon would send a reopen request and wait for itself to process that request.

Signed-off-by: Jia Zhu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Reviewed-by: Jingbo Xu <[email protected]> Reviewed-by: David Howells <[email protected]> Signed-off-by: Christian Brauner <[email protected]>
1 parent 3c5ecfe commit 0a7e54c

File tree

2 files changed

+72
-29
lines changed

2 files changed

+72
-29
lines changed

fs/cachefiles/internal.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,11 @@ struct cachefiles_volume {
4747
enum cachefiles_object_state {
4848
CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
4949
CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
50+
CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
5051
};
5152

5253
struct cachefiles_ondemand_info {
54+
struct work_struct ondemand_work;
5355
int ondemand_id;
5456
enum cachefiles_object_state state;
5557
struct cachefiles_object *object;
@@ -326,6 +328,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
326328

327329
CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
328330
CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
331+
CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
329332
#else
330333
static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
331334
char __user *_buffer, size_t buflen)

fs/cachefiles/ondemand.c

Lines changed: 69 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,10 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
1818
info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
1919
cachefiles_ondemand_set_object_close(object);
2020

21-
/*
22-
* Flush all pending READ requests since their completion depends on
23-
* anon_fd.
24-
*/
25-
xas_for_each(&xas, req, ULONG_MAX) {
21+
/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
22+
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
2623
if (req->msg.object_id == object_id &&
27-
req->msg.opcode == CACHEFILES_OP_READ) {
28-
req->error = -EIO;
24+
req->msg.opcode == CACHEFILES_OP_CLOSE) {
2925
complete(&req->done);
3026
xas_store(&xas, NULL);
3127
}
@@ -179,6 +175,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
179175
trace_cachefiles_ondemand_copen(req->object, id, size);
180176

181177
cachefiles_ondemand_set_object_open(req->object);
178+
wake_up_all(&cache->daemon_pollwq);
182179

183180
out:
184181
complete(&req->done);
@@ -222,7 +219,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
222219

223220
load = (void *)req->msg.data;
224221
load->fd = fd;
225-
req->msg.object_id = object_id;
226222
object->ondemand->ondemand_id = object_id;
227223

228224
cachefiles_get_unbind_pincount(cache);
@@ -238,6 +234,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
238234
return ret;
239235
}
240236

237+
static void ondemand_object_worker(struct work_struct *work)
238+
{
239+
struct cachefiles_ondemand_info *info =
240+
container_of(work, struct cachefiles_ondemand_info, ondemand_work);
241+
242+
cachefiles_ondemand_init_object(info->object);
243+
}
244+
245+
/*
246+
* If there are any inflight or subsequent READ requests on the
247+
* closed object, reopen it.
248+
* Skip read requests whose related object is reopening.
249+
*/
250+
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
251+
unsigned long xa_max)
252+
{
253+
struct cachefiles_req *req;
254+
struct cachefiles_object *object;
255+
struct cachefiles_ondemand_info *info;
256+
257+
xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
258+
if (req->msg.opcode != CACHEFILES_OP_READ)
259+
return req;
260+
object = req->object;
261+
info = object->ondemand;
262+
if (cachefiles_ondemand_object_is_close(object)) {
263+
cachefiles_ondemand_set_object_reopening(object);
264+
queue_work(fscache_wq, &info->ondemand_work);
265+
continue;
266+
}
267+
if (cachefiles_ondemand_object_is_reopening(object))
268+
continue;
269+
return req;
270+
}
271+
return NULL;
272+
}
273+
241274
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
242275
char __user *_buffer, size_t buflen)
243276
{
@@ -248,16 +281,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
248281
int ret = 0;
249282
XA_STATE(xas, &cache->reqs, cache->req_id_next);
250283

284+
xa_lock(&cache->reqs);
251285
/*
252286
* Cyclically search for a request that has not ever been processed,
253287
* to prevent requests from being processed repeatedly, and make
254288
* request distribution fair.
255289
*/
256-
xa_lock(&cache->reqs);
257-
req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
290+
req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
258291
if (!req && cache->req_id_next > 0) {
259292
xas_set(&xas, 0);
260-
req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
293+
req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
261294
}
262295
if (!req) {
263296
xa_unlock(&cache->reqs);
@@ -277,14 +310,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
277310
xa_unlock(&cache->reqs);
278311

279312
id = xas.xa_index;
280-
msg->msg_id = id;
281313

282314
if (msg->opcode == CACHEFILES_OP_OPEN) {
283315
ret = cachefiles_ondemand_get_fd(req);
284-
if (ret)
316+
if (ret) {
317+
cachefiles_ondemand_set_object_close(req->object);
285318
goto error;
319+
}
286320
}
287321

322+
msg->msg_id = id;
323+
msg->object_id = req->object->ondemand->ondemand_id;
324+
288325
if (copy_to_user(_buffer, msg, n) != 0) {
289326
ret = -EFAULT;
290327
goto err_put_fd;
@@ -317,19 +354,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
317354
void *private)
318355
{
319356
struct cachefiles_cache *cache = object->volume->cache;
320-
struct cachefiles_req *req;
357+
struct cachefiles_req *req = NULL;
321358
XA_STATE(xas, &cache->reqs, 0);
322359
int ret;
323360

324361
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
325362
return 0;
326363

327-
if (test_bit(CACHEFILES_DEAD, &cache->flags))
328-
return -EIO;
364+
if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
365+
ret = -EIO;
366+
goto out;
367+
}
329368

330369
req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
331-
if (!req)
332-
return -ENOMEM;
370+
if (!req) {
371+
ret = -ENOMEM;
372+
goto out;
373+
}
333374

334375
req->object = object;
335376
init_completion(&req->done);
@@ -367,7 +408,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
367408
/* coupled with the barrier in cachefiles_flush_reqs() */
368409
smp_mb();
369410

370-
if (opcode != CACHEFILES_OP_OPEN &&
411+
if (opcode == CACHEFILES_OP_CLOSE &&
371412
!cachefiles_ondemand_object_is_open(object)) {
372413
WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
373414
xas_unlock(&xas);
@@ -392,7 +433,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
392433
wake_up_all(&cache->daemon_pollwq);
393434
wait_for_completion(&req->done);
394435
ret = req->error;
436+
kfree(req);
437+
return ret;
395438
out:
439+
/* Reset the object to close state in error handling path.
440+
* If error occurs after creating the anonymous fd,
441+
* cachefiles_ondemand_fd_release() will set object to close.
442+
*/
443+
if (opcode == CACHEFILES_OP_OPEN)
444+
cachefiles_ondemand_set_object_close(object);
396445
kfree(req);
397446
return ret;
398447
}
@@ -439,7 +488,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
439488
if (!cachefiles_ondemand_object_is_open(object))
440489
return -ENOENT;
441490

442-
req->msg.object_id = object->ondemand->ondemand_id;
443491
trace_cachefiles_ondemand_close(object, &req->msg);
444492
return 0;
445493
}
@@ -455,16 +503,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
455503
struct cachefiles_object *object = req->object;
456504
struct cachefiles_read *load = (void *)req->msg.data;
457505
struct cachefiles_read_ctx *read_ctx = private;
458-
int object_id = object->ondemand->ondemand_id;
459-
460-
/* Stop enqueuing requests when daemon has closed anon_fd. */
461-
if (!cachefiles_ondemand_object_is_open(object)) {
462-
WARN_ON_ONCE(object_id == 0);
463-
pr_info_once("READ: anonymous fd closed prematurely.\n");
464-
return -EIO;
465-
}
466506

467-
req->msg.object_id = object_id;
468507
load->off = read_ctx->off;
469508
load->len = read_ctx->len;
470509
trace_cachefiles_ondemand_read(object, &req->msg, load);
@@ -513,6 +552,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
513552
return -ENOMEM;
514553

515554
object->ondemand->object = object;
555+
INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
516556
return 0;
517557
}
518558

0 commit comments

Comments
 (0)