@@ -18,14 +18,10 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
 	cachefiles_ondemand_set_object_close(object);
 
-	/*
-	 * Flush all pending READ requests since their completion depends on
-	 * anon_fd.
-	 */
-	xas_for_each(&xas, req, ULONG_MAX) {
+	/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
 		if (req->msg.object_id == object_id &&
-		    req->msg.opcode == CACHEFILES_OP_READ) {
-			req->error = -EIO;
+		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
 			complete(&req->done);
 			xas_store(&xas, NULL);
 		}
@@ -179,6 +175,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 	trace_cachefiles_ondemand_copen(req->object, id, size);
 
 	cachefiles_ondemand_set_object_open(req->object);
+	wake_up_all(&cache->daemon_pollwq);
 
 out:
 	complete(&req->done);
@@ -222,7 +219,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 
 	load = (void *)req->msg.data;
 	load->fd = fd;
-	req->msg.object_id = object_id;
 	object->ondemand->ondemand_id = object_id;
 
 	cachefiles_get_unbind_pincount(cache);
@@ -238,6 +234,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 	return ret;
 }
 
+static void ondemand_object_worker(struct work_struct *work)
+{
+	struct cachefiles_ondemand_info *info =
+		container_of(work, struct cachefiles_ondemand_info, ondemand_work);
+
+	cachefiles_ondemand_init_object(info->object);
+}
+
+/*
+ * If there are any inflight or subsequent READ requests on the
+ * closed object, reopen it.
+ * Skip read requests whose related object is reopening.
+ */
+static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
+							     unsigned long xa_max)
+{
+	struct cachefiles_req *req;
+	struct cachefiles_object *object;
+	struct cachefiles_ondemand_info *info;
+
+	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
+		if (req->msg.opcode != CACHEFILES_OP_READ)
+			return req;
+		object = req->object;
+		info = object->ondemand;
+		if (cachefiles_ondemand_object_is_close(object)) {
+			cachefiles_ondemand_set_object_reopening(object);
+			queue_work(fscache_wq, &info->ondemand_work);
+			continue;
+		}
+		if (cachefiles_ondemand_object_is_reopening(object))
+			continue;
+		return req;
+	}
+	return NULL;
+}
+
 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 					char __user *_buffer, size_t buflen)
 {
@@ -248,16 +281,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	int ret = 0;
 	XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
+	xa_lock(&cache->reqs);
 	/*
 	 * Cyclically search for a request that has not ever been processed,
 	 * to prevent requests from being processed repeatedly, and make
 	 * request distribution fair.
 	 */
-	xa_lock(&cache->reqs);
-	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
 	if (!req && cache->req_id_next > 0) {
 		xas_set(&xas, 0);
-		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
 	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
@@ -277,14 +310,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	xa_unlock(&cache->reqs);
 
 	id = xas.xa_index;
-	msg->msg_id = id;
 
 	if (msg->opcode == CACHEFILES_OP_OPEN) {
 		ret = cachefiles_ondemand_get_fd(req);
-		if (ret)
+		if (ret) {
+			cachefiles_ondemand_set_object_close(req->object);
 			goto error;
+		}
 	}
 
+	msg->msg_id = id;
+	msg->object_id = req->object->ondemand->ondemand_id;
+
 	if (copy_to_user(_buffer, msg, n) != 0) {
 		ret = -EFAULT;
 		goto err_put_fd;
@@ -317,19 +354,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 					void *private)
 {
 	struct cachefiles_cache *cache = object->volume->cache;
-	struct cachefiles_req *req;
+	struct cachefiles_req *req = NULL;
 	XA_STATE(xas, &cache->reqs, 0);
 	int ret;
 
 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
 		return 0;
 
-	if (test_bit(CACHEFILES_DEAD, &cache->flags))
-		return -EIO;
+	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+		ret = -EIO;
+		goto out;
+	}
 
 	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
-	if (!req)
-		return -ENOMEM;
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	req->object = object;
 	init_completion(&req->done);
@@ -367,7 +408,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	/* coupled with the barrier in cachefiles_flush_reqs() */
 	smp_mb();
 
-	if (opcode != CACHEFILES_OP_OPEN &&
+	if (opcode == CACHEFILES_OP_CLOSE &&
 	    !cachefiles_ondemand_object_is_open(object)) {
 		WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 		xas_unlock(&xas);
@@ -392,7 +433,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	wake_up_all(&cache->daemon_pollwq);
 	wait_for_completion(&req->done);
 	ret = req->error;
+	kfree(req);
+	return ret;
 out:
+	/* Reset the object to close state in error handling path.
+	 * If error occurs after creating the anonymous fd,
+	 * cachefiles_ondemand_fd_release() will set object to close.
+	 */
+	if (opcode == CACHEFILES_OP_OPEN)
+		cachefiles_ondemand_set_object_close(object);
 	kfree(req);
 	return ret;
 }
@@ -439,7 +488,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
 	if (!cachefiles_ondemand_object_is_open(object))
 		return -ENOENT;
 
-	req->msg.object_id = object->ondemand->ondemand_id;
 	trace_cachefiles_ondemand_close(object, &req->msg);
 	return 0;
 }
@@ -455,16 +503,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
 	struct cachefiles_object *object = req->object;
 	struct cachefiles_read *load = (void *)req->msg.data;
 	struct cachefiles_read_ctx *read_ctx = private;
-	int object_id = object->ondemand->ondemand_id;
-
-	/* Stop enqueuing requests when daemon has closed anon_fd. */
-	if (!cachefiles_ondemand_object_is_open(object)) {
-		WARN_ON_ONCE(object_id == 0);
-		pr_info_once("READ: anonymous fd closed prematurely.\n");
-		return -EIO;
-	}
 
-	req->msg.object_id = object_id;
 	load->off = read_ctx->off;
 	load->len = read_ctx->len;
 	trace_cachefiles_ondemand_read(object, &req->msg, load);
@@ -513,6 +552,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
 		return -ENOMEM;
 
 	object->ondemand->object = object;
+	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
 	return 0;
 }
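For context only (not part of the patch): the msg_id and object_id that cachefiles_ondemand_daemon_read() now fills in just before copy_to_user() are what the user-space on-demand daemon keys its replies on. Below is a minimal sketch of that daemon side, assuming the cachefiles on-demand UAPI (struct cachefiles_msg, struct cachefiles_open/cachefiles_read, the "copen" reply, and CACHEFILES_IOC_READ_COMPLETE); verify the exact names and formats against include/uapi/linux/cachefiles.h and Documentation/filesystems/caching/cachefiles.rst before relying on it.

/* Daemon-side sketch; illustrative only, based on the assumed on-demand UAPI. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cachefiles.h>

#define MAX_OBJECTS 1024
static int anon_fds[MAX_OBJECTS];	/* object_id -> anon fd, daemon bookkeeping */

static int handle_one_request(int devfd, long long cache_size)
{
	char buf[1024];
	ssize_t n = read(devfd, buf, sizeof(buf)); /* served by cachefiles_ondemand_daemon_read() */
	struct cachefiles_msg *msg = (void *)buf;

	if (n < (ssize_t)sizeof(*msg))
		return -1;

	switch (msg->opcode) {
	case CACHEFILES_OP_OPEN: {
		struct cachefiles_open *load = (void *)msg->data;
		char reply[64];

		anon_fds[msg->object_id] = load->fd;
		/* "copen <msg_id>,<size>" completes the OPEN request; with this
		 * patch the kernel side also wakes pollers via wake_up_all(). */
		snprintf(reply, sizeof(reply), "copen %u,%lld", msg->msg_id, cache_size);
		return write(devfd, reply, strlen(reply)) < 0 ? -1 : 0;
	}
	case CACHEFILES_OP_READ: {
		struct cachefiles_read *load = (void *)msg->data;

		(void)load;
		/* ...fetch load->len bytes at load->off, write them into the cache
		 * file through the anon fd, then signal completion... */
		return ioctl(anon_fds[msg->object_id], CACHEFILES_IOC_READ_COMPLETE,
			     msg->msg_id);
	}
	case CACHEFILES_OP_CLOSE:
		/* Closing the anon fd runs cachefiles_ondemand_fd_release(); a later
		 * READ on the object then goes through the reopen path added above
		 * (cachefiles_ondemand_select_req + ondemand_object_worker). */
		close(anon_fds[msg->object_id]);
		return 0;
	}
	return -1;
}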