 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
 
 #include "internal.h"
 #include "iostat.h"
@@ -235,108 +238,153 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
 	fscache_unuse_cookie(cookie, &auxdata, &i_size);
 }
 
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
 {
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_read_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
-			   NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
+	if (!netfs_inode(folio_inode(folio))->cache)
+		return -ENOBUFS;
+
+	return netfs_read_folio(file, folio);
 }
 
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
-				       bool no_space_allocated_yet)
+int nfs_netfs_readahead(struct readahead_control *ractl)
 {
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	loff_t start = page_offset(page);
-	size_t len = PAGE_SIZE;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_write_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
-				      no_space_allocated_yet);
-	if (ret == 0)
-		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
+	struct inode *inode = ractl->mapping->host;
+
+	if (!netfs_inode(inode)->cache)
+		return -ENOBUFS;
+
+	netfs_readahead(ractl);
+	return 0;
 }
 
-/*
- * Retrieve a page from fscache
- */
-int __nfs_fscache_read_page(struct inode *inode, struct page *page)
+atomic_t nfs_netfs_debug_id;
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
-	int ret;
+	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
 
-	trace_nfs_fscache_read_page(inode, page);
-	if (PageChecked(page)) {
-		ClearPageChecked(page);
-		ret = 1;
-		goto out;
-	}
+	return 0;
+}
 
-	ret = fscache_fallback_read_page(inode, page);
-	if (ret < 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
-		SetPageChecked(page);
-		goto out;
-	}
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+{
+	put_nfs_open_context(rreq->netfs_priv);
+}
 
-	/* Read completed synchronously */
-	nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
-	SetPageUptodate(page);
-	ret = 0;
-out:
-	trace_nfs_fscache_read_page_exit(inode, page, ret);
-	return ret;
+static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
+{
+	return fscache_begin_read_operation(&rreq->cache_resources,
+					    netfs_i_cookie(netfs_inode(rreq->inode)));
 }
 
-/*
- * Store a newly fetched page in fscache. We can be certain there's no page
- * stored in the cache as yet otherwise we would've read it from there.
- */
-void __nfs_fscache_write_page(struct inode *inode, struct page *page)
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
 {
-	int ret;
+	struct nfs_netfs_io_data *netfs;
+
+	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+	if (!netfs)
+		return NULL;
+	netfs->sreq = sreq;
+	refcount_set(&netfs->refcount, 1);
+	return netfs;
+}
 
-	trace_nfs_fscache_write_page(inode, page);
+static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
+{
+	size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
 
-	ret = fscache_fallback_write_page(inode, page, true);
+	sreq->len = min(sreq->len, rsize);
+	return true;
+}
 
-	if (ret != 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
-	} else {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+{
+	struct nfs_netfs_io_data *netfs;
+	struct nfs_pageio_descriptor pgio;
+	struct inode *inode = sreq->rreq->inode;
+	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+	struct page *page;
+	int err;
+	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+	pgoff_t last = ((sreq->start + sreq->len -
+			 sreq->transferred - 1) >> PAGE_SHIFT);
+	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+	nfs_pageio_init_read(&pgio, inode, false,
+			     &nfs_async_read_completion_ops);
+
+	netfs = nfs_netfs_alloc(sreq);
+	if (!netfs)
+		return netfs_subreq_terminated(sreq, -ENOMEM, false);
+
+	pgio.pg_netfs = netfs; /* used in completion */
+
+	xas_lock(&xas);
+	xas_for_each(&xas, page, last) {
+		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+		if (err < 0) {
+			netfs->error = err;
+			goto out;
+		}
+		xas_lock(&xas);
 	}
-	trace_nfs_fscache_write_page_exit(inode, page, ret);
+	xas_unlock(&xas);
+out:
+	nfs_pageio_complete_read(&pgio);
+	nfs_netfs_put(netfs);
 }
+
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data *netfs = hdr->netfs;
+
+	if (!netfs)
+		return;
+
+	nfs_netfs_get(netfs);
+}
+
+int nfs_netfs_folio_unlock(struct folio *folio)
+{
+	struct inode *inode = folio_file_mapping(folio)->host;
+
+	/*
+	 * If fscache is enabled, netfs will unlock pages.
+	 */
+	if (netfs_inode(inode)->cache)
+		return 0;
+
+	return 1;
+}
+
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data *netfs = hdr->netfs;
+	struct netfs_io_subrequest *sreq;
+
+	if (!netfs)
+		return;
+
+	sreq = netfs->sreq;
+	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+	if (hdr->error)
+		netfs->error = hdr->error;
+	else
+		atomic64_add(hdr->res.count, &netfs->transferred);
+
+	nfs_netfs_put(netfs);
+	hdr->netfs = NULL;
+}
+
+const struct netfs_request_ops nfs_netfs_ops = {
+	.init_request		= nfs_netfs_init_request,
+	.free_request		= nfs_netfs_free_request,
+	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
+	.issue_read		= nfs_netfs_issue_read,
+	.clamp_length		= nfs_netfs_clamp_length
+};
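Note: nfs_netfs_read_folio() and nfs_netfs_readahead() above return -ENOBUFS when the inode has no fscache cookie attached, leaving it to the regular NFS read entry points to fall back to an uncached read. A minimal caller-side sketch of that convention (not part of this diff; the function name and the fallback helper below are hypothetical):

/*
 * Hypothetical sketch: try the netfs/fscache path first and fall back to
 * the plain over-the-wire NFS read when no cache cookie is attached.
 */
static int nfs_read_folio_sketch(struct file *file, struct folio *folio)
{
	int ret;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret != -ENOBUFS)
		return ret;	/* netfs handled the read (or hit a real error) */

	/* No cache cookie: read over the wire as before this change. */
	return nfs_uncached_read_folio(file, folio);	/* hypothetical helper */
}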