Skip to content

Commit a08d55f

Browse files
committed
fusefronted: optimize NFS streaming writes by saving one Stat()
Stat() calls are expensive on NFS as they need a full network round-trip. We detect when a write immediately follows the last one and skip the Stat in this case because the write cannot create a file hole. On my (slow) NAS, this takes the write speed from 24MB/s to 41MB/s.
1 parent 9b71352 commit a08d55f

File tree

2 files changed

+37
-4
lines changed

2 files changed

+37
-4
lines changed

internal/fusefrontend/file.go

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99
"log"
1010
"os"
1111
"sync"
12+
"sync/atomic"
1213
"syscall"
1314
"time"
1415

@@ -43,6 +44,11 @@ type file struct {
4344
header *contentenc.FileHeader
4445
// go-fuse nodefs.loopbackFile
4546
loopbackFile nodefs.File
47+
// Store the offset of the last byte that was written
48+
lastWrittenOffset int64
49+
// The opCount is used to judge whether "lastWrittenOffset" is still
50+
// guaranteed to be correct.
51+
lastOpCount uint64
4652
}
4753

4854
// NewFile returns a new go-fuse File instance.
@@ -282,6 +288,16 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
282288
return written, status
283289
}
284290

291+
// isConsecutiveWrite returns true if the current write
292+
// directly (in time and space) follows the last write.
293+
// This is an optimisation for streaming writes on NFS where a
294+
// Stat() call is very expensive.
295+
// The caller must hold "wlock.lock(f.ino)", otherwise this check would be racy.
296+
func (f *file) isConsecutiveWrite(off int64) bool {
297+
opCount := atomic.LoadUint64(&wlock.opCount)
298+
return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
299+
}
300+
285301
// Write - FUSE call
286302
//
287303
// If the write creates a hole, pads the file to the next block boundary.
@@ -299,11 +315,20 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
299315
defer wlock.unlock(f.ino)
300316
tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.ino, off, len(data))
301317
// If the write creates a file hole, we have to zero-pad the last block.
302-
status := f.writePadHole(off)
303-
if !status.Ok() {
304-
return 0, status
318+
// But if the write directly follows an earlier write, it cannot create a
319+
// hole, and we can save one Stat() call.
320+
if !f.isConsecutiveWrite(off) {
321+
status := f.writePadHole(off)
322+
if !status.Ok() {
323+
return 0, status
324+
}
325+
}
326+
n, status := f.doWrite(data, off)
327+
if status.Ok() {
328+
f.lastOpCount = atomic.LoadUint64(&wlock.opCount)
329+
f.lastWrittenOffset = off + int64(len(data)) - 1
305330
}
306-
return f.doWrite(data, off)
331+
return n, status
307332
}
308333

309334
// Release - FUSE call, close file

internal/fusefrontend/write_lock.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package fusefrontend
22

33
import (
44
"sync"
5+
"sync/atomic"
56
)
67

78
func init() {
@@ -20,6 +21,12 @@ var wlock wlockMap
2021
// 2) lock ... unlock ...
2122
// 3) unregister
2223
type wlockMap struct {
24+
// Counts lock() calls. As every operation that modifies a file should
25+
// call it, this effectively serves as a write-operation counter.
26+
// The variable is accessed without holding any locks so atomic operations
27+
// must be used. It must be the first element of the struct to guarantee
28+
// 64-bit alignment.
29+
opCount uint64
2330
// Protects map access
2431
sync.Mutex
2532
inodeLocks map[uint64]*refCntMutex
@@ -62,6 +69,7 @@ func (w *wlockMap) unregister(ino uint64) {
6269

6370
// lock retrieves the entry for "ino" and locks it.
6471
func (w *wlockMap) lock(ino uint64) {
72+
atomic.AddUint64(&w.opCount, 1)
6573
w.Lock()
6674
r := w.inodeLocks[ino]
6775
w.Unlock()

0 commit comments

Comments (0)