Documentation/filesystems/Locking
Based on kernel version 3.15.4.

	The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols, update this file. And update the relevant
instances in the tree; don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-tree code are supposed to
be able to use diff(1).
	Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
	int (*d_revalidate)(struct dentry *, unsigned int);
	int (*d_weak_revalidate)(struct dentry *, unsigned int);
	int (*d_hash)(const struct dentry *, struct qstr *);
	int (*d_compare)(const struct dentry *, const struct dentry *,
			unsigned int, const char *, const struct qstr *);
	int (*d_delete)(struct dentry *);
	void (*d_release)(struct dentry *);
	void (*d_prune)(struct dentry *);
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
	struct vfsmount *(*d_automount)(struct path *path);
	int (*d_manage)(struct dentry *, bool);

locking rules:
		rename_lock	->d_lock	may block	rcu-walk
d_revalidate:	no		no		yes (ref-walk)	maybe
d_weak_revalidate:no		no		yes		no
d_hash:		no		no		no		maybe
d_compare:	yes		no		no		maybe
d_delete:	no		yes		no		no
d_release:	no		no		yes		no
d_prune:	no		yes		no		no
d_iput:		no		no		yes		no
d_dname:	no		no		no		no
d_automount:	no		no		yes		no
d_manage:	no		no		yes (ref-walk)	maybe
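
	As an illustration of the ref-walk/rcu-walk distinction for ->d_revalidate():
in rcu-walk mode (LOOKUP_RCU set in flags) the method may not block and should
return -ECHILD if it cannot decide without blocking, so that the VFS retries in
ref-walk mode.  A minimal, hypothetical instance might look like this
(example_dentry_still_valid() is a made-up, possibly-blocking helper):

	static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		if (flags & LOOKUP_RCU)
			return -ECHILD;	/* may not block here; ask for ref-walk mode */

		/* ref-walk mode: blocking is allowed */
		return example_dentry_still_valid(dentry) ? 1 : 0;
	}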

--------------------------- inode_operations ---------------------------
prototypes:
	int (*create) (struct inode *,struct dentry *,umode_t, bool);
	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,umode_t);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*rename2) (struct inode *, struct dentry *,
			struct inode *, struct dentry *, unsigned int);
	int (*readlink) (struct dentry *, char __user *,int);
	void * (*follow_link) (struct dentry *, struct nameidata *);
	void (*put_link) (struct dentry *, struct nameidata *, void *);
	void (*truncate) (struct inode *);
	int (*permission) (struct inode *, int);
	struct posix_acl * (*get_acl)(struct inode *, int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
	void (*update_time)(struct inode *, struct timespec *, int);
	int (*atomic_open)(struct inode *, struct dentry *,
				struct file *, unsigned open_flag,
				umode_t create_mode, int *opened);
	int (*tmpfile) (struct inode *, struct dentry *, umode_t);

locking rules:
	all may block
		i_mutex(inode)
lookup:		yes
create:		yes
link:		yes (both)
mknod:		yes
symlink:	yes
mkdir:		yes
unlink:		yes (both)
rmdir:		yes (both)	(see below)
rename:		yes (all)	(see below)
rename2:	yes (all)	(see below)
readlink:	no
follow_link:	no
put_link:	no
setattr:	yes
permission:	no (may not block if called in rcu-walk mode)
get_acl:	no
getattr:	no
setxattr:	yes
getxattr:	no
listxattr:	no
removexattr:	yes
fiemap:		no
update_time:	no
atomic_open:	yes
tmpfile:	no

	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
	Cross-directory ->rename() and rename2() have (per-superblock)
->s_vfs_rename_sem.

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.
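
	To illustrate the rcu-walk rule for ->permission(): in rcu-walk mode the
mask contains MAY_NOT_BLOCK, and an instance that might need to sleep should
return -ECHILD so that the VFS falls back to ref-walk mode.  A minimal sketch
(example_check_acl() is a made-up, possibly-blocking helper):

	static int example_permission(struct inode *inode, int mask)
	{
		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;		/* rcu-walk: retry in ref-walk mode */

		return example_check_acl(inode, mask);	/* may block */
	}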

--------------------------- super_operations ---------------------------
prototypes:
	struct inode *(*alloc_inode)(struct super_block *sb);
	void (*destroy_inode)(struct inode *);
	void (*dirty_inode) (struct inode *, int flags);
	int (*write_inode) (struct inode *, struct writeback_control *wbc);
	int (*drop_inode) (struct inode *);
	void (*evict_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	int (*freeze_fs) (struct super_block *);
	int (*unfreeze_fs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*umount_begin) (struct super_block *);
	int (*show_options)(struct seq_file *, struct dentry *);
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

locking rules:
	All may block [not true, see below]
			s_umount
alloc_inode:
destroy_inode:
dirty_inode:
write_inode:
drop_inode:				!!!inode->i_lock!!!
evict_inode:
put_super:		write
sync_fs:		read
freeze_fs:		write
unfreeze_fs:		write
statfs:			maybe(read)	(see below)
remount_fs:		write
umount_begin:		no
show_options:		no		(namespace_sem)
quota_read:		no		(see below)
quota_write:		no		(see below)
bdev_try_to_free_page:	no		(see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when we only have a dev_t given to us by userland to
identify the superblock.  Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw something up and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode.  See there for more details.

--------------------------- file_system_type ---------------------------
prototypes:
	int (*get_sb) (struct file_system_type *, int,
		       const char *, void *, struct vfsmount *);
	struct dentry *(*mount) (struct file_system_type *, int,
		       const char *, void *);
	void (*kill_sb) (struct super_block *);
locking rules:
		may block
mount		yes
kill_sb		yes

->mount() returns ERR_PTR or the root dentry; its superblock should be locked
on return.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
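
	For a typical block-device-backed filesystem both methods are usually thin
wrappers around the generic helpers.  A sketch (example_fill_super() is a
hypothetical fill_super callback):

	static struct dentry *example_mount(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data)
	{
		/* mount_bdev() returns the root dentry or an ERR_PTR() on failure */
		return mount_bdev(fs_type, flags, dev_name, data, example_fill_super);
	}

	static struct file_system_type example_fs_type = {
		.owner		= THIS_MODULE,
		.name		= "examplefs",
		.mount		= example_mount,
		.kill_sb	= kill_block_super,
		.fs_flags	= FS_REQUIRES_DEV,
	};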

--------------------------- address_space_operations --------------------------
prototypes:
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	int (*sync_page)(struct page *);
	int (*writepages)(struct address_space *, struct writeback_control *);
	int (*set_page_dirty)(struct page *page);
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs);
	int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
				unsigned long *);
	int (*migratepage)(struct address_space *, struct page *, struct page *);
	int (*launder_page)(struct page *);
	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
	int (*error_remove_page)(struct address_space *, struct page *);
	int (*swap_activate)(struct file *);
	int (*swap_deactivate)(struct file *);

locking rules:
	All except set_page_dirty and freepage may block

			PageLocked(page)	i_mutex
writepage:		yes, unlocks (see below)
readpage:		yes, unlocks
sync_page:		maybe
writepages:
set_page_dirty:		no
readpages:
write_begin:		locks the page		yes
write_end:		yes, unlocks		yes
bmap:
invalidatepage:		yes
releasepage:		yes
freepage:		yes
direct_IO:
get_xip_mem:					maybe
migratepage:		yes (both)
launder_page:		yes
is_partially_uptodate:	yes
error_remove_page:	yes
swap_activate:		no
swap_deactivate:	no

	->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).
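
	As an illustration of the "locks the page" rule for ->write_begin(): a
minimal instance (ignoring block allocation, short copies and error unwinding)
can just pin and lock the page and leave it to ->write_end() to unlock it.
This is a sketch of the locking only, not a complete implementation:

	static int example_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
	{
		struct page *page;

		page = grab_cache_page_write_begin(mapping, pos >> PAGE_CACHE_SHIFT, flags);
		if (!page)
			return -ENOMEM;

		*pagep = page;	/* returned locked; ->write_end() unlocks and releases it */
		return 0;
	}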

	->readpage() unlocks the page, either synchronously or via I/O
completion.

	->readpages() populates the pagecache with the passed pages and starts
I/O against them.  They come unlocked upon I/O completion.

	->writepage() is used for two purposes: for "memory cleansing" and for
"sync".  These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible.  So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special AOP_WRITEPAGE_ACTIVATE
value.  AOP_WRITEPAGE_ACTIVATE means that the page cannot really be written
out currently, and the VM should stop calling ->writepage() on this page for
some time.  The VM does this by moving the page to the head of the active
list, hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it.  Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete.  If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked.  Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree.  This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.
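
	Putting the rules above together, a skeletal ->writepage() might look like
the sketch below.  example_would_block() and example_submit_write() are
hypothetical helpers, and the I/O completion path is assumed to call
end_page_writeback():

	static int example_writepage(struct page *page, struct writeback_control *wbc)
	{
		if (wbc->sync_mode == WB_SYNC_NONE && example_would_block(page)) {
			/* memory cleansing call: don't block, try again later */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}

		set_page_writeback(page);
		unlock_page(page);		/* pages under writeout are not locked */
		example_submit_write(page);	/* completion ends with end_page_writeback() */
		return 0;
	}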

	->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed.  Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

	->writepages() is used for periodic writeback and for syscall-initiated
sync operations.  The address_space should start I/O against at least
*nr_to_write pages.  *nr_to_write must be decremented for each page which is
written.  The address_space implementation may write more (or fewer) pages
than *nr_to_write asks for, but it should try to be reasonably close.  If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.
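
	Many filesystems implement ->writepages() on top of write_cache_pages(),
which walks the dirty pages and decrements wbc->nr_to_write as it goes; in
this sketch example_writepage_cb() is a hypothetical per-page callback:

	static int example_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
	{
		/* write_cache_pages() honours wbc->nr_to_write and the sync mode */
		return write_cache_pages(mapping, wbc, example_writepage_cb, mapping);
	}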

	->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback.  It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.
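
	Because of that, filesystems with no per-page bookkeeping of their own
usually just point this method at one of the non-blocking generic helpers,
for example (sketch):

	static const struct address_space_operations example_aops = {
		.set_page_dirty	= __set_page_dirty_nobuffers,	/* never blocks */
	};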

	->bmap() is currently used by the legacy FIBMAP ioctl() provided by some
filesystems and by the swapper. The latter will eventually go away.  Please,
keep it that way and don't breed new callers.

	->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is NULL, the kernel uses
block_invalidatepage() instead.

	->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it.  It returns zero to
indicate that the buffers are (or may be) freeable.  If ->releasepage is NULL,
the kernel assumes that the fs has no private interest in the buffers.

	->freepage() is called when the kernel is done dropping the page
from the page cache.

	->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

	->swap_activate will be called with a non-zero argument on
files backing (non block device backed) swapfiles. A return value
of zero indicates success, in which case this file can be used for
backing swapspace. The swapspace operations will be proxied to the
address space operations.

	->swap_deactivate() will be called in the sys_swapoff()
path after ->swap_activate() returned success.

----------------------- file_lock_operations ------------------------------
prototypes:
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);

locking rules:
			inode->i_lock	may block
fl_copy_lock:		yes		no
fl_release_private:	maybe		no

----------------------- lock_manager_operations ---------------------------
prototypes:
	int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
	unsigned long (*lm_owner_key)(struct file_lock *);
	void (*lm_notify)(struct file_lock *);  /* unblock callback */
	int (*lm_grant)(struct file_lock *, struct file_lock *, int);
	void (*lm_break)(struct file_lock *); /* break_lease callback */
	int (*lm_change)(struct file_lock **, int);

locking rules:

			inode->i_lock	blocked_lock_lock	may block
lm_compare_owner:	yes[1]		maybe			no
lm_owner_key:		yes[1]		yes			no
lm_notify:		yes		yes			no
lm_grant:		no		no			no
lm_break:		yes		no			no
lm_change:		yes		no			no

[1]:	->lm_compare_owner and ->lm_owner_key are generally called with
*an* inode->i_lock held. It may not be the i_lock of the inode
associated with either file_lock argument! This is the case with deadlock
detection, since the code has to chase down the owners of locks that may
be entirely unrelated to the one on which the lock is being acquired.
For deadlock detection, however, the blocked_lock_lock is also held. The
fact that these locks are held ensures that the file_locks do not
disappear out from under you while doing the comparison or generating an
owner key.

--------------------------- buffer_head -----------------------------------
prototypes:
	void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
	called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's all the guarantees we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c are providing these. Block devices
call this method upon I/O completion.

--------------------------- block_device_operations -----------------------
prototypes:
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
			bd_mutex
open:			yes
release:		yes
ioctl:			no
compat_ioctl:		no
direct_access:		no
media_changed:		no
unlock_native_capacity:	no
revalidate_disk:	no
getgeo:			no
swap_slot_free_notify:	no	(see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.

--------------------------- file_operations -------------------------------
prototypes:
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	int (*iterate) (struct file *, struct dir_context *);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *);
	int (*release) (struct inode *, struct file *);
	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
			void __user *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
			loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
			unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*flock) (struct file *, int, struct file_lock *);
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
			size_t, unsigned int);
	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
			size_t, unsigned int);
	int (*setlease)(struct file *, long, struct file_lock **);
	long (*fallocate)(struct file *, int, loff_t, loff_t);

locking rules:
	All may block except for ->setlease.
	No VFS locks held on entry except for ->setlease.

->setlease has the file_list_lock held and must not sleep.

->llseek() locking has moved from llseek to the individual llseek
implementations.  If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something userspace has to take care of.
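
	For instance, a filesystem that wants its seeks serialized against size
changes could take the inode mutex itself, as in the sketch below (many
filesystems instead simply use generic_file_llseek(), which relies on
i_size_read() and needs no extra locking):

	static loff_t example_llseek(struct file *file, loff_t offset, int whence)
	{
		struct inode *inode = file_inode(file);
		loff_t ret;

		mutex_lock(&inode->i_mutex);
		ret = generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes, i_size_read(inode));
		mutex_unlock(&inode->i_mutex);
		return ret;
	}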

	->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about.  Return values > 0 will be
mapped to zero in the VFS layer.
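
	A typical instance just delegates to fasync_helper(); struct example_dev
and its fasync_queue member are hypothetical driver state here:

	static int example_fasync(int fd, struct file *file, int on)
	{
		struct example_dev *dev = file->private_data;

		/* fasync_helper() maintains FASYNC in file->f_flags for us */
		return fasync_helper(fd, file, on, &dev->fasync_queue);
	}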

	->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

--------------------------- dquot_operations -------------------------------
prototypes:
	int (*write_dquot) (struct dquot *);
	int (*acquire_dquot) (struct dquot *);
	int (*release_dquot) (struct dquot *);
	int (*mark_dirty) (struct dquot *);
	int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking with respect to the filesystem and call the generic quota
operations.

What a filesystem should expect from the generic quota functions:

		FS recursion	Held locks when called
write_dquot:	yes		dqonoff_sem or dqptr_sem
acquire_dquot:	yes		dqonoff_sem or dqptr_sem
release_dquot:	yes		dqonoff_sem or dqptr_sem
mark_dirty:	no		-
write_info:	yes		dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/quota/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
	void (*open)(struct vm_area_struct*);
	void (*close)(struct vm_area_struct*);
	int (*fault)(struct vm_area_struct*, struct vm_fault *);
	void (*map_pages)(struct vm_area_struct *, struct vm_fault *);
	int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
		mmap_sem	PageLocked(page)
open:		yes
close:		yes
fault:		yes		can return with page locked
map_pages:	yes
page_mkwrite:	yes		can return with page locked
access:		yes

	->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page.
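
	A sketch of a ->fault() instance that guards against truncation follows;
example_find_or_create_page() is a made-up helper returning the page with an
elevated reference count:

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vma->vm_file);
		struct page *page;

	retry:
		if (vmf->pgoff >= DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))
			return VM_FAULT_SIGBUS;		/* fault beyond end of file */

		page = example_find_or_create_page(inode->i_mapping, vmf->pgoff);
		if (!page)
			return VM_FAULT_OOM;

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			/* lost a race with truncate/invalidate: drop the page, retry */
			unlock_page(page);
			page_cache_release(page);
			goto retry;
		}

		vmf->page = page;
		return VM_FAULT_LOCKED;			/* the VM unlocks the page */
	}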

	->map_pages() is called when the VM asks to map easily accessible pages.
The filesystem should find and map pages associated with offsets from "pgoff"
up to "max_pgoff". ->map_pages() is called with the page table locked and must
not block.  If it's not possible to reach a page without blocking, the
filesystem should skip it. The filesystem should use do_set_pte() to set up
the page table entry. A pointer to the entry associated with offset "pgoff" is
passed in the "pte" field of the vm_fault structure. Pointers to entries for
other offsets should be calculated relative to "pte".

	->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.
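
	A sketch of such a ->page_mkwrite() instance; example_reserve_blocks() is
a made-up helper for whatever allocation the filesystem needs to do before it
can allow the write:

	static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page = vmf->page;
		struct inode *inode = file_inode(vma->vm_file);

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			/* the page was truncated out from under us */
			unlock_page(page);
			return VM_FAULT_NOPAGE;	/* the VM will retry the fault */
		}

		if (example_reserve_blocks(inode, page)) {
			unlock_page(page);
			return VM_FAULT_SIGBUS;
		}

		return VM_FAULT_LOCKED;		/* return with the page still locked */
	}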

	->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace.  This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
			Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)