Documentation/filesystems/Locking (based on kernel version 3.16)

	The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols, update this file. And update the relevant
instances in the tree; don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-tree code are supposed to
be able to use diff(1).
	Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
	int (*d_revalidate)(struct dentry *, unsigned int);
	int (*d_weak_revalidate)(struct dentry *, unsigned int);
	int (*d_hash)(const struct dentry *, struct qstr *);
	int (*d_compare)(const struct dentry *, const struct dentry *,
			unsigned int, const char *, const struct qstr *);
	int (*d_delete)(struct dentry *);
	void (*d_release)(struct dentry *);
	void (*d_prune)(struct dentry *);
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
	struct vfsmount *(*d_automount)(struct path *path);
	int (*d_manage)(struct dentry *, bool);

locking rules:
		rename_lock	->d_lock	may block	rcu-walk
d_revalidate:	no		no		yes (ref-walk)	maybe
d_weak_revalidate:	no	no		yes		no
d_hash:		no		no		no		maybe
d_compare:	yes		no		no		maybe
d_delete:	no		yes		no		no
d_release:	no		no		yes		no
d_prune:	no		yes		no		no
d_iput:		no		no		yes		no
d_dname:	no		no		no		no
d_automount:	no		no		yes		no
d_manage:	no		no		yes (ref-walk)	maybe

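	A minimal sketch of the rcu-walk rule for ->d_revalidate()
(foofs_d_revalidate and foofs_dentry_is_stale() are hypothetical); an instance
that cannot work in rcu-walk mode returns -ECHILD so the VFS falls back to
ref-walk:

	static int foofs_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		if (flags & LOOKUP_RCU)
			return -ECHILD;	/* can't block in rcu-walk mode */

		if (foofs_dentry_is_stale(dentry))	/* may block in ref-walk */
			return 0;
		return 1;
	}
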
--------------------------- inode_operations ---------------------------
prototypes:
	int (*create) (struct inode *,struct dentry *,umode_t, bool);
	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,umode_t);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*rename2) (struct inode *, struct dentry *,
			struct inode *, struct dentry *, unsigned int);
	int (*readlink) (struct dentry *, char __user *,int);
	void * (*follow_link) (struct dentry *, struct nameidata *);
	void (*put_link) (struct dentry *, struct nameidata *, void *);
	void (*truncate) (struct inode *);
	int (*permission) (struct inode *, int, unsigned int);
	int (*get_acl)(struct inode *, int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
	void (*update_time)(struct inode *, struct timespec *, int);
	int (*atomic_open)(struct inode *, struct dentry *,
				struct file *, unsigned open_flag,
				umode_t create_mode, int *opened);
	int (*tmpfile) (struct inode *, struct dentry *, umode_t);

locking rules:
	all may block
		i_mutex(inode)
lookup:		yes
create:		yes
link:		yes (both)
mknod:		yes
symlink:	yes
mkdir:		yes
unlink:		yes (both)
rmdir:		yes (both)	(see below)
rename:		yes (all)	(see below)
rename2:	yes (all)	(see below)
readlink:	no
follow_link:	no
put_link:	no
setattr:	yes
permission:	no (may not block if called in rcu-walk mode)
get_acl:	no
getattr:	no
setxattr:	yes
getxattr:	no
listxattr:	no
removexattr:	yes
fiemap:		no
update_time:	no
atomic_open:	yes
tmpfile:	no

	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
	cross-directory ->rename() and rename2() have (per-superblock)
->s_vfs_rename_sem.

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.

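	A minimal sketch of the guarantee above (foofs_unlink is hypothetical;
the WARN_ON()s merely document locks the VFS has already taken):

	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
	{
		struct inode *inode = dentry->d_inode;

		/* i_mutex is held on both the parent directory and the victim */
		WARN_ON(!mutex_is_locked(&dir->i_mutex));
		WARN_ON(!mutex_is_locked(&inode->i_mutex));

		drop_nlink(inode);
		mark_inode_dirty(inode);
		return 0;
	}
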
--------------------------- super_operations ---------------------------
prototypes:
	struct inode *(*alloc_inode)(struct super_block *sb);
	void (*destroy_inode)(struct inode *);
	void (*dirty_inode) (struct inode *, int flags);
	int (*write_inode) (struct inode *, struct writeback_control *wbc);
	int (*drop_inode) (struct inode *);
	void (*evict_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	int (*freeze_fs) (struct super_block *);
	int (*unfreeze_fs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*umount_begin) (struct super_block *);
	int (*show_options)(struct seq_file *, struct dentry *);
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

locking rules:
	All may block [not true, see below]
			s_umount
alloc_inode:
destroy_inode:
dirty_inode:
write_inode:
drop_inode:				!!!inode->i_lock!!!
evict_inode:
put_super:		write
sync_fs:		read
freeze_fs:		write
unfreeze_fs:		write
statfs:			maybe(read)	(see below)
remount_fs:		write
umount_begin:		no
show_options:		no		(namespace_sem)
quota_read:		no		(see below)
quota_write:		no		(see below)
bdev_try_to_free_page:	no		(see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when all we have is a dev_t given to us by userland to
identify the superblock.  Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode.  See there for more details.

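	A minimal sketch of the drop_inode rule above (foofs_drop_inode is
hypothetical; a filesystem that never caches unreferenced inodes can behave
like generic_delete_inode()):

	static int foofs_drop_inode(struct inode *inode)
	{
		/* called with inode->i_lock held - must not sleep or take
		 * any blocking locks; just decide whether to evict */
		return 1;	/* always evict, as generic_delete_inode() does */
	}
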
--------------------------- file_system_type ---------------------------
prototypes:
	int (*get_sb) (struct file_system_type *, int,
		       const char *, void *, struct vfsmount *);
	struct dentry *(*mount) (struct file_system_type *, int,
		       const char *, void *);
	void (*kill_sb) (struct super_block *);
locking rules:
		may block
mount		yes
kill_sb		yes

->mount() returns ERR_PTR or the root dentry; its superblock should be locked
on return.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.

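	A minimal sketch for a block-device-backed filesystem, assuming a
hypothetical foofs_fill_super() with the usual fill_super conventions:

	static struct dentry *foofs_mount(struct file_system_type *fs_type,
					  int flags, const char *dev_name,
					  void *data)
	{
		return mount_bdev(fs_type, flags, dev_name, data,
				  foofs_fill_super);
	}

	static struct file_system_type foofs_fs_type = {
		.owner		= THIS_MODULE,
		.name		= "foofs",
		.mount		= foofs_mount,
		.kill_sb	= kill_block_super,
		.fs_flags	= FS_REQUIRES_DEV,
	};
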
--------------------------- address_space_operations --------------------------
prototypes:
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	int (*sync_page)(struct page *);
	int (*writepages)(struct address_space *, struct writeback_control *);
	int (*set_page_dirty)(struct page *page);
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
	int (*releasepage) (struct page *, int);
	void (*freepage)(struct page *);
	int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
	int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
				unsigned long *);
	int (*migratepage)(struct address_space *, struct page *, struct page *);
	int (*launder_page)(struct page *);
	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
	int (*error_remove_page)(struct address_space *, struct page *);
	int (*swap_activate)(struct file *);
	int (*swap_deactivate)(struct file *);

locking rules:
	All except set_page_dirty and freepage may block

			PageLocked(page)	i_mutex
writepage:		yes, unlocks (see below)
readpage:		yes, unlocks
sync_page:		maybe
writepages:
set_page_dirty:		no
readpages:
write_begin:		locks the page		yes
write_end:		yes, unlocks		yes
bmap:
invalidatepage:		yes
releasepage:		yes
freepage:		yes
direct_IO:
get_xip_mem:					maybe
migratepage:		yes (both)
launder_page:		yes
is_partially_uptodate:	yes
error_remove_page:	yes
swap_activate:		no
swap_deactivate:	no

	->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

	->readpage() unlocks the page, either synchronously or via I/O
completion.

	->readpages() populates the pagecache with the passed pages and starts
I/O against them.  They come unlocked upon I/O completion.

	->writepage() is used for two purposes: for "memory cleansing" and for
"sync".  These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible.  So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special AOP_WRITEPAGE_ACTIVATE
value.  AOP_WRITEPAGE_ACTIVATE means that the page cannot really be written
out currently, and the VM should stop calling ->writepage() on this page for
some time.  The VM does this by moving the page to the head of the active
list, hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, ->writepage() *must* run set_page_writeback() against the page,
followed by unlocking it.  Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete.  If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked.  Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree.  This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.

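	A minimal sketch of the rules above (the foofs_* helpers are
hypothetical; a real instance also has to handle the partial page at EOF and
I/O errors):

	static int foofs_writepage(struct page *page, struct writeback_control *wbc)
	{
		if (foofs_page_io_in_progress(page)) {
			if (wbc->sync_mode == WB_SYNC_NONE) {
				/* memory cleansing: don't block, try again later */
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
			foofs_wait_for_page_io(page);	/* sync: must wait */
		}

		set_page_writeback(page);
		unlock_page(page);	/* page may be unlocked once under writeback */
		/* the I/O completion handler must call end_page_writeback(page);
		 * if nothing gets submitted, call it here instead */
		foofs_submit_page_io(page);
		return 0;
	}
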
	->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed. Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

	->writepages() is used for periodic writeback and for syscall-initiated
sync operations.  The address_space should start I/O against at least
*nr_to_write pages.  *nr_to_write must be decremented for each page which is
written.  The address_space implementation may write more (or less) pages
than *nr_to_write asks for, but it should try to be reasonably close.  If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.

	->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback.  It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.

	->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away.  Please,
keep it that way and don't breed new callers.

	->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

	->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it.  It returns zero to
indicate that the buffers are (or may be) freeable.  If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

	->freepage() is called when the kernel is done dropping the page
from the page cache.

	->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

	->swap_activate will be called with a non-zero argument on
files backing (non block device backed) swapfiles. A return value
of zero indicates success, in which case this file can be used for
backing swapspace. The swapspace operations will be proxied to the
address space operations.

	->swap_deactivate() will be called in the sys_swapoff()
path after ->swap_activate() returned success.

----------------------- file_lock_operations ------------------------------
prototypes:
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);


locking rules:
			inode->i_lock	may block
fl_copy_lock:		yes		no
fl_release_private:	maybe		no

----------------------- lock_manager_operations ---------------------------
prototypes:
	int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
	unsigned long (*lm_owner_key)(struct file_lock *);
	void (*lm_notify)(struct file_lock *);  /* unblock callback */
	int (*lm_grant)(struct file_lock *, struct file_lock *, int);
	void (*lm_break)(struct file_lock *); /* break_lease callback */
	int (*lm_change)(struct file_lock **, int);

locking rules:

			inode->i_lock	blocked_lock_lock	may block
lm_compare_owner:	yes[1]		maybe			no
lm_owner_key:		yes[1]		yes			no
lm_notify:		yes		yes			no
lm_grant:		no		no			no
lm_break:		yes		no			no
lm_change:		yes		no			no

[1]:	->lm_compare_owner and ->lm_owner_key are generally called with
*an* inode->i_lock held. It may not be the i_lock of the inode
associated with either file_lock argument! This is the case with deadlock
detection, since the code has to chase down the owners of locks that may
be entirely unrelated to the one on which the lock is being acquired.
For deadlock detection however, the blocked_lock_lock is also held. The
fact that these locks are held ensures that the file_locks do not
disappear out from under you while doing the comparison or generating an
owner key.

--------------------------- buffer_head -----------------------------------
prototypes:
	void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
	called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's the only guarantee we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c provide these. Block devices
call this method upon I/O completion.

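	A minimal sketch, close to what end_buffer_read_sync() does (foofs_end_io
is hypothetical):

	static void foofs_end_io(struct buffer_head *bh, int uptodate)
	{
		/* may run in interrupt context: no sleeping, no blocking locks */
		if (uptodate)
			set_buffer_uptodate(bh);
		else
			clear_buffer_uptodate(bh);
		unlock_buffer(bh);
		put_bh(bh);
	}
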
--------------------------- block_device_operations -----------------------
prototypes:
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
			bd_mutex
open:			yes
release:		yes
ioctl:			no
compat_ioctl:		no
direct_access:		no
media_changed:		no
unlock_native_capacity:	no
revalidate_disk:	no
getgeo:			no
swap_slot_free_notify:	no	(see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.


--------------------------- file_operations -------------------------------
prototypes:
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
	ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
	int (*iterate) (struct file *, struct dir_context *);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *);
	int (*release) (struct inode *, struct file *);
	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
			void __user *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
			loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
			unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*flock) (struct file *, int, struct file_lock *);
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
			size_t, unsigned int);
	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
			size_t, unsigned int);
	int (*setlease)(struct file *, long, struct file_lock **);
	long (*fallocate)(struct file *, int, loff_t, loff_t);

locking rules:
	All may block except for ->setlease.
	No VFS locks held on entry except for ->setlease.

->setlease has the file_list_lock held and must not sleep.

->llseek() locking has moved from llseek to the individual llseek
implementations.  If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something that userspace has to take care of.

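	A minimal sketch of an ->llseek() instance that takes i_mutex itself, as
suggested above (foofs_llseek is hypothetical):

	static loff_t foofs_llseek(struct file *file, loff_t offset, int whence)
	{
		struct inode *inode = file_inode(file);
		loff_t ret;

		mutex_lock(&inode->i_mutex);
		ret = generic_file_llseek_size(file, offset, whence,
					       inode->i_sb->s_maxbytes,
					       i_size_read(inode));
		mutex_unlock(&inode->i_mutex);
		return ret;
	}
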
->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about.  Return values > 0 will be
mapped to zero in the VFS layer.

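	A minimal sketch, assuming the driver keeps a struct fasync_struct
pointer in its private data (foofs_file and its async_queue member are
hypothetical):

	static int foofs_fasync(int fd, struct file *filp, int on)
	{
		struct foofs_file *ff = filp->private_data;

		/* fasync_helper() maintains FASYNC in filp->f_flags for us */
		return fasync_helper(fd, filp, on, &ff->async_queue);
	}
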
->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

--------------------------- dquot_operations -------------------------------
prototypes:
	int (*write_dquot) (struct dquot *);
	int (*acquire_dquot) (struct dquot *);
	int (*release_dquot) (struct dquot *);
	int (*mark_dirty) (struct dquot *);
	int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking with respect to the filesystem and call the generic quota
operations.

What a filesystem should expect from the generic quota functions:

		FS recursion	Held locks when called
write_dquot:	yes		dqonoff_sem or dqptr_sem
acquire_dquot:	yes		dqonoff_sem or dqptr_sem
release_dquot:	yes		dqonoff_sem or dqptr_sem
mark_dirty:	no		-
write_info:	yes		dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
	void (*open)(struct vm_area_struct*);
	void (*close)(struct vm_area_struct*);
	int (*fault)(struct vm_area_struct*, struct vm_fault *);
	void (*map_pages)(struct vm_area_struct *, struct vm_fault *);
	int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
		mmap_sem	PageLocked(page)
open:		yes
close:		yes
fault:		yes		can return with page locked
map_pages:	yes
page_mkwrite:	yes		can return with page locked
access:		yes

	->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page.

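	A minimal sketch of such a ->fault() instance (foofs_fault is
hypothetical; the truncate check relies on the page lock as described above):

	static int foofs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vma->vm_file);
		struct page *page;

		page = read_mapping_page(inode->i_mapping, vmf->pgoff, NULL);
		if (IS_ERR(page))
			return VM_FAULT_SIGBUS;

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			/* raced with truncate */
			unlock_page(page);
			page_cache_release(page);
			return VM_FAULT_NOPAGE;
		}
		vmf->page = page;
		return VM_FAULT_LOCKED;		/* the VM unlocks the page */
	}
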
	->map_pages() is called when the VM asks to map easily accessible
pages.  The filesystem should find and map pages associated with offsets
from "pgoff" till "max_pgoff". ->map_pages() is called with the page table
locked and must not block.  If it's not possible to reach a page without
blocking, the filesystem should skip it. The filesystem should use
do_set_pte() to set up the page table entry. The pointer to the entry
associated with offset "pgoff" is passed in the "pte" field of the vm_fault
structure. Pointers to entries for other offsets should be calculated
relative to "pte".

	->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.

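	A minimal sketch of such a ->page_mkwrite() instance (foofs_page_mkwrite
is hypothetical; block allocation and dirtying are left out):

	static int foofs_page_mkwrite(struct vm_area_struct *vma,
				      struct vm_fault *vmf)
	{
		struct page *page = vmf->page;
		struct inode *inode = file_inode(vma->vm_file);

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			/* raced with truncate: let the VM retry the fault */
			unlock_page(page);
			return VM_FAULT_NOPAGE;
		}
		/* ... allocate blocks / mark the page dirty as needed ... */
		return VM_FAULT_LOCKED;
	}
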
	->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace.  This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
			Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)