1 /**
2  * attrib.c - NTFS attribute operations.  Part of the Linux-NTFS project.
3  *
4  * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
5  * Copyright (c) 2002 Richard Russon
6  *
7  * This program/include file is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as published
9  * by the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program/include file is distributed in the hope that it will be
13  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program (in the main directory of the Linux-NTFS
19  * distribution in the file COPYING); if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  */
22 
23 #include <linux/buffer_head.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 
29 #include "attrib.h"
30 #include "debug.h"
31 #include "layout.h"
32 #include "lcnalloc.h"
33 #include "malloc.h"
34 #include "mft.h"
35 #include "ntfs.h"
36 #include "types.h"
37 
38 /**
39  * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
40  * @ni:		ntfs inode for which to map (part of) a runlist
41  * @vcn:	map runlist part containing this vcn
42  * @ctx:	active attribute search context if present or NULL if not
43  *
44  * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
45  *
46  * If @ctx is specified, it is an active search context of @ni and its base mft
47  * record.  This is needed when ntfs_map_runlist_nolock() encounters unmapped
48  * runlist fragments and allows their mapping.  If you do not have the mft
49  * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
50  * will perform the necessary mapping and unmapping.
51  *
52  * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
53  * restores it before returning.  Thus, @ctx will be left pointing to the same
54  * attribute on return as on entry.  However, the actual pointers in @ctx may
55  * point to different memory locations on return, so you must remember to reset
56  * any cached pointers from the @ctx, i.e. after the call to
57  * ntfs_map_runlist_nolock(), you will probably want to do:
58  *	m = ctx->mrec;
59  *	a = ctx->attr;
60  * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
61  * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
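 *
 * For illustration only, a hedged sketch of a caller that supplies its own
 * @ctx (error handling abbreviated, the label name is hypothetical):
 *	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
 *	if (IS_ERR(ctx->mrec))
 *		goto put_ctx_err_out;
 *	m = ctx->mrec;
 *	a = ctx->attr;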
62  *
63  * Return 0 on success and -errno on error.  There is one special error code
64  * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
65  * of bounds of the runlist.
66  *
67  * Note the runlist can be NULL after this function returns if @vcn is zero and
68  * the attribute has zero allocated size, i.e. there simply is no runlist.
69  *
70  * WARNING: If @ctx is supplied, regardless of whether success or failure is
71  *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
72  *	    is no longer valid, i.e. you need to either call
73  *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
74  *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
75  *	    why the mapping of the old inode failed.
76  *
77  * Locking: - The runlist described by @ni must be locked for writing on entry
78  *	      and is locked on return.  Note the runlist will be modified.
79  *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
80  *	      entry and it will be left unmapped on return.
81  *	    - If @ctx is not NULL, the base mft record must be mapped on entry
82  *	      and it will be left mapped on return.
83  */
84 int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
85 {
86 	VCN end_vcn;
87 	unsigned long flags;
88 	ntfs_inode *base_ni;
89 	MFT_RECORD *m;
90 	ATTR_RECORD *a;
91 	runlist_element *rl;
92 	struct page *put_this_page = NULL;
93 	int err = 0;
94 	bool ctx_is_temporary, ctx_needs_reset;
95 	ntfs_attr_search_ctx old_ctx = { NULL, };
96 
97 	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
98 			(unsigned long long)vcn);
99 	if (!NInoAttr(ni))
100 		base_ni = ni;
101 	else
102 		base_ni = ni->ext.base_ntfs_ino;
103 	if (!ctx) {
104 		ctx_is_temporary = ctx_needs_reset = true;
105 		m = map_mft_record(base_ni);
106 		if (IS_ERR(m))
107 			return PTR_ERR(m);
108 		ctx = ntfs_attr_get_search_ctx(base_ni, m);
109 		if (unlikely(!ctx)) {
110 			err = -ENOMEM;
111 			goto err_out;
112 		}
113 	} else {
114 		VCN allocated_size_vcn;
115 
116 		BUG_ON(IS_ERR(ctx->mrec));
117 		a = ctx->attr;
118 		BUG_ON(!a->non_resident);
119 		ctx_is_temporary = false;
120 		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
121 		read_lock_irqsave(&ni->size_lock, flags);
122 		allocated_size_vcn = ni->allocated_size >>
123 				ni->vol->cluster_size_bits;
124 		read_unlock_irqrestore(&ni->size_lock, flags);
125 		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
126 			end_vcn = allocated_size_vcn - 1;
127 		/*
128 		 * If we already have the attribute extent containing @vcn in
129 		 * @ctx, no need to look it up again.  We slightly cheat in
130 		 * that if vcn exceeds the allocated size, we will refuse to
131 		 * map the runlist below, so there is definitely no need to get
132 		 * the right attribute extent.
133 		 */
134 		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
135 				a->name_length == ni->name_len &&
136 				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
137 				ni->name, ni->name_len) &&
138 				sle64_to_cpu(a->data.non_resident.lowest_vcn)
139 				<= vcn && end_vcn >= vcn))
140 			ctx_needs_reset = false;
141 		else {
142 			/* Save the old search context. */
143 			old_ctx = *ctx;
144 			/*
145 			 * If the currently mapped (extent) inode is not the
146 			 * base inode we will unmap it when we reinitialize the
147 			 * search context which means we need to get a
148 			 * reference to the page containing the mapped mft
149 			 * record so we do not accidentally drop changes to the
150 			 * mft record when it has not been marked dirty yet.
151 			 */
152 			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
153 					old_ctx.base_ntfs_ino) {
154 				put_this_page = old_ctx.ntfs_ino->page;
155 				get_page(put_this_page);
156 			}
157 			/*
158 			 * Reinitialize the search context so we can lookup the
159 			 * needed attribute extent.
160 			 */
161 			ntfs_attr_reinit_search_ctx(ctx);
162 			ctx_needs_reset = true;
163 		}
164 	}
165 	if (ctx_needs_reset) {
166 		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
167 				CASE_SENSITIVE, vcn, NULL, 0, ctx);
168 		if (unlikely(err)) {
169 			if (err == -ENOENT)
170 				err = -EIO;
171 			goto err_out;
172 		}
173 		BUG_ON(!ctx->attr->non_resident);
174 	}
175 	a = ctx->attr;
176 	/*
177 	 * Only decompress the mapping pairs if @vcn is inside it.  Otherwise
178 	 * we get into problems when we try to map an out of bounds vcn because
179 	 * we then try to map the already mapped runlist fragment and
180 	 * ntfs_mapping_pairs_decompress() fails.
181 	 */
182 	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
183 	if (unlikely(vcn && vcn >= end_vcn)) {
184 		err = -ENOENT;
185 		goto err_out;
186 	}
187 	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
188 	if (IS_ERR(rl))
189 		err = PTR_ERR(rl);
190 	else
191 		ni->runlist.rl = rl;
192 err_out:
193 	if (ctx_is_temporary) {
194 		if (likely(ctx))
195 			ntfs_attr_put_search_ctx(ctx);
196 		unmap_mft_record(base_ni);
197 	} else if (ctx_needs_reset) {
198 		/*
199 		 * If there is no attribute list, restoring the search context
200 		 * is accomplished simply by copying the saved context back over
201 		 * the caller supplied context.  If there is an attribute list,
202 		 * things are more complicated as we need to deal with mapping
203 		 * of mft records and resulting potential changes in pointers.
204 		 */
205 		if (NInoAttrList(base_ni)) {
206 			/*
207 			 * If the currently mapped (extent) inode is not the
208 			 * one we had before, we need to unmap it and map the
209 			 * old one.
210 			 */
211 			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
212 				/*
213 				 * If the currently mapped inode is not the
214 				 * base inode, unmap it.
215 				 */
216 				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
217 						ctx->base_ntfs_ino) {
218 					unmap_extent_mft_record(ctx->ntfs_ino);
219 					ctx->mrec = ctx->base_mrec;
220 					BUG_ON(!ctx->mrec);
221 				}
222 				/*
223 				 * If the old mapped inode is not the base
224 				 * inode, map it.
225 				 */
226 				if (old_ctx.base_ntfs_ino &&
227 						old_ctx.ntfs_ino !=
228 						old_ctx.base_ntfs_ino) {
229 retry_map:
230 					ctx->mrec = map_mft_record(
231 							old_ctx.ntfs_ino);
232 					/*
233 					 * Something bad has happened.  If out
234 					 * of memory retry till it succeeds.
235 					 * Any other errors are fatal and we
236 					 * return the error code in ctx->mrec.
237 					 * Let the caller deal with it...  We
238 					 * just need to fudge things so the
239 					 * caller can reinit and/or put the
240 					 * search context safely.
241 					 */
242 					if (IS_ERR(ctx->mrec)) {
243 						if (PTR_ERR(ctx->mrec) ==
244 								-ENOMEM) {
245 							schedule();
246 							goto retry_map;
247 						} else
248 							old_ctx.ntfs_ino =
249 								old_ctx.
250 								base_ntfs_ino;
251 					}
252 				}
253 			}
254 			/* Update the changed pointers in the saved context. */
255 			if (ctx->mrec != old_ctx.mrec) {
256 				if (!IS_ERR(ctx->mrec))
257 					old_ctx.attr = (ATTR_RECORD*)(
258 							(u8*)ctx->mrec +
259 							((u8*)old_ctx.attr -
260 							(u8*)old_ctx.mrec));
261 				old_ctx.mrec = ctx->mrec;
262 			}
263 		}
264 		/* Restore the search context to the saved one. */
265 		*ctx = old_ctx;
266 		/*
267 		 * We drop the reference on the page we took earlier.  In the
268 		 * case that IS_ERR(ctx->mrec) is true this means we might lose
269 		 * some changes to the mft record that had been made between
270 		 * the last time it was marked dirty/written out and now.  This
271 		 * at this stage is not a problem as the mapping error is fatal
272 		 * enough that the mft record cannot be written out anyway and
273 		 * the caller is very likely to shutdown the whole inode
274 		 * immediately and mark the volume dirty for chkdsk to pick up
275 		 * the pieces anyway.
276 		 */
277 		if (put_this_page)
278 			put_page(put_this_page);
279 	}
280 	return err;
281 }
282 
283 /**
284  * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
285  * @ni:		ntfs inode for which to map (part of) a runlist
286  * @vcn:	map runlist part containing this vcn
287  *
288  * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
289  *
290  * Return 0 on success and -errno on error.  There is one special error code
291  * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
292  * of bounds of the runlist.
293  *
294  * Locking: - The runlist must be unlocked on entry and is unlocked on return.
295  *	    - This function takes the runlist lock for writing and may modify
296  *	      the runlist.
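 *
 * Minimal usage sketch (illustrative only):
 *	err = ntfs_map_runlist(ni, vcn);
 *	if (unlikely(err && err != -ENOENT))
 *		return err;
 * Here -ENOENT only means that @vcn is out of bounds of the runlist.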
297  */
298 int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
299 {
300 	int err = 0;
301 
302 	down_write(&ni->runlist.lock);
303 	/* Make sure someone else didn't do the work while we were sleeping. */
304 	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
305 			LCN_RL_NOT_MAPPED))
306 		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
307 	up_write(&ni->runlist.lock);
308 	return err;
309 }
310 
311 /**
312  * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
313  * @ni:			ntfs inode of the attribute whose runlist to search
314  * @vcn:		vcn to convert
315  * @write_locked:	true if the runlist is locked for writing
316  *
317  * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
318  * described by the ntfs inode @ni and return the corresponding logical cluster
319  * number (lcn).
320  *
321  * If the @vcn is not mapped yet, the attempt is made to map the attribute
322  * extent containing the @vcn and the vcn to lcn conversion is retried.
323  *
324  * If @write_locked is true the caller has locked the runlist for writing and
325  * if false for reading.
326  *
327  * Since lcns must be >= 0, we use negative return codes with special meaning:
328  *
329  * Return code	Meaning / Description
330  * ==========================================
331  *  LCN_HOLE	Hole / not allocated on disk.
332  *  LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
333  *  LCN_ENOMEM	Not enough memory to map runlist.
334  *  LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
335  *
336  * Locking: - The runlist must be locked on entry and is left locked on return.
337  *	    - If @write_locked is 'false', i.e. the runlist is locked for reading,
338  *	      the lock may be dropped inside the function so you cannot rely on
339  *	      the runlist still being the same when this function returns.
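 *
 * Illustrative sketch of a caller holding the runlist lock for reading (error
 * handling abbreviated):
 *	down_read(&ni->runlist.lock);
 *	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
 *	up_read(&ni->runlist.lock);
 *	if (lcn < LCN_HOLE)
 *		... handle LCN_ENOENT, LCN_ENOMEM, or LCN_EIO ...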
340  */
341 LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
342 		const bool write_locked)
343 {
344 	LCN lcn;
345 	unsigned long flags;
346 	bool is_retry = false;
347 
348 	BUG_ON(!ni);
349 	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
350 			ni->mft_no, (unsigned long long)vcn,
351 			write_locked ? "write" : "read");
352 	BUG_ON(!NInoNonResident(ni));
353 	BUG_ON(vcn < 0);
354 	if (!ni->runlist.rl) {
355 		read_lock_irqsave(&ni->size_lock, flags);
356 		if (!ni->allocated_size) {
357 			read_unlock_irqrestore(&ni->size_lock, flags);
358 			return LCN_ENOENT;
359 		}
360 		read_unlock_irqrestore(&ni->size_lock, flags);
361 	}
362 retry_remap:
363 	/* Convert vcn to lcn.  If that fails map the runlist and retry once. */
364 	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
365 	if (likely(lcn >= LCN_HOLE)) {
366 		ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
367 		return lcn;
368 	}
369 	if (lcn != LCN_RL_NOT_MAPPED) {
370 		if (lcn != LCN_ENOENT)
371 			lcn = LCN_EIO;
372 	} else if (!is_retry) {
373 		int err;
374 
375 		if (!write_locked) {
376 			up_read(&ni->runlist.lock);
377 			down_write(&ni->runlist.lock);
378 			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
379 					LCN_RL_NOT_MAPPED)) {
380 				up_write(&ni->runlist.lock);
381 				down_read(&ni->runlist.lock);
382 				goto retry_remap;
383 			}
384 		}
385 		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
386 		if (!write_locked) {
387 			up_write(&ni->runlist.lock);
388 			down_read(&ni->runlist.lock);
389 		}
390 		if (likely(!err)) {
391 			is_retry = true;
392 			goto retry_remap;
393 		}
394 		if (err == -ENOENT)
395 			lcn = LCN_ENOENT;
396 		else if (err == -ENOMEM)
397 			lcn = LCN_ENOMEM;
398 		else
399 			lcn = LCN_EIO;
400 	}
401 	if (lcn != LCN_ENOENT)
402 		ntfs_error(ni->vol->sb, "Failed with error code %lli.",
403 				(long long)lcn);
404 	return lcn;
405 }
406 
407 /**
408  * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
409  * @ni:		ntfs inode describing the runlist to search
410  * @vcn:	vcn to find
411  * @ctx:	active attribute search context if present or NULL if not
412  *
413  * Find the virtual cluster number @vcn in the runlist described by the ntfs
414  * inode @ni and return the address of the runlist element containing the @vcn.
415  *
416  * If the @vcn is not mapped yet, the attempt is made to map the attribute
417  * extent containing the @vcn and the vcn to lcn conversion is retried.
418  *
419  * If @ctx is specified, it is an active search context of @ni and its base mft
420  * record.  This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
421  * runlist fragments and allows their mapping.  If you do not have the mft
422  * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
423  * will perform the necessary mapping and unmapping.
424  *
425  * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
426  * restores it before returning.  Thus, @ctx will be left pointing to the same
427  * attribute on return as on entry.  However, the actual pointers in @ctx may
428  * point to different memory locations on return, so you must remember to reset
429  * any cached pointers from the @ctx, i.e. after the call to
430  * ntfs_attr_find_vcn_nolock(), you will probably want to do:
431  *	m = ctx->mrec;
432  *	a = ctx->attr;
433  * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
434  * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
435  * Note you need to distinguish between the lcn of the returned runlist element
436  * being >= 0 and LCN_HOLE.  In the latter case you have to return zeroes on
437  * read and allocate clusters on write.
438  *
439  * Return the runlist element containing the @vcn on success and
440  * ERR_PTR(-errno) on error.  You need to test the return value with IS_ERR()
441  * to decide if the return is success or failure and PTR_ERR() to get to the
442  * error code if IS_ERR() is true.
443  *
444  * The possible error return codes are:
445  *	-ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
446  *	-ENOMEM - Not enough memory to map runlist.
447  *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
448  *
449  * WARNING: If @ctx is supplied, regardless of whether success or failure is
450  *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
451  *	    is no longer valid, i.e. you need to either call
452  *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
453  *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
454  *	    why the mapping of the old inode failed.
455  *
456  * Locking: - The runlist described by @ni must be locked for writing on entry
457  *	      and is locked on return.  Note the runlist may be modified when
458  *	      needed runlist fragments need to be mapped.
459  *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
460  *	      entry and it will be left unmapped on return.
461  *	    - If @ctx is not NULL, the base mft record must be mapped on entry
462  *	      and it will be left mapped on return.
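 *
 * Illustrative sketch only (error paths abbreviated):
 *	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 *	if (rl->lcn >= 0)
 *		... @vcn maps to lcn rl->lcn + (vcn - rl->vcn) ...
 *	else
 *		... rl->lcn is LCN_HOLE: zero on read, allocate on write ...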
463  */
464 runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
465 		ntfs_attr_search_ctx *ctx)
466 {
467 	unsigned long flags;
468 	runlist_element *rl;
469 	int err = 0;
470 	bool is_retry = false;
471 
472 	BUG_ON(!ni);
473 	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
474 			ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
475 	BUG_ON(!NInoNonResident(ni));
476 	BUG_ON(vcn < 0);
477 	if (!ni->runlist.rl) {
478 		read_lock_irqsave(&ni->size_lock, flags);
479 		if (!ni->allocated_size) {
480 			read_unlock_irqrestore(&ni->size_lock, flags);
481 			return ERR_PTR(-ENOENT);
482 		}
483 		read_unlock_irqrestore(&ni->size_lock, flags);
484 	}
485 retry_remap:
486 	rl = ni->runlist.rl;
487 	if (likely(rl && vcn >= rl[0].vcn)) {
488 		while (likely(rl->length)) {
489 			if (unlikely(vcn < rl[1].vcn)) {
490 				if (likely(rl->lcn >= LCN_HOLE)) {
491 					ntfs_debug("Done.");
492 					return rl;
493 				}
494 				break;
495 			}
496 			rl++;
497 		}
498 		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
499 			if (likely(rl->lcn == LCN_ENOENT))
500 				err = -ENOENT;
501 			else
502 				err = -EIO;
503 		}
504 	}
505 	if (!err && !is_retry) {
506 		/*
507 		 * If the search context is invalid we cannot map the unmapped
508 		 * region.
509 		 */
510 		if (IS_ERR(ctx->mrec))
511 			err = PTR_ERR(ctx->mrec);
512 		else {
513 			/*
514 			 * The @vcn is in an unmapped region, map the runlist
515 			 * and retry.
516 			 */
517 			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
518 			if (likely(!err)) {
519 				is_retry = true;
520 				goto retry_remap;
521 			}
522 		}
523 		if (err == -EINVAL)
524 			err = -EIO;
525 	} else if (!err)
526 		err = -EIO;
527 	if (err != -ENOENT)
528 		ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
529 	return ERR_PTR(err);
530 }
531 
532 /**
533  * ntfs_attr_find - find (next) attribute in mft record
534  * @type:	attribute type to find
535  * @name:	attribute name to find (optional, i.e. NULL means don't care)
536  * @name_len:	attribute name length (only needed if @name present)
537  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
538  * @val:	attribute value to find (optional, resident attributes only)
539  * @val_len:	attribute value length
540  * @ctx:	search context with mft record and attribute to search from
541  *
542  * You should not need to call this function directly.  Use ntfs_attr_lookup()
543  * instead.
544  *
545  * ntfs_attr_find() takes a search context @ctx as parameter and searches the
546  * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
547  * attribute of @type, optionally @name and @val.
548  *
549  * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
550  * point to the found attribute.
551  *
552  * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
553  * @ctx->attr will point to the attribute before which the attribute being
554  * searched for would need to be inserted if such an action were to be desired.
555  *
556  * On actual error, ntfs_attr_find() returns -EIO.  In this case @ctx->attr is
557  * undefined and in particular do not rely on it not changing.
558  *
559  * If @ctx->is_first is 'true', the search begins with @ctx->attr itself.  If it
560  * is 'false', the search begins after @ctx->attr.
561  *
562  * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
563  * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
564  * @ctx->mrec belongs.  This is so we can get at the ntfs volume and hence at
565  * the upcase table.  If @ic is CASE_SENSITIVE, the comparison is case
566  * sensitive.  When @name is present, @name_len is the @name length in Unicode
567  * characters.
568  *
569  * If @name is not present (NULL), we assume that the unnamed attribute is
570  * being searched for.
571  *
572  * Finally, the resident attribute value @val is looked for, if present.  If
573  * @val is not present (NULL), @val_len is ignored.
574  *
575  * ntfs_attr_find() only searches the specified mft record and it ignores the
576  * presence of an attribute list attribute (unless it is the one being searched
577  * for, obviously).  If you need to take attribute lists into consideration,
578  * use ntfs_attr_lookup() instead (see below).  This also means that you cannot
579  * use ntfs_attr_find() to search for extent records of non-resident
580  * attributes, as extents with lowest_vcn != 0 are usually described by the
581  * attribute list attribute only. - Note that it is possible that the first
582  * extent is only in the attribute list while the last extent is in the base
583  * mft record, so do not rely on being able to find the first extent in the
584  * base mft record.
585  *
586  * Warning: Never use @val when looking for attribute types which can be
587  *	    non-resident as this most likely will result in a crash!
588  */
589 static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
590 		const u32 name_len, const IGNORE_CASE_BOOL ic,
591 		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
592 {
593 	ATTR_RECORD *a;
594 	ntfs_volume *vol = ctx->ntfs_ino->vol;
595 	ntfschar *upcase = vol->upcase;
596 	u32 upcase_len = vol->upcase_len;
597 
598 	/*
599 	 * Iterate over attributes in mft record starting at @ctx->attr, or the
600 	 * attribute following that, if @ctx->is_first is 'true'.
601 	 */
602 	if (ctx->is_first) {
603 		a = ctx->attr;
604 		ctx->is_first = false;
605 	} else
606 		a = (ATTR_RECORD*)((u8*)ctx->attr +
607 				le32_to_cpu(ctx->attr->length));
608 	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
609 		u8 *mrec_end = (u8 *)ctx->mrec +
610 		               le32_to_cpu(ctx->mrec->bytes_allocated);
611 		u8 *name_end;
612 
613 		/* check whether ATTR_RECORD wraps */
614 		if ((u8 *)a < (u8 *)ctx->mrec)
615 			break;
616 
617 		/* check whether Attribute Record Header is within bounds */
618 		if ((u8 *)a > mrec_end ||
619 		    (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
620 			break;
621 
622 		/* check whether ATTR_RECORD's name is within bounds */
623 		name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
624 			   a->name_length * sizeof(ntfschar);
625 		if (name_end > mrec_end)
626 			break;
627 
628 		ctx->attr = a;
629 		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
630 				a->type == AT_END))
631 			return -ENOENT;
632 		if (unlikely(!a->length))
633 			break;
634 
635 		/* check whether ATTR_RECORD's length wraps */
636 		if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
637 			break;
638 		/* check whether ATTR_RECORD's length is within bounds */
639 		if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
640 			break;
641 
642 		if (a->type != type)
643 			continue;
644 		/*
645 		 * If @name is present, compare the two names.  If @name is
646 		 * missing, assume we want an unnamed attribute.
647 		 */
648 		if (!name) {
649 			/* The search failed if the found attribute is named. */
650 			if (a->name_length)
651 				return -ENOENT;
652 		} else if (!ntfs_are_names_equal(name, name_len,
653 			    (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
654 			    a->name_length, ic, upcase, upcase_len)) {
655 			register int rc;
656 
657 			rc = ntfs_collate_names(name, name_len,
658 					(ntfschar*)((u8*)a +
659 					le16_to_cpu(a->name_offset)),
660 					a->name_length, 1, IGNORE_CASE,
661 					upcase, upcase_len);
662 			/*
663 			 * If @name collates before a->name, there is no
664 			 * matching attribute.
665 			 */
666 			if (rc == -1)
667 				return -ENOENT;
668 			/* If the strings are not equal, continue search. */
669 			if (rc)
670 				continue;
671 			rc = ntfs_collate_names(name, name_len,
672 					(ntfschar*)((u8*)a +
673 					le16_to_cpu(a->name_offset)),
674 					a->name_length, 1, CASE_SENSITIVE,
675 					upcase, upcase_len);
676 			if (rc == -1)
677 				return -ENOENT;
678 			if (rc)
679 				continue;
680 		}
681 		/*
682 		 * The names match or @name not present and attribute is
683 		 * unnamed.  If no @val specified, we have found the attribute
684 		 * and are done.
685 		 */
686 		if (!val)
687 			return 0;
688 		/* @val is present; compare values. */
689 		else {
690 			register int rc;
691 
692 			rc = memcmp(val, (u8*)a + le16_to_cpu(
693 					a->data.resident.value_offset),
694 					min_t(u32, val_len, le32_to_cpu(
695 					a->data.resident.value_length)));
696 			/*
697 			 * If @val collates before the current attribute's
698 			 * value, there is no matching attribute.
699 			 */
700 			if (!rc) {
701 				register u32 avl;
702 
703 				avl = le32_to_cpu(
704 						a->data.resident.value_length);
705 				if (val_len == avl)
706 					return 0;
707 				if (val_len < avl)
708 					return -ENOENT;
709 			} else if (rc < 0)
710 				return -ENOENT;
711 		}
712 	}
713 	ntfs_error(vol->sb, "Inode is corrupt.  Run chkdsk.");
714 	NVolSetErrors(vol);
715 	return -EIO;
716 }
717 
718 /**
719  * load_attribute_list - load an attribute list into memory
720  * @vol:		ntfs volume from which to read
721  * @runlist:		runlist of the attribute list
722  * @al_start:		destination buffer
723  * @size:		size of the destination buffer in bytes
724  * @initialized_size:	initialized size of the attribute list
725  *
726  * Walk the runlist @runlist and load all clusters from it copying them into
727  * the linear buffer @al. The maximum number of bytes copied to @al is @size
728  * bytes. Note, @size does not need to be a multiple of the cluster size. If
729  * @initialized_size is less than @size, the region in @al between
730  * @initialized_size and @size will be zeroed and not read from disk.
731  *
732  * Return 0 on success or -errno on error.
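 *
 * Illustrative call only; the attribute list buffer and its runlist are
 * assumed to have been set up in the base ntfs inode (the field names here
 * are those used elsewhere in the driver):
 *	err = load_attribute_list(vol, &ni->attr_list_rl, ni->attr_list,
 *			ni->attr_list_size, init_size);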
733  */
734 int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
735 		const s64 size, const s64 initialized_size)
736 {
737 	LCN lcn;
738 	u8 *al = al_start;
739 	u8 *al_end = al + initialized_size;
740 	runlist_element *rl;
741 	struct buffer_head *bh;
742 	struct super_block *sb;
743 	unsigned long block_size;
744 	unsigned long block, max_block;
745 	int err = 0;
746 	unsigned char block_size_bits;
747 
748 	ntfs_debug("Entering.");
749 	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
750 			initialized_size > size)
751 		return -EINVAL;
752 	if (!initialized_size) {
753 		memset(al, 0, size);
754 		return 0;
755 	}
756 	sb = vol->sb;
757 	block_size = sb->s_blocksize;
758 	block_size_bits = sb->s_blocksize_bits;
759 	down_read(&runlist->lock);
760 	rl = runlist->rl;
761 	if (!rl) {
762 		ntfs_error(sb, "Cannot read attribute list since runlist is "
763 				"missing.");
764 		goto err_out;
765 	}
766 	/* Read all clusters specified by the runlist one run at a time. */
767 	while (rl->length) {
768 		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
769 		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
770 				(unsigned long long)rl->vcn,
771 				(unsigned long long)lcn);
772 		/* The attribute list cannot be sparse. */
773 		if (lcn < 0) {
774 			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed.  Cannot "
775 					"read attribute list.");
776 			goto err_out;
777 		}
778 		block = lcn << vol->cluster_size_bits >> block_size_bits;
779 		/* Read the run from device in chunks of block_size bytes. */
780 		max_block = block + (rl->length << vol->cluster_size_bits >>
781 				block_size_bits);
782 		ntfs_debug("max_block = 0x%lx.", max_block);
783 		do {
784 			ntfs_debug("Reading block = 0x%lx.", block);
785 			bh = sb_bread(sb, block);
786 			if (!bh) {
787 				ntfs_error(sb, "sb_bread() failed. Cannot "
788 						"read attribute list.");
789 				goto err_out;
790 			}
791 			if (al + block_size >= al_end)
792 				goto do_final;
793 			memcpy(al, bh->b_data, block_size);
794 			brelse(bh);
795 			al += block_size;
796 		} while (++block < max_block);
797 		rl++;
798 	}
799 	if (initialized_size < size) {
800 initialize:
801 		memset(al_start + initialized_size, 0, size - initialized_size);
802 	}
803 done:
804 	up_read(&runlist->lock);
805 	return err;
806 do_final:
807 	if (al < al_end) {
808 		/*
809 		 * Partial block.
810 		 *
811 		 * Note: The attribute list can be smaller than its allocation
812 		 * by multiple clusters.  This has been encountered by at least
813 		 * two people running Windows XP, thus we cannot do any
814 		 * truncation sanity checking here. (AIA)
815 		 */
816 		memcpy(al, bh->b_data, al_end - al);
817 		brelse(bh);
818 		if (initialized_size < size)
819 			goto initialize;
820 		goto done;
821 	}
822 	brelse(bh);
823 	/* Real overflow! */
824 	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
825 			"is truncated.");
826 err_out:
827 	err = -EIO;
828 	goto done;
829 }
830 
831 /**
832  * ntfs_external_attr_find - find an attribute in the attribute list of an inode
833  * @type:	attribute type to find
834  * @name:	attribute name to find (optional, i.e. NULL means don't care)
835  * @name_len:	attribute name length (only needed if @name present)
836  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
837  * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
838  * @val:	attribute value to find (optional, resident attributes only)
839  * @val_len:	attribute value length
840  * @ctx:	search context with mft record and attribute to search from
841  *
842  * You should not need to call this function directly.  Use ntfs_attr_lookup()
843  * instead.
844  *
845  * Find an attribute by searching the attribute list for the corresponding
846  * attribute list entry.  Having found the entry, map the mft record if the
847  * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
848  * in there and return it.
849  *
850  * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
851  * have been obtained from a call to ntfs_attr_get_search_ctx().  On subsequent
852  * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
853  * then the base inode).
854  *
855  * After finishing with the attribute/mft record you need to call
856  * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
857  * mapped inodes, etc).
858  *
859  * If the attribute is found, ntfs_external_attr_find() returns 0 and
860  * @ctx->attr will point to the found attribute.  @ctx->mrec will point to the
861  * mft record in which @ctx->attr is located and @ctx->al_entry will point to
862  * the attribute list entry for the attribute.
863  *
864  * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
865  * @ctx->attr will point to the attribute in the base mft record before which
866  * the attribute being searched for would need to be inserted if such an action
867  * were to be desired.  @ctx->mrec will point to the mft record in which
868  * @ctx->attr is located and @ctx->al_entry will point to the attribute list
869  * entry of the attribute before which the attribute being searched for would
870  * need to be inserted if such an action were to be desired.
871  *
872  * Thus to insert the not found attribute, one wants to add the attribute to
873  * @ctx->mrec (the base mft record) and if there is not enough space, the
874  * attribute should be placed in a newly allocated extent mft record.  The
875  * attribute list entry for the inserted attribute should be inserted in the
876  * attribute list attribute at @ctx->al_entry.
877  *
878  * On actual error, ntfs_external_attr_find() returns -EIO.  In this case
879  * @ctx->attr is undefined and in particular do not rely on it not changing.
880  */
881 static int ntfs_external_attr_find(const ATTR_TYPE type,
882 		const ntfschar *name, const u32 name_len,
883 		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
884 		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
885 {
886 	ntfs_inode *base_ni, *ni;
887 	ntfs_volume *vol;
888 	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
889 	u8 *al_start, *al_end;
890 	ATTR_RECORD *a;
891 	ntfschar *al_name;
892 	u32 al_name_len;
893 	int err = 0;
894 	static const char *es = " Unmount and run chkdsk.";
895 
896 	ni = ctx->ntfs_ino;
897 	base_ni = ctx->base_ntfs_ino;
898 	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
899 	if (!base_ni) {
900 		/* First call happens with the base mft record. */
901 		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
902 		ctx->base_mrec = ctx->mrec;
903 	}
904 	if (ni == base_ni)
905 		ctx->base_attr = ctx->attr;
906 	if (type == AT_END)
907 		goto not_found;
908 	vol = base_ni->vol;
909 	al_start = base_ni->attr_list;
910 	al_end = al_start + base_ni->attr_list_size;
911 	if (!ctx->al_entry)
912 		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
913 	/*
914 	 * Iterate over entries in attribute list starting at @ctx->al_entry,
915 	 * or the entry following that, if @ctx->is_first is 'true'.
916 	 */
917 	if (ctx->is_first) {
918 		al_entry = ctx->al_entry;
919 		ctx->is_first = false;
920 	} else
921 		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
922 				le16_to_cpu(ctx->al_entry->length));
923 	for (;; al_entry = next_al_entry) {
924 		/* Out of bounds check. */
925 		if ((u8*)al_entry < base_ni->attr_list ||
926 				(u8*)al_entry > al_end)
927 			break;	/* Inode is corrupt. */
928 		ctx->al_entry = al_entry;
929 		/* Catch the end of the attribute list. */
930 		if ((u8*)al_entry == al_end)
931 			goto not_found;
932 		if (!al_entry->length)
933 			break;
934 		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
935 				le16_to_cpu(al_entry->length) > al_end)
936 			break;
937 		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
938 				le16_to_cpu(al_entry->length));
939 		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
940 			goto not_found;
941 		if (type != al_entry->type)
942 			continue;
943 		/*
944 		 * If @name is present, compare the two names.  If @name is
945 		 * missing, assume we want an unnamed attribute.
946 		 */
947 		al_name_len = al_entry->name_length;
948 		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
949 		if (!name) {
950 			if (al_name_len)
951 				goto not_found;
952 		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
953 				name_len, ic, vol->upcase, vol->upcase_len)) {
954 			register int rc;
955 
956 			rc = ntfs_collate_names(name, name_len, al_name,
957 					al_name_len, 1, IGNORE_CASE,
958 					vol->upcase, vol->upcase_len);
959 			/*
960 			 * If @name collates before al_name, there is no
961 			 * matching attribute.
962 			 */
963 			if (rc == -1)
964 				goto not_found;
965 			/* If the strings are not equal, continue search. */
966 			if (rc)
967 				continue;
968 			/*
969 			 * FIXME: Reverse engineering showed 0, IGNORE_CASE but
970 			 * that is inconsistent with ntfs_attr_find().  The
971 			 * subsequent rc checks were also different.  Perhaps I
972 			 * made a mistake in one of the two.  Need to recheck
973 			 * which is correct or at least see what is going on...
974 			 * (AIA)
975 			 */
976 			rc = ntfs_collate_names(name, name_len, al_name,
977 					al_name_len, 1, CASE_SENSITIVE,
978 					vol->upcase, vol->upcase_len);
979 			if (rc == -1)
980 				goto not_found;
981 			if (rc)
982 				continue;
983 		}
984 		/*
985 		 * The names match or @name not present and attribute is
986 		 * unnamed.  Now check @lowest_vcn.  Continue search if the
987 		 * next attribute list entry still fits @lowest_vcn.  Otherwise
988 		 * we have reached the right one or the search has failed.
989 		 */
990 		if (lowest_vcn && (u8*)next_al_entry >= al_start	    &&
991 				(u8*)next_al_entry + 6 < al_end		    &&
992 				(u8*)next_al_entry + le16_to_cpu(
993 					next_al_entry->length) <= al_end    &&
994 				sle64_to_cpu(next_al_entry->lowest_vcn) <=
995 					lowest_vcn			    &&
996 				next_al_entry->type == al_entry->type	    &&
997 				next_al_entry->name_length == al_name_len   &&
998 				ntfs_are_names_equal((ntfschar*)((u8*)
999 					next_al_entry +
1000 					next_al_entry->name_offset),
1001 					next_al_entry->name_length,
1002 					al_name, al_name_len, CASE_SENSITIVE,
1003 					vol->upcase, vol->upcase_len))
1004 			continue;
1005 		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
1006 			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
1007 				ntfs_error(vol->sb, "Found stale mft "
1008 						"reference in attribute list "
1009 						"of base inode 0x%lx.%s",
1010 						base_ni->mft_no, es);
1011 				err = -EIO;
1012 				break;
1013 			}
1014 		} else { /* Mft references do not match. */
1015 			/* If there is a mapped record unmap it first. */
1016 			if (ni != base_ni)
1017 				unmap_extent_mft_record(ni);
1018 			/* Do we want the base record back? */
1019 			if (MREF_LE(al_entry->mft_reference) ==
1020 					base_ni->mft_no) {
1021 				ni = ctx->ntfs_ino = base_ni;
1022 				ctx->mrec = ctx->base_mrec;
1023 			} else {
1024 				/* We want an extent record. */
1025 				ctx->mrec = map_extent_mft_record(base_ni,
1026 						le64_to_cpu(
1027 						al_entry->mft_reference), &ni);
1028 				if (IS_ERR(ctx->mrec)) {
1029 					ntfs_error(vol->sb, "Failed to map "
1030 							"extent mft record "
1031 							"0x%lx of base inode "
1032 							"0x%lx.%s",
1033 							MREF_LE(al_entry->
1034 							mft_reference),
1035 							base_ni->mft_no, es);
1036 					err = PTR_ERR(ctx->mrec);
1037 					if (err == -ENOENT)
1038 						err = -EIO;
1039 					/* Cause @ctx to be sanitized below. */
1040 					ni = NULL;
1041 					break;
1042 				}
1043 				ctx->ntfs_ino = ni;
1044 			}
1045 			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1046 					le16_to_cpu(ctx->mrec->attrs_offset));
1047 		}
1048 		/*
1049 		 * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
1050 		 * mft record containing the attribute represented by the
1051 		 * current al_entry.
1052 		 */
1053 		/*
1054 		 * We could call into ntfs_attr_find() to find the right
1055 		 * attribute in this mft record but this would be less
1056 		 * efficient and not quite accurate as ntfs_attr_find() ignores
1057 		 * the attribute instance numbers for example which become
1058 		 * important when one plays with attribute lists.  Also,
1059 		 * because a proper match has been found in the attribute list
1060 		 * entry above, the comparison can now be optimized.  So it is
1061 		 * worth re-implementing a simplified ntfs_attr_find() here.
1062 		 */
1063 		a = ctx->attr;
1064 		/*
1065 		 * Use a manual loop so we can still use break and continue
1066 		 * with the same meanings as above.
1067 		 */
1068 do_next_attr_loop:
1069 		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
1070 				le32_to_cpu(ctx->mrec->bytes_allocated))
1071 			break;
1072 		if (a->type == AT_END)
1073 			break;
1074 		if (!a->length)
1075 			break;
1076 		if (al_entry->instance != a->instance)
1077 			goto do_next_attr;
1078 		/*
1079 		 * If the type and/or the name are mismatched between the
1080 		 * attribute list entry and the attribute record, there is
1081 		 * corruption so we break and return error EIO.
1082 		 */
1083 		if (al_entry->type != a->type)
1084 			break;
1085 		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
1086 				le16_to_cpu(a->name_offset)), a->name_length,
1087 				al_name, al_name_len, CASE_SENSITIVE,
1088 				vol->upcase, vol->upcase_len))
1089 			break;
1090 		ctx->attr = a;
1091 		/*
1092 		 * If no @val specified or @val specified and it matches, we
1093 		 * have found it!
1094 		 */
1095 		if (!val || (!a->non_resident && le32_to_cpu(
1096 				a->data.resident.value_length) == val_len &&
1097 				!memcmp((u8*)a +
1098 				le16_to_cpu(a->data.resident.value_offset),
1099 				val, val_len))) {
1100 			ntfs_debug("Done, found.");
1101 			return 0;
1102 		}
1103 do_next_attr:
1104 		/* Proceed to the next attribute in the current mft record. */
1105 		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
1106 		goto do_next_attr_loop;
1107 	}
1108 	if (!err) {
1109 		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
1110 				"attribute list attribute.%s", base_ni->mft_no,
1111 				es);
1112 		err = -EIO;
1113 	}
1114 	if (ni != base_ni) {
1115 		if (ni)
1116 			unmap_extent_mft_record(ni);
1117 		ctx->ntfs_ino = base_ni;
1118 		ctx->mrec = ctx->base_mrec;
1119 		ctx->attr = ctx->base_attr;
1120 	}
1121 	if (err != -ENOMEM)
1122 		NVolSetErrors(vol);
1123 	return err;
1124 not_found:
1125 	/*
1126 	 * If we were looking for AT_END, we reset the search context @ctx and
1127 	 * use ntfs_attr_find() to seek to the end of the base mft record.
1128 	 */
1129 	if (type == AT_END) {
1130 		ntfs_attr_reinit_search_ctx(ctx);
1131 		return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1132 				ctx);
1133 	}
1134 	/*
1135 	 * The attribute was not found.  Before we return, we want to ensure
1136 	 * @ctx->mrec and @ctx->attr indicate the position at which the
1137 	 * attribute should be inserted in the base mft record.  Since we also
1138 	 * want to preserve @ctx->al_entry we cannot reinitialize the search
1139 	 * context using ntfs_attr_reinit_search_ctx() as this would set
1140 	 * @ctx->al_entry to NULL.  Thus we do the necessary bits manually (see
1141 	 * ntfs_attr_init_search_ctx() below).  Note, we _only_ preserve
1142 	 * @ctx->al_entry as the remaining fields (base_*) are identical to
1143 	 * their non base_ counterparts and we cannot set @ctx->base_attr
1144 	 * correctly yet as we do not know what @ctx->attr will be set to by
1145 	 * the call to ntfs_attr_find() below.
1146 	 */
1147 	if (ni != base_ni)
1148 		unmap_extent_mft_record(ni);
1149 	ctx->mrec = ctx->base_mrec;
1150 	ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1151 			le16_to_cpu(ctx->mrec->attrs_offset));
1152 	ctx->is_first = true;
1153 	ctx->ntfs_ino = base_ni;
1154 	ctx->base_ntfs_ino = NULL;
1155 	ctx->base_mrec = NULL;
1156 	ctx->base_attr = NULL;
1157 	/*
1158 	 * In case there are multiple matches in the base mft record, need to
1159 	 * keep enumerating until we get an attribute not found response (or
1160 	 * another error), otherwise we would keep returning the same attribute
1161 	 * over and over again and all programs using us for enumeration would
1162 	 * lock up in a tight loop.
1163 	 */
1164 	do {
1165 		err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1166 				ctx);
1167 	} while (!err);
1168 	ntfs_debug("Done, not found.");
1169 	return err;
1170 }
1171 
1172 /**
1173  * ntfs_attr_lookup - find an attribute in an ntfs inode
1174  * @type:	attribute type to find
1175  * @name:	attribute name to find (optional, i.e. NULL means don't care)
1176  * @name_len:	attribute name length (only needed if @name present)
1177  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
1178  * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
1179  * @val:	attribute value to find (optional, resident attributes only)
1180  * @val_len:	attribute value length
1181  * @ctx:	search context with mft record and attribute to search from
1182  *
1183  * Find an attribute in an ntfs inode.  On first search @ctx->ntfs_ino must
1184  * be the base mft record and @ctx must have been obtained from a call to
1185  * ntfs_attr_get_search_ctx().
1186  *
1187  * This function transparently handles attribute lists and @ctx is used to
1188  * continue searches where they left off.
1189  *
1190  * After finishing with the attribute/mft record you need to call
1191  * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
1192  * mapped inodes, etc).
1193  *
1194  * Return 0 if the search was successful and -errno if not.
1195  *
1196  * When 0, @ctx->attr is the found attribute and it is in mft record
1197  * @ctx->mrec.  If an attribute list attribute is present, @ctx->al_entry is
1198  * the attribute list entry of the found attribute.
1199  *
1200  * When -ENOENT, @ctx->attr is the attribute which collates just after the
1201  * attribute being searched for, i.e. if one wants to add the attribute to the
1202  * mft record this is the correct place to insert it into.  If an attribute
1203  * list attribute is present, @ctx->al_entry is the attribute list entry which
1204  * collates just after the attribute list entry of the attribute being searched
1205  * for, i.e. if one wants to add the attribute to the mft record this is the
1206  * correct place to insert its attribute list entry into.
1207  *
1208  * When -errno != -ENOENT, an error occurred during the lookup.  @ctx->attr is
1209  * then undefined and in particular you should not rely on it not changing.
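 *
 * A typical lookup, sketched for illustration only (unnamed $DATA attribute,
 * base mft record of @ni already mapped into @m by the caller):
 *	ctx = ntfs_attr_get_search_ctx(ni, m);
 *	if (unlikely(!ctx))
 *		return -ENOMEM;
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	if (!err)
 *		a = ctx->attr;
 *	ntfs_attr_put_search_ctx(ctx);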
1210  */
1211 int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1212 		const u32 name_len, const IGNORE_CASE_BOOL ic,
1213 		const VCN lowest_vcn, const u8 *val, const u32 val_len,
1214 		ntfs_attr_search_ctx *ctx)
1215 {
1216 	ntfs_inode *base_ni;
1217 
1218 	ntfs_debug("Entering.");
1219 	BUG_ON(IS_ERR(ctx->mrec));
1220 	if (ctx->base_ntfs_ino)
1221 		base_ni = ctx->base_ntfs_ino;
1222 	else
1223 		base_ni = ctx->ntfs_ino;
1224 	/* Sanity check, just for debugging really. */
1225 	BUG_ON(!base_ni);
1226 	if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1227 		return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1228 				ctx);
1229 	return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1230 			val, val_len, ctx);
1231 }
1232 
1233 /**
1234  * ntfs_attr_init_search_ctx - initialize an attribute search context
1235  * @ctx:	attribute search context to initialize
1236  * @ni:		ntfs inode with which to initialize the search context
1237  * @mrec:	mft record with which to initialize the search context
1238  *
1239  * Initialize the attribute search context @ctx with @ni and @mrec.
1240  */
1241 static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1242 		ntfs_inode *ni, MFT_RECORD *mrec)
1243 {
1244 	*ctx = (ntfs_attr_search_ctx) {
1245 		.mrec = mrec,
1246 		/* Sanity checks are performed elsewhere. */
1247 		.attr = (ATTR_RECORD*)((u8*)mrec +
1248 				le16_to_cpu(mrec->attrs_offset)),
1249 		.is_first = true,
1250 		.ntfs_ino = ni,
1251 	};
1252 }
1253 
1254 /**
1255  * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
1256  * @ctx:	attribute search context to reinitialize
1257  *
1258  * Reinitialize the attribute search context @ctx, unmapping an associated
1259  * extent mft record if present, and initialize the search context again.
1260  *
1261  * This is used when a search for a new attribute is being started to reset
1262  * the search context to the beginning.
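 *
 * Illustrative sketch of reusing one context for a second lookup:
 *	ntfs_attr_reinit_search_ctx(ctx);
 *	err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL,
 *			0, ctx);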
1263  */
1264 void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1265 {
1266 	if (likely(!ctx->base_ntfs_ino)) {
1267 		/* No attribute list. */
1268 		ctx->is_first = true;
1269 		/* Sanity checks are performed elsewhere. */
1270 		ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1271 				le16_to_cpu(ctx->mrec->attrs_offset));
1272 		/*
1273 		 * This needs resetting due to ntfs_external_attr_find() which
1274 		 * can leave it set despite having zeroed ctx->base_ntfs_ino.
1275 		 */
1276 		ctx->al_entry = NULL;
1277 		return;
1278 	} /* Attribute list. */
1279 	if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1280 		unmap_extent_mft_record(ctx->ntfs_ino);
1281 	ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1282 	return;
1283 }
1284 
1285 /**
1286  * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
1287  * @ni:		ntfs inode with which to initialize the search context
1288  * @mrec:	mft record with which to initialize the search context
1289  *
1290  * Allocate a new attribute search context, initialize it with @ni and @mrec,
1291  * and return it. Return NULL if allocation failed.
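 *
 * Illustrative lifecycle sketch (mirrors the pattern used in this file):
 *	m = map_mft_record(ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	ctx = ntfs_attr_get_search_ctx(ni, m);
 *	if (unlikely(!ctx)) {
 *		unmap_mft_record(ni);
 *		return -ENOMEM;
 *	}
 *	... use @ctx with ntfs_attr_lookup() ...
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(ni);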
1292  */
1293 ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1294 {
1295 	ntfs_attr_search_ctx *ctx;
1296 
1297 	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1298 	if (ctx)
1299 		ntfs_attr_init_search_ctx(ctx, ni, mrec);
1300 	return ctx;
1301 }
1302 
1303 /**
1304  * ntfs_attr_put_search_ctx - release an attribute search context
1305  * @ctx:	attribute search context to free
1306  *
1307  * Release the attribute search context @ctx, unmapping an associated extent
1308  * mft record if present.
1309  */
1310 void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1311 {
1312 	if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1313 		unmap_extent_mft_record(ctx->ntfs_ino);
1314 	kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1315 	return;
1316 }
1317 
1318 #ifdef NTFS_RW
1319 
1320 /**
1321  * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
1322  * @vol:	ntfs volume to which the attribute belongs
1323  * @type:	attribute type which to find
1324  *
1325  * Search for the attribute definition record corresponding to the attribute
1326  * @type in the $AttrDef system file.
1327  *
1328  * Return the attribute type definition record if found and NULL if not found.
1329  */
1330 static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1331 		const ATTR_TYPE type)
1332 {
1333 	ATTR_DEF *ad;
1334 
1335 	BUG_ON(!vol->attrdef);
1336 	BUG_ON(!type);
1337 	for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1338 			vol->attrdef_size && ad->type; ++ad) {
1339 		/* We have not found it yet, carry on searching. */
1340 		if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1341 			continue;
1342 		/* We found the attribute; return it. */
1343 		if (likely(ad->type == type))
1344 			return ad;
1345 		/* We have gone too far already.  No point in continuing. */
1346 		break;
1347 	}
1348 	/* Attribute not found. */
1349 	ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1350 			le32_to_cpu(type));
1351 	return NULL;
1352 }
1353 
1354 /**
1355  * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
1356  * @vol:	ntfs volume to which the attribute belongs
1357  * @type:	attribute type which to check
1358  * @size:	size which to check
1359  *
1360  * Check whether the @size in bytes is valid for an attribute of @type on the
1361  * ntfs volume @vol.  This information is obtained from $AttrDef system file.
1362  *
1363  * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
1364  * listed in $AttrDef.
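 *
 * Illustrative sketch only:
 *	err = ntfs_attr_size_bounds_check(vol, AT_DATA, new_size);
 *	if (err == -ERANGE)
 *		... @new_size violates the $AttrDef limits for $DATA ...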
1365  */
1366 int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1367 		const s64 size)
1368 {
1369 	ATTR_DEF *ad;
1370 
1371 	BUG_ON(size < 0);
1372 	/*
1373 	 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1374 	 * listed in $AttrDef.
1375 	 */
1376 	if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1377 		return -ERANGE;
1378 	/* Get the $AttrDef entry for the attribute @type. */
1379 	ad = ntfs_attr_find_in_attrdef(vol, type);
1380 	if (unlikely(!ad))
1381 		return -ENOENT;
1382 	/* Do the bounds check. */
1383 	if (((sle64_to_cpu(ad->min_size) > 0) &&
1384 			size < sle64_to_cpu(ad->min_size)) ||
1385 			((sle64_to_cpu(ad->max_size) > 0) && size >
1386 			sle64_to_cpu(ad->max_size)))
1387 		return -ERANGE;
1388 	return 0;
1389 }
1390 
1391 /**
1392  * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
1393  * @vol:	ntfs volume to which the attribute belongs
1394  * @type:	attribute type which to check
1395  *
1396  * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1397  * be non-resident.  This information is obtained from $AttrDef system file.
1398  *
1399  * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
1400  * -ENOENT if the attribute is not listed in $AttrDef.
1401  */
1402 int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1403 {
1404 	ATTR_DEF *ad;
1405 
1406 	/* Find the attribute definition record in $AttrDef. */
1407 	ad = ntfs_attr_find_in_attrdef(vol, type);
1408 	if (unlikely(!ad))
1409 		return -ENOENT;
1410 	/* Check the flags and return the result. */
1411 	if (ad->flags & ATTR_DEF_RESIDENT)
1412 		return -EPERM;
1413 	return 0;
1414 }
1415 
1416 /**
1417  * ntfs_attr_can_be_resident - check if an attribute can be resident
1418  * @vol:	ntfs volume to which the attribute belongs
1419  * @type:	attribute type which to check
1420  *
1421  * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1422  * be resident.  This information is derived from our ntfs knowledge and may
1423  * not be completely accurate, especially when user defined attributes are
1424  * present.  Basically we allow everything to be resident except for index
1425  * allocation and $EA attributes.
1426  *
1427  * Return 0 if the attribute is allowed to be resident and -EPERM if not.
1428  *
1429  * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
1430  *	    otherwise windows will not boot (blue screen of death)!  We cannot
1431  *	    check for this here as we do not know which inode's $Bitmap is
1432  *	    being asked about so the caller needs to special case this.
1433  */
1434 int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1435 {
1436 	if (type == AT_INDEX_ALLOCATION)
1437 		return -EPERM;
1438 	return 0;
1439 }
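
/*
 * Usage sketch (illustrative only, assumptions noted): the $MFT/$Bitmap
 * special case from the warning above has to be added by the caller, e.g.:
 *
 *	err = ntfs_attr_can_be_resident(vol, ni->type);
 *	if (!err && ni->type == AT_BITMAP && is_mft_bitmap)
 *		err = -EPERM;
 *
 * Here is_mft_bitmap stands for whatever test the caller uses to recognise
 * the $Bitmap attribute of the system file $MFT; it is a placeholder, not an
 * existing helper.
 */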
1440 
1441 /**
1442  * ntfs_attr_record_resize - resize an attribute record
1443  * @m:		mft record containing attribute record
1444  * @a:		attribute record to resize
1445  * @new_size:	new size in bytes to which to resize the attribute record @a
1446  *
1447  * Resize the attribute record @a, i.e. the resident part of the attribute, in
1448  * the mft record @m to @new_size bytes.
1449  *
1450  * Return 0 on success and -errno on error.  The following error codes are
1451  * defined:
1452  *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1453  *
1454  * Note: On error, no modifications have been performed whatsoever.
1455  *
1456  * Warning: If you make a record smaller without having copied all the data you
1457  *	    are interested in, the data may be overwritten.
1458  */
1459 int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1460 {
1461 	ntfs_debug("Entering for new_size %u.", new_size);
1462 	/* Align to 8 bytes if it is not already done. */
1463 	if (new_size & 7)
1464 		new_size = (new_size + 7) & ~7;
1465 	/* If the actual attribute length has changed, move things around. */
1466 	if (new_size != le32_to_cpu(a->length)) {
1467 		u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1468 				le32_to_cpu(a->length) + new_size;
1469 		/* Not enough space in this mft record. */
1470 		if (new_muse > le32_to_cpu(m->bytes_allocated))
1471 			return -ENOSPC;
1472 		/* Move attributes following @a to their new location. */
1473 		memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1474 				le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1475 				(u8*)m) - le32_to_cpu(a->length));
1476 		/* Adjust @m to reflect the change in used space. */
1477 		m->bytes_in_use = cpu_to_le32(new_muse);
1478 		/* Adjust @a to reflect the new size. */
1479 		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1480 			a->length = cpu_to_le32(new_size);
1481 	}
1482 	return 0;
1483 }
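
/*
 * Usage sketch (illustrative only): growing a resident attribute record and
 * falling back when the mft record is full, as callers in this file do:
 *
 *	err = ntfs_attr_record_resize(m, a, new_size);
 *	if (err == -ENOSPC)
 *		goto make_non_resident;
 *
 * where make_non_resident is a hypothetical label that drops the search
 * context, the mapped mft record and the runlist lock and then calls
 * ntfs_attr_make_non_resident() before retrying, much as
 * ntfs_attr_extend_allocation() below does inline.  On -ENOSPC nothing has
 * been modified, so retrying after making room is safe.  Shrinking a record
 * without first copying out the tail data loses that data, as per the
 * warning above.
 */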
1484 
1485 /**
1486  * ntfs_resident_attr_value_resize - resize the value of a resident attribute
1487  * @m:		mft record containing attribute record
1488  * @a:		attribute record whose value to resize
1489  * @new_size:	new size in bytes to which to resize the attribute value of @a
1490  *
1491  * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
1492  * If the value is made bigger, the newly allocated space is cleared.
1493  *
1494  * Return 0 on success and -errno on error.  The following error codes are
1495  * defined:
1496  *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1497  *
1498  * Note: On error, no modifications have been performed whatsoever.
1499  *
1500  * Warning: If you make a record smaller without having copied all the data you
1501  *	    are interested in, the data may be overwritten.
1502  */
1503 int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1504 		const u32 new_size)
1505 {
1506 	u32 old_size;
1507 
1508 	/* Resize the resident part of the attribute record. */
1509 	if (ntfs_attr_record_resize(m, a,
1510 			le16_to_cpu(a->data.resident.value_offset) + new_size))
1511 		return -ENOSPC;
1512 	/*
1513 	 * The resize succeeded!  If we made the attribute value bigger, clear
1514 	 * the area between the old size and @new_size.
1515 	 */
1516 	old_size = le32_to_cpu(a->data.resident.value_length);
1517 	if (new_size > old_size)
1518 		memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1519 				old_size, 0, new_size - old_size);
1520 	/* Finally update the length of the attribute value. */
1521 	a->data.resident.value_length = cpu_to_le32(new_size);
1522 	return 0;
1523 }
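
/*
 * Usage sketch (illustrative only): resizing the value of a resident
 * attribute found through a search context @ctx and making sure the change
 * reaches disk:
 *
 *	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
 *			(u32)new_size);
 *	if (!err) {
 *		flush_dcache_mft_record_page(ctx->ntfs_ino);
 *		mark_mft_record_dirty(ctx->ntfs_ino);
 *	}
 *
 * Growing clears the new bytes to zero; on -ENOSPC nothing has been changed
 * and the caller can try to make space in the mft record first.
 */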
1524 
1525 /**
1526  * ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
1527  * @ni:		ntfs inode describing the attribute to convert
1528  * @data_size:	size of the resident data to copy to the non-resident attribute
1529  *
1530  * Convert the resident ntfs attribute described by the ntfs inode @ni to a
1531  * non-resident one.
1532  *
1533  * @data_size must be equal to the attribute value size.  This is needed since
1534  * we need to know the size before we can map the mft record and our callers
1535  * always know it.  The reason we cannot simply read the size from the vfs
1536  * inode i_size is that this is not necessarily uptodate.  This happens when
1537  * ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
1538  *
1539  * Return 0 on success and -errno on error.  The following error return codes
1540  * are defined:
1541  *	-EPERM	- The attribute is not allowed to be non-resident.
1542  *	-ENOMEM	- Not enough memory.
1543  *	-ENOSPC	- Not enough disk space.
1544  *	-EINVAL	- Attribute not defined on the volume.
1545  *	-EIO	- I/O error or other error.
1546  * Note that -ENOSPC is also returned in the case that there is not enough
1547  * space in the mft record to do the conversion.  This can happen when the mft
1548  * record is already very full.  The caller is responsible for trying to make
1549  * space in the mft record and trying again.  FIXME: Do we need a separate
1550  * error return code for this kind of -ENOSPC or is it always worth trying
1551  * again in case the attribute may then fit in a resident state so no need to
1552  * make it non-resident at all?  Ho-hum...  (AIA)
1553  *
1554  * NOTE to self: No changes in the attribute list are required to move from
1555  *		 a resident to a non-resident attribute.
1556  *
1557  * Locking: - The caller must hold i_mutex on the inode.
1558  */
1559 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1560 {
1561 	s64 new_size;
1562 	struct inode *vi = VFS_I(ni);
1563 	ntfs_volume *vol = ni->vol;
1564 	ntfs_inode *base_ni;
1565 	MFT_RECORD *m;
1566 	ATTR_RECORD *a;
1567 	ntfs_attr_search_ctx *ctx;
1568 	struct page *page;
1569 	runlist_element *rl;
1570 	u8 *kaddr;
1571 	unsigned long flags;
1572 	int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1573 	u32 attr_size;
1574 	u8 old_res_attr_flags;
1575 
1576 	/* Check that the attribute is allowed to be non-resident. */
1577 	err = ntfs_attr_can_be_non_resident(vol, ni->type);
1578 	if (unlikely(err)) {
1579 		if (err == -EPERM)
1580 			ntfs_debug("Attribute is not allowed to be "
1581 					"non-resident.");
1582 		else
1583 			ntfs_debug("Attribute not defined on the NTFS "
1584 					"volume!");
1585 		return err;
1586 	}
1587 	/*
1588 	 * FIXME: Compressed and encrypted attributes are not supported when
1589 	 * writing and we should never have gotten here for them.
1590 	 */
1591 	BUG_ON(NInoCompressed(ni));
1592 	BUG_ON(NInoEncrypted(ni));
1593 	/*
1594 	 * The size needs to be aligned to a cluster boundary for allocation
1595 	 * purposes.
1596 	 */
1597 	new_size = (data_size + vol->cluster_size - 1) &
1598 			~(vol->cluster_size - 1);
1599 	if (new_size > 0) {
1600 		/*
1601 		 * Will need the page later and since the page lock nests
1602 		 * outside all ntfs locks, we need to get the page now.
1603 		 */
1604 		page = find_or_create_page(vi->i_mapping, 0,
1605 				mapping_gfp_mask(vi->i_mapping));
1606 		if (unlikely(!page))
1607 			return -ENOMEM;
1608 		/* Start by allocating clusters to hold the attribute value. */
1609 		rl = ntfs_cluster_alloc(vol, 0, new_size >>
1610 				vol->cluster_size_bits, -1, DATA_ZONE, true);
1611 		if (IS_ERR(rl)) {
1612 			err = PTR_ERR(rl);
1613 			ntfs_debug("Failed to allocate cluster%s, error code "
1614 					"%i.", (new_size >>
1615 					vol->cluster_size_bits) > 1 ? "s" : "",
1616 					err);
1617 			goto page_err_out;
1618 		}
1619 	} else {
1620 		rl = NULL;
1621 		page = NULL;
1622 	}
1623 	/* Determine the size of the mapping pairs array. */
1624 	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1625 	if (unlikely(mp_size < 0)) {
1626 		err = mp_size;
1627 		ntfs_debug("Failed to get size for mapping pairs array, error "
1628 				"code %i.", err);
1629 		goto rl_err_out;
1630 	}
1631 	down_write(&ni->runlist.lock);
1632 	if (!NInoAttr(ni))
1633 		base_ni = ni;
1634 	else
1635 		base_ni = ni->ext.base_ntfs_ino;
1636 	m = map_mft_record(base_ni);
1637 	if (IS_ERR(m)) {
1638 		err = PTR_ERR(m);
1639 		m = NULL;
1640 		ctx = NULL;
1641 		goto err_out;
1642 	}
1643 	ctx = ntfs_attr_get_search_ctx(base_ni, m);
1644 	if (unlikely(!ctx)) {
1645 		err = -ENOMEM;
1646 		goto err_out;
1647 	}
1648 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1649 			CASE_SENSITIVE, 0, NULL, 0, ctx);
1650 	if (unlikely(err)) {
1651 		if (err == -ENOENT)
1652 			err = -EIO;
1653 		goto err_out;
1654 	}
1655 	m = ctx->mrec;
1656 	a = ctx->attr;
1657 	BUG_ON(NInoNonResident(ni));
1658 	BUG_ON(a->non_resident);
1659 	/*
1660 	 * Calculate new offsets for the name and the mapping pairs array.
1661 	 */
1662 	if (NInoSparse(ni) || NInoCompressed(ni))
1663 		name_ofs = (offsetof(ATTR_REC,
1664 				data.non_resident.compressed_size) +
1665 				sizeof(a->data.non_resident.compressed_size) +
1666 				7) & ~7;
1667 	else
1668 		name_ofs = (offsetof(ATTR_REC,
1669 				data.non_resident.compressed_size) + 7) & ~7;
1670 	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1671 	/*
1672 	 * Determine the size of the resident part of the now non-resident
1673 	 * attribute record.
1674 	 */
1675 	arec_size = (mp_ofs + mp_size + 7) & ~7;
1676 	/*
1677 	 * If the page is not uptodate bring it uptodate by copying from the
1678 	 * attribute value.
1679 	 */
1680 	attr_size = le32_to_cpu(a->data.resident.value_length);
1681 	BUG_ON(attr_size != data_size);
1682 	if (page && !PageUptodate(page)) {
1683 		kaddr = kmap_atomic(page);
1684 		memcpy(kaddr, (u8*)a +
1685 				le16_to_cpu(a->data.resident.value_offset),
1686 				attr_size);
1687 		memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
1688 		kunmap_atomic(kaddr);
1689 		flush_dcache_page(page);
1690 		SetPageUptodate(page);
1691 	}
1692 	/* Backup the attribute flag. */
1693 	old_res_attr_flags = a->data.resident.flags;
1694 	/* Resize the resident part of the attribute record. */
1695 	err = ntfs_attr_record_resize(m, a, arec_size);
1696 	if (unlikely(err))
1697 		goto err_out;
1698 	/*
1699 	 * Convert the resident part of the attribute record to describe a
1700 	 * non-resident attribute.
1701 	 */
1702 	a->non_resident = 1;
1703 	/* Move the attribute name if it exists and update the offset. */
1704 	if (a->name_length)
1705 		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1706 				a->name_length * sizeof(ntfschar));
1707 	a->name_offset = cpu_to_le16(name_ofs);
1708 	/* Setup the fields specific to non-resident attributes. */
1709 	a->data.non_resident.lowest_vcn = 0;
1710 	a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1711 			vol->cluster_size_bits);
1712 	a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1713 	memset(&a->data.non_resident.reserved, 0,
1714 			sizeof(a->data.non_resident.reserved));
1715 	a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1716 	a->data.non_resident.data_size =
1717 			a->data.non_resident.initialized_size =
1718 			cpu_to_sle64(attr_size);
1719 	if (NInoSparse(ni) || NInoCompressed(ni)) {
1720 		a->data.non_resident.compression_unit = 0;
1721 		if (NInoCompressed(ni) || vol->major_ver < 3)
1722 			a->data.non_resident.compression_unit = 4;
1723 		a->data.non_resident.compressed_size =
1724 				a->data.non_resident.allocated_size;
1725 	} else
1726 		a->data.non_resident.compression_unit = 0;
1727 	/* Generate the mapping pairs array into the attribute record. */
1728 	err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1729 			arec_size - mp_ofs, rl, 0, -1, NULL);
1730 	if (unlikely(err)) {
1731 		ntfs_debug("Failed to build mapping pairs, error code %i.",
1732 				err);
1733 		goto undo_err_out;
1734 	}
1735 	/* Setup the in-memory attribute structure to be non-resident. */
1736 	ni->runlist.rl = rl;
1737 	write_lock_irqsave(&ni->size_lock, flags);
1738 	ni->allocated_size = new_size;
1739 	if (NInoSparse(ni) || NInoCompressed(ni)) {
1740 		ni->itype.compressed.size = ni->allocated_size;
1741 		if (a->data.non_resident.compression_unit) {
1742 			ni->itype.compressed.block_size = 1U << (a->data.
1743 					non_resident.compression_unit +
1744 					vol->cluster_size_bits);
1745 			ni->itype.compressed.block_size_bits =
1746 					ffs(ni->itype.compressed.block_size) -
1747 					1;
1748 			ni->itype.compressed.block_clusters = 1U <<
1749 					a->data.non_resident.compression_unit;
1750 		} else {
1751 			ni->itype.compressed.block_size = 0;
1752 			ni->itype.compressed.block_size_bits = 0;
1753 			ni->itype.compressed.block_clusters = 0;
1754 		}
1755 		vi->i_blocks = ni->itype.compressed.size >> 9;
1756 	} else
1757 		vi->i_blocks = ni->allocated_size >> 9;
1758 	write_unlock_irqrestore(&ni->size_lock, flags);
1759 	/*
1760 	 * This needs to be last since the address space operations ->readpage
1761 	 * and ->writepage can run concurrently with us as they are not
1762 	 * serialized on i_mutex.  Note, we are not allowed to fail once we flip
1763 	 * this switch, which is another reason to do this last.
1764 	 */
1765 	NInoSetNonResident(ni);
1766 	/* Mark the mft record dirty, so it gets written back. */
1767 	flush_dcache_mft_record_page(ctx->ntfs_ino);
1768 	mark_mft_record_dirty(ctx->ntfs_ino);
1769 	ntfs_attr_put_search_ctx(ctx);
1770 	unmap_mft_record(base_ni);
1771 	up_write(&ni->runlist.lock);
1772 	if (page) {
1773 		set_page_dirty(page);
1774 		unlock_page(page);
1775 		put_page(page);
1776 	}
1777 	ntfs_debug("Done.");
1778 	return 0;
1779 undo_err_out:
1780 	/* Convert the attribute back into a resident attribute. */
1781 	a->non_resident = 0;
1782 	/* Move the attribute name if it exists and update the offset. */
1783 	name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1784 			sizeof(a->data.resident.reserved) + 7) & ~7;
1785 	if (a->name_length)
1786 		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1787 				a->name_length * sizeof(ntfschar));
1788 	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1789 	a->name_offset = cpu_to_le16(name_ofs);
1790 	arec_size = (mp_ofs + attr_size + 7) & ~7;
1791 	/* Resize the resident part of the attribute record. */
1792 	err2 = ntfs_attr_record_resize(m, a, arec_size);
1793 	if (unlikely(err2)) {
1794 		/*
1795 		 * This cannot happen (well if memory corruption is at work it
1796 		 * could happen in theory), but deal with it as well as we can.
1797 		 * If the old size is too small, truncate the attribute,
1798 		 * otherwise simply give it a larger allocated size.
1799 		 * FIXME: Should check whether chkdsk complains when the
1800 		 * allocated size is much bigger than the resident value size.
1801 		 */
1802 		arec_size = le32_to_cpu(a->length);
1803 		if ((mp_ofs + attr_size) > arec_size) {
1804 			err2 = attr_size;
1805 			attr_size = arec_size - mp_ofs;
1806 			ntfs_error(vol->sb, "Failed to undo partial resident "
1807 					"to non-resident attribute "
1808 					"conversion.  Truncating inode 0x%lx, "
1809 					"attribute type 0x%x from %i bytes to "
1810 					"%i bytes to maintain metadata "
1811 					"consistency.  THIS MEANS YOU ARE "
1812 					"LOSING %i BYTES DATA FROM THIS %s.",
1813 					vi->i_ino,
1814 					(unsigned)le32_to_cpu(ni->type),
1815 					err2, attr_size, err2 - attr_size,
1816 					((ni->type == AT_DATA) &&
1817 					!ni->name_len) ? "FILE": "ATTRIBUTE");
1818 			write_lock_irqsave(&ni->size_lock, flags);
1819 			ni->initialized_size = attr_size;
1820 			i_size_write(vi, attr_size);
1821 			write_unlock_irqrestore(&ni->size_lock, flags);
1822 		}
1823 	}
1824 	/* Setup the fields specific to resident attributes. */
1825 	a->data.resident.value_length = cpu_to_le32(attr_size);
1826 	a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1827 	a->data.resident.flags = old_res_attr_flags;
1828 	memset(&a->data.resident.reserved, 0,
1829 			sizeof(a->data.resident.reserved));
1830 	/* Copy the data from the page back to the attribute value. */
1831 	if (page) {
1832 		kaddr = kmap_atomic(page);
1833 		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1834 		kunmap_atomic(kaddr);
1835 	}
1836 	/* Setup the allocated size in the ntfs inode in case it changed. */
1837 	write_lock_irqsave(&ni->size_lock, flags);
1838 	ni->allocated_size = arec_size - mp_ofs;
1839 	write_unlock_irqrestore(&ni->size_lock, flags);
1840 	/* Mark the mft record dirty, so it gets written back. */
1841 	flush_dcache_mft_record_page(ctx->ntfs_ino);
1842 	mark_mft_record_dirty(ctx->ntfs_ino);
1843 err_out:
1844 	if (ctx)
1845 		ntfs_attr_put_search_ctx(ctx);
1846 	if (m)
1847 		unmap_mft_record(base_ni);
1848 	ni->runlist.rl = NULL;
1849 	up_write(&ni->runlist.lock);
1850 rl_err_out:
1851 	if (rl) {
1852 		if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1853 			ntfs_error(vol->sb, "Failed to release allocated "
1854 					"cluster(s) in error code path.  Run "
1855 					"chkdsk to recover the lost "
1856 					"cluster(s).");
1857 			NVolSetErrors(vol);
1858 		}
1859 		ntfs_free(rl);
1860 page_err_out:
1861 		unlock_page(page);
1862 		put_page(page);
1863 	}
1864 	if (err == -EINVAL)
1865 		err = -EIO;
1866 	return err;
1867 }
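
/*
 * Usage sketch (illustrative only): converting a resident attribute once it
 * no longer fits in its mft record and retrying the original operation,
 * mirroring ntfs_attr_extend_allocation() below.  All locks except i_mutex
 * must be dropped first:
 *
 *	attr_len = le32_to_cpu(a->data.resident.value_length);
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(base_ni);
 *	up_write(&ni->runlist.lock);
 *	err = ntfs_attr_make_non_resident(ni, attr_len);
 *	if (!err)
 *		goto retry_extend;
 *
 * -EPERM and -ENOSPC are the "expected" failures (the attribute must stay
 * resident, or there is no room for the conversion); other errors are
 * treated as hard failures by the caller sketched here.
 */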
1868 
1869 /**
1870  * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1871  * @ni:			ntfs inode of the attribute whose allocation to extend
1872  * @new_alloc_size:	new size in bytes to which to extend the allocation
1873  * @new_data_size:	new size in bytes to which to extend the data
1874  * @data_start:		beginning of region which is required to be non-sparse
1875  *
1876  * Extend the allocated space of an attribute described by the ntfs inode @ni
1877  * to @new_alloc_size bytes.  If @data_start is -1, the whole extension may be
1878  * implemented as a hole in the file (as long as both the volume and the ntfs
1879  * inode @ni have sparse support enabled).  If @data_start is >= 0, then the
1880  * region between the old allocated size and @data_start - 1 may be made sparse
1881  * but the region between @data_start and @new_alloc_size must be backed by
1882  * actual clusters.
1883  *
1884  * If @new_data_size is -1, it is ignored.  If it is >= 0, then the data size
1885  * of the attribute is extended to @new_data_size.  Note that the i_size of the
1886  * vfs inode is not updated.  Only the data size in the base attribute record
1887  * is updated.  The caller has to update i_size separately if this is required.
1888  * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1889  * size as well as for @new_data_size to be greater than @new_alloc_size.
1890  *
1891  * For resident attributes this involves resizing the attribute record and if
1892  * necessary moving it and/or other attributes into extent mft records and/or
1893  * converting the attribute to a non-resident attribute which in turn involves
1894  * extending the allocation of a non-resident attribute as described below.
1895  *
1896  * For non-resident attributes this involves allocating clusters in the data
1897  * zone on the volume (except for regions that are being made sparse) and
1898  * extending the run list to describe the allocated clusters as well as
1899  * updating the mapping pairs array of the attribute.  This in turn involves
1900  * resizing the attribute record and if necessary moving it and/or other
1901  * attributes into extent mft records and/or splitting the attribute record
1902  * into multiple extent attribute records.
1903  *
1904  * Also, the attribute list attribute is updated if present and in some of the
1905  * above cases (the ones where extent mft records/attributes come into play),
1906  * an attribute list attribute is created if not already present.
1907  *
1908  * Return the new allocated size on success and -errno on error.  In the case
1909  * that an error is encountered but a partial extension at least up to
1910  * @data_start (if present) is possible, the allocation is partially extended
1911  * and this is returned.  This means the caller must check the returned size to
1912  * determine if the extension was partial.  If @data_start is -1 then partial
1913  * allocations are not performed.
1914  *
1915  * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1916  *
1917  * Locking: This function takes the runlist lock of @ni for writing as well as
1918  * locking the mft record of the base ntfs inode.  These locks are maintained
1919  * throughout execution of the function.  These locks are required so that the
1920  * attribute can be resized safely and so that it can for example be converted
1921  * from resident to non-resident safely.
1922  *
1923  * TODO: At present attribute list attribute handling is not implemented.
1924  *
1925  * TODO: At present it is not safe to call this function for anything other
1926  * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1927  */
1928 s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1929 		const s64 new_data_size, const s64 data_start)
1930 {
1931 	VCN vcn;
1932 	s64 ll, allocated_size, start = data_start;
1933 	struct inode *vi = VFS_I(ni);
1934 	ntfs_volume *vol = ni->vol;
1935 	ntfs_inode *base_ni;
1936 	MFT_RECORD *m;
1937 	ATTR_RECORD *a;
1938 	ntfs_attr_search_ctx *ctx;
1939 	runlist_element *rl, *rl2;
1940 	unsigned long flags;
1941 	int err, mp_size;
1942 	u32 attr_len = 0; /* Silence stupid gcc warning. */
1943 	bool mp_rebuilt;
1944 
1945 #ifdef DEBUG
1946 	read_lock_irqsave(&ni->size_lock, flags);
1947 	allocated_size = ni->allocated_size;
1948 	read_unlock_irqrestore(&ni->size_lock, flags);
1949 	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1950 			"old_allocated_size 0x%llx, "
1951 			"new_allocated_size 0x%llx, new_data_size 0x%llx, "
1952 			"data_start 0x%llx.", vi->i_ino,
1953 			(unsigned)le32_to_cpu(ni->type),
1954 			(unsigned long long)allocated_size,
1955 			(unsigned long long)new_alloc_size,
1956 			(unsigned long long)new_data_size,
1957 			(unsigned long long)start);
1958 #endif
1959 retry_extend:
1960 	/*
1961 	 * For non-resident attributes, @start and @new_size need to be aligned
1962 	 * to cluster boundaries for allocation purposes.
1963 	 */
1964 	if (NInoNonResident(ni)) {
1965 		if (start > 0)
1966 			start &= ~(s64)vol->cluster_size_mask;
1967 		new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1968 				~(s64)vol->cluster_size_mask;
1969 	}
1970 	BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1971 	/* Check if new size is allowed in $AttrDef. */
1972 	err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1973 	if (unlikely(err)) {
1974 		/* Only emit errors when the write will fail completely. */
1975 		read_lock_irqsave(&ni->size_lock, flags);
1976 		allocated_size = ni->allocated_size;
1977 		read_unlock_irqrestore(&ni->size_lock, flags);
1978 		if (start < 0 || start >= allocated_size) {
1979 			if (err == -ERANGE) {
1980 				ntfs_error(vol->sb, "Cannot extend allocation "
1981 						"of inode 0x%lx, attribute "
1982 						"type 0x%x, because the new "
1983 						"allocation would exceed the "
1984 						"maximum allowed size for "
1985 						"this attribute type.",
1986 						vi->i_ino, (unsigned)
1987 						le32_to_cpu(ni->type));
1988 			} else {
1989 				ntfs_error(vol->sb, "Cannot extend allocation "
1990 						"of inode 0x%lx, attribute "
1991 						"type 0x%x, because this "
1992 						"attribute type is not "
1993 						"defined on the NTFS volume.  "
1994 						"Possible corruption!  You "
1995 						"should run chkdsk!",
1996 						vi->i_ino, (unsigned)
1997 						le32_to_cpu(ni->type));
1998 			}
1999 		}
2000 		/* Translate error code to be POSIX conformant for write(2). */
2001 		if (err == -ERANGE)
2002 			err = -EFBIG;
2003 		else
2004 			err = -EIO;
2005 		return err;
2006 	}
2007 	if (!NInoAttr(ni))
2008 		base_ni = ni;
2009 	else
2010 		base_ni = ni->ext.base_ntfs_ino;
2011 	/*
2012 	 * We will be modifying both the runlist (if non-resident) and the mft
2013 	 * record so lock them both down.
2014 	 */
2015 	down_write(&ni->runlist.lock);
2016 	m = map_mft_record(base_ni);
2017 	if (IS_ERR(m)) {
2018 		err = PTR_ERR(m);
2019 		m = NULL;
2020 		ctx = NULL;
2021 		goto err_out;
2022 	}
2023 	ctx = ntfs_attr_get_search_ctx(base_ni, m);
2024 	if (unlikely(!ctx)) {
2025 		err = -ENOMEM;
2026 		goto err_out;
2027 	}
2028 	read_lock_irqsave(&ni->size_lock, flags);
2029 	allocated_size = ni->allocated_size;
2030 	read_unlock_irqrestore(&ni->size_lock, flags);
2031 	/*
2032 	 * If non-resident, seek to the last extent.  If resident, there is
2033 	 * only one extent, so seek to that.
2034 	 */
2035 	vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
2036 			0;
2037 	/*
2038 	 * Abort if someone did the work whilst we waited for the locks.  If we
2039 	 * just converted the attribute from resident to non-resident it is
2040 	 * likely that exactly this has happened already.  We cannot quite
2041 	 * abort if we need to update the data size.
2042 	 */
2043 	if (unlikely(new_alloc_size <= allocated_size)) {
2044 		ntfs_debug("Allocated size already exceeds requested size.");
2045 		new_alloc_size = allocated_size;
2046 		if (new_data_size < 0)
2047 			goto done;
2048 		/*
2049 		 * We want the first attribute extent so that we can update the
2050 		 * data size.
2051 		 */
2052 		vcn = 0;
2053 	}
2054 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2055 			CASE_SENSITIVE, vcn, NULL, 0, ctx);
2056 	if (unlikely(err)) {
2057 		if (err == -ENOENT)
2058 			err = -EIO;
2059 		goto err_out;
2060 	}
2061 	m = ctx->mrec;
2062 	a = ctx->attr;
2063 	/* Use goto to reduce indentation. */
2064 	if (a->non_resident)
2065 		goto do_non_resident_extend;
2066 	BUG_ON(NInoNonResident(ni));
2067 	/* The total length of the attribute value. */
2068 	attr_len = le32_to_cpu(a->data.resident.value_length);
2069 	/*
2070 	 * Extend the attribute record to be able to store the new attribute
2071 	 * size.  ntfs_attr_record_resize() will not do anything if the size is
2072 	 * not changing.
2073 	 */
2074 	if (new_alloc_size < vol->mft_record_size &&
2075 			!ntfs_attr_record_resize(m, a,
2076 			le16_to_cpu(a->data.resident.value_offset) +
2077 			new_alloc_size)) {
2078 		/* The resize succeeded! */
2079 		write_lock_irqsave(&ni->size_lock, flags);
2080 		ni->allocated_size = le32_to_cpu(a->length) -
2081 				le16_to_cpu(a->data.resident.value_offset);
2082 		write_unlock_irqrestore(&ni->size_lock, flags);
2083 		if (new_data_size >= 0) {
2084 			BUG_ON(new_data_size < attr_len);
2085 			a->data.resident.value_length =
2086 					cpu_to_le32((u32)new_data_size);
2087 		}
2088 		goto flush_done;
2089 	}
2090 	/*
2091 	 * We have to drop all the locks so we can call
2092 	 * ntfs_attr_make_non_resident().  This could be optimised by try-
2093 	 * locking the first page cache page and only if that fails dropping
2094 	 * the locks, locking the page, and redoing all the locking and
2095 	 * lookups.  While this would be a huge optimisation, it is not worth
2096 	 * it as this is definitely a slow code path.
2097 	 */
2098 	ntfs_attr_put_search_ctx(ctx);
2099 	unmap_mft_record(base_ni);
2100 	up_write(&ni->runlist.lock);
2101 	/*
2102 	 * Not enough space in the mft record, try to make the attribute
2103 	 * non-resident and if successful restart the extension process.
2104 	 */
2105 	err = ntfs_attr_make_non_resident(ni, attr_len);
2106 	if (likely(!err))
2107 		goto retry_extend;
2108 	/*
2109 	 * Could not make non-resident.  If this is due to this not being
2110 	 * permitted for this attribute type or there not being enough space,
2111 	 * try to make other attributes non-resident.  Otherwise fail.
2112 	 */
2113 	if (unlikely(err != -EPERM && err != -ENOSPC)) {
2114 		/* Only emit errors when the write will fail completely. */
2115 		read_lock_irqsave(&ni->size_lock, flags);
2116 		allocated_size = ni->allocated_size;
2117 		read_unlock_irqrestore(&ni->size_lock, flags);
2118 		if (start < 0 || start >= allocated_size)
2119 			ntfs_error(vol->sb, "Cannot extend allocation of "
2120 					"inode 0x%lx, attribute type 0x%x, "
2121 					"because the conversion from resident "
2122 					"to non-resident attribute failed "
2123 					"with error code %i.", vi->i_ino,
2124 					(unsigned)le32_to_cpu(ni->type), err);
2125 		if (err != -ENOMEM)
2126 			err = -EIO;
2127 		goto conv_err_out;
2128 	}
2129 	/* TODO: Not implemented from here, abort. */
2130 	read_lock_irqsave(&ni->size_lock, flags);
2131 	allocated_size = ni->allocated_size;
2132 	read_unlock_irqrestore(&ni->size_lock, flags);
2133 	if (start < 0 || start >= allocated_size) {
2134 		if (err == -ENOSPC)
2135 			ntfs_error(vol->sb, "Not enough space in the mft "
2136 					"record/on disk for the non-resident "
2137 					"attribute value.  This case is not "
2138 					"implemented yet.");
2139 		else /* if (err == -EPERM) */
2140 			ntfs_error(vol->sb, "This attribute type may not be "
2141 					"non-resident.  This case is not "
2142 					"implemented yet.");
2143 	}
2144 	err = -EOPNOTSUPP;
2145 	goto conv_err_out;
2146 #if 0
2147 	// TODO: Attempt to make other attributes non-resident.
2148 	if (!err)
2149 		goto do_resident_extend;
2150 	/*
2151 	 * Both the attribute list attribute and the standard information
2152 	 * attribute must remain in the base inode.  Thus, if this is one of
2153 	 * these attributes, we have to try to move other attributes out into
2154 	 * extent mft records instead.
2155 	 */
2156 	if (ni->type == AT_ATTRIBUTE_LIST ||
2157 			ni->type == AT_STANDARD_INFORMATION) {
2158 		// TODO: Attempt to move other attributes into extent mft
2159 		// records.
2160 		err = -EOPNOTSUPP;
2161 		if (!err)
2162 			goto do_resident_extend;
2163 		goto err_out;
2164 	}
2165 	// TODO: Attempt to move this attribute to an extent mft record, but
2166 	// only if it is not already the only attribute in an mft record in
2167 	// which case there would be nothing to gain.
2168 	err = -EOPNOTSUPP;
2169 	if (!err)
2170 		goto do_resident_extend;
2171 	/* There is nothing we can do to make enough space. )-: */
2172 	goto err_out;
2173 #endif
2174 do_non_resident_extend:
2175 	BUG_ON(!NInoNonResident(ni));
2176 	if (new_alloc_size == allocated_size) {
2177 		BUG_ON(vcn);
2178 		goto alloc_done;
2179 	}
2180 	/*
2181 	 * If the data starts after the end of the old allocation, this is a
2182 	 * $DATA attribute and sparse attributes are enabled on the volume and
2183 	 * for this inode, then create a sparse region between the old
2184 	 * allocated size and the start of the data.  Otherwise simply proceed
2185 	 * with filling the whole space between the old allocated size and the
2186 	 * new allocated size with clusters.
2187 	 */
2188 	if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2189 			!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2190 		goto skip_sparse;
2191 	// TODO: This is not implemented yet.  We just fill in with real
2192 	// clusters for now...
2193 	ntfs_debug("Inserting holes is not-implemented yet.  Falling back to "
2194 			"allocating real clusters instead.");
2195 skip_sparse:
2196 	rl = ni->runlist.rl;
2197 	if (likely(rl)) {
2198 		/* Seek to the end of the runlist. */
2199 		while (rl->length)
2200 			rl++;
2201 	}
2202 	/* If this attribute extent is not mapped, map it now. */
2203 	if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2204 			(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2205 			(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2206 		if (!rl && !allocated_size)
2207 			goto first_alloc;
2208 		rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2209 		if (IS_ERR(rl)) {
2210 			err = PTR_ERR(rl);
2211 			if (start < 0 || start >= allocated_size)
2212 				ntfs_error(vol->sb, "Cannot extend allocation "
2213 						"of inode 0x%lx, attribute "
2214 						"type 0x%x, because the "
2215 						"mapping of a runlist "
2216 						"fragment failed with error "
2217 						"code %i.", vi->i_ino,
2218 						(unsigned)le32_to_cpu(ni->type),
2219 						err);
2220 			if (err != -ENOMEM)
2221 				err = -EIO;
2222 			goto err_out;
2223 		}
2224 		ni->runlist.rl = rl;
2225 		/* Seek to the end of the runlist. */
2226 		while (rl->length)
2227 			rl++;
2228 	}
2229 	/*
2230 	 * We now know the runlist of the last extent is mapped and @rl is at
2231 	 * the end of the runlist.  We want to begin allocating clusters
2232 	 * starting at the last allocated cluster to reduce fragmentation.  If
2233 	 * there are no valid LCNs in the attribute we let the cluster
2234 	 * allocator choose the starting cluster.
2235 	 */
2236 	/* If the last LCN is a hole or similar, seek back to the last real LCN. */
2237 	while (rl->lcn < 0 && rl > ni->runlist.rl)
2238 		rl--;
2239 first_alloc:
2240 	// FIXME: Need to implement partial allocations so at least part of the
2241 	// write can be performed when start >= 0.  (Needed for POSIX write(2)
2242 	// conformance.)
2243 	rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2244 			(new_alloc_size - allocated_size) >>
2245 			vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2246 			rl->lcn + rl->length : -1, DATA_ZONE, true);
2247 	if (IS_ERR(rl2)) {
2248 		err = PTR_ERR(rl2);
2249 		if (start < 0 || start >= allocated_size)
2250 			ntfs_error(vol->sb, "Cannot extend allocation of "
2251 					"inode 0x%lx, attribute type 0x%x, "
2252 					"because the allocation of clusters "
2253 					"failed with error code %i.", vi->i_ino,
2254 					(unsigned)le32_to_cpu(ni->type), err);
2255 		if (err != -ENOMEM && err != -ENOSPC)
2256 			err = -EIO;
2257 		goto err_out;
2258 	}
2259 	rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2260 	if (IS_ERR(rl)) {
2261 		err = PTR_ERR(rl);
2262 		if (start < 0 || start >= allocated_size)
2263 			ntfs_error(vol->sb, "Cannot extend allocation of "
2264 					"inode 0x%lx, attribute type 0x%x, "
2265 					"because the runlist merge failed "
2266 					"with error code %i.", vi->i_ino,
2267 					(unsigned)le32_to_cpu(ni->type), err);
2268 		if (err != -ENOMEM)
2269 			err = -EIO;
2270 		if (ntfs_cluster_free_from_rl(vol, rl2)) {
2271 			ntfs_error(vol->sb, "Failed to release allocated "
2272 					"cluster(s) in error code path.  Run "
2273 					"chkdsk to recover the lost "
2274 					"cluster(s).");
2275 			NVolSetErrors(vol);
2276 		}
2277 		ntfs_free(rl2);
2278 		goto err_out;
2279 	}
2280 	ni->runlist.rl = rl;
2281 	ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2282 			allocated_size) >> vol->cluster_size_bits);
2283 	/* Find the runlist element with which the attribute extent starts. */
2284 	ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2285 	rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2286 	BUG_ON(!rl2);
2287 	BUG_ON(!rl2->length);
2288 	BUG_ON(rl2->lcn < LCN_HOLE);
2289 	mp_rebuilt = false;
2290 	/* Get the size for the new mapping pairs array for this extent. */
2291 	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2292 	if (unlikely(mp_size <= 0)) {
2293 		err = mp_size;
2294 		if (start < 0 || start >= allocated_size)
2295 			ntfs_error(vol->sb, "Cannot extend allocation of "
2296 					"inode 0x%lx, attribute type 0x%x, "
2297 					"because determining the size for the "
2298 					"mapping pairs failed with error code "
2299 					"%i.", vi->i_ino,
2300 					(unsigned)le32_to_cpu(ni->type), err);
2301 		err = -EIO;
2302 		goto undo_alloc;
2303 	}
2304 	/* Extend the attribute record to fit the bigger mapping pairs array. */
2305 	attr_len = le32_to_cpu(a->length);
2306 	err = ntfs_attr_record_resize(m, a, mp_size +
2307 			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2308 	if (unlikely(err)) {
2309 		BUG_ON(err != -ENOSPC);
2310 		// TODO: Deal with this by moving this extent to a new mft
2311 		// record or by starting a new extent in a new mft record,
2312 		// possibly by extending this extent partially and filling it
2313 		// and creating a new extent for the remainder, or by making
2314 		// other attributes non-resident and/or by moving other
2315 		// attributes out of this mft record.
2316 		if (start < 0 || start >= allocated_size)
2317 			ntfs_error(vol->sb, "Not enough space in the mft "
2318 					"record for the extended attribute "
2319 					"record.  This case is not "
2320 					"implemented yet.");
2321 		err = -EOPNOTSUPP;
2322 		goto undo_alloc;
2323 	}
2324 	mp_rebuilt = true;
2325 	/* Generate the mapping pairs array directly into the attr record. */
2326 	err = ntfs_mapping_pairs_build(vol, (u8*)a +
2327 			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2328 			mp_size, rl2, ll, -1, NULL);
2329 	if (unlikely(err)) {
2330 		if (start < 0 || start >= allocated_size)
2331 			ntfs_error(vol->sb, "Cannot extend allocation of "
2332 					"inode 0x%lx, attribute type 0x%x, "
2333 					"because building the mapping pairs "
2334 					"failed with error code %i.", vi->i_ino,
2335 					(unsigned)le32_to_cpu(ni->type), err);
2336 		err = -EIO;
2337 		goto undo_alloc;
2338 	}
2339 	/* Update the highest_vcn. */
2340 	a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2341 			vol->cluster_size_bits) - 1);
2342 	/*
2343 	 * We now have extended the allocated size of the attribute.  Reflect
2344 	 * this in the ntfs_inode structure and the attribute record.
2345 	 */
2346 	if (a->data.non_resident.lowest_vcn) {
2347 		/*
2348 		 * We are not in the first attribute extent, switch to it, but
2349 		 * first ensure the changes will make it to disk later.
2350 		 */
2351 		flush_dcache_mft_record_page(ctx->ntfs_ino);
2352 		mark_mft_record_dirty(ctx->ntfs_ino);
2353 		ntfs_attr_reinit_search_ctx(ctx);
2354 		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2355 				CASE_SENSITIVE, 0, NULL, 0, ctx);
2356 		if (unlikely(err))
2357 			goto restore_undo_alloc;
2358 		/* @m is not used any more so no need to set it. */
2359 		a = ctx->attr;
2360 	}
2361 	write_lock_irqsave(&ni->size_lock, flags);
2362 	ni->allocated_size = new_alloc_size;
2363 	a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2364 	/*
2365 	 * FIXME: This would fail if @ni is a directory, $MFT, or an index,
2366 	 * since those can have sparse/compressed set.  For example, a directory
2367 	 * can be set compressed even though it is not compressed itself and in
2368 	 * that case the bit means that files are to be created compressed in the
2369 	 * directory...  At present this is ok as this code is only called for
2370 	 * regular files, and only for their $DATA attribute(s).
2371 	 * FIXME: The calculation is wrong if we created a hole above.  For now
2372 	 * it does not matter as we never create holes.
2373 	 */
2374 	if (NInoSparse(ni) || NInoCompressed(ni)) {
2375 		ni->itype.compressed.size += new_alloc_size - allocated_size;
2376 		a->data.non_resident.compressed_size =
2377 				cpu_to_sle64(ni->itype.compressed.size);
2378 		vi->i_blocks = ni->itype.compressed.size >> 9;
2379 	} else
2380 		vi->i_blocks = new_alloc_size >> 9;
2381 	write_unlock_irqrestore(&ni->size_lock, flags);
2382 alloc_done:
2383 	if (new_data_size >= 0) {
2384 		BUG_ON(new_data_size <
2385 				sle64_to_cpu(a->data.non_resident.data_size));
2386 		a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2387 	}
2388 flush_done:
2389 	/* Ensure the changes make it to disk. */
2390 	flush_dcache_mft_record_page(ctx->ntfs_ino);
2391 	mark_mft_record_dirty(ctx->ntfs_ino);
2392 done:
2393 	ntfs_attr_put_search_ctx(ctx);
2394 	unmap_mft_record(base_ni);
2395 	up_write(&ni->runlist.lock);
2396 	ntfs_debug("Done, new_allocated_size 0x%llx.",
2397 			(unsigned long long)new_alloc_size);
2398 	return new_alloc_size;
2399 restore_undo_alloc:
2400 	if (start < 0 || start >= allocated_size)
2401 		ntfs_error(vol->sb, "Cannot complete extension of allocation "
2402 				"of inode 0x%lx, attribute type 0x%x, because "
2403 				"lookup of first attribute extent failed with "
2404 				"error code %i.", vi->i_ino,
2405 				(unsigned)le32_to_cpu(ni->type), err);
2406 	if (err == -ENOENT)
2407 		err = -EIO;
2408 	ntfs_attr_reinit_search_ctx(ctx);
2409 	if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2410 			allocated_size >> vol->cluster_size_bits, NULL, 0,
2411 			ctx)) {
2412 		ntfs_error(vol->sb, "Failed to find last attribute extent of "
2413 				"attribute in error code path.  Run chkdsk to "
2414 				"recover.");
2415 		write_lock_irqsave(&ni->size_lock, flags);
2416 		ni->allocated_size = new_alloc_size;
2417 		/*
2418 		 * FIXME: This would fail if @ni is a directory...  See above.
2419 		 * FIXME: The calculation is wrong if we created a hole above.
2420 		 * For now it does not matter as we never create holes.
2421 		 */
2422 		if (NInoSparse(ni) || NInoCompressed(ni)) {
2423 			ni->itype.compressed.size += new_alloc_size -
2424 					allocated_size;
2425 			vi->i_blocks = ni->itype.compressed.size >> 9;
2426 		} else
2427 			vi->i_blocks = new_alloc_size >> 9;
2428 		write_unlock_irqrestore(&ni->size_lock, flags);
2429 		ntfs_attr_put_search_ctx(ctx);
2430 		unmap_mft_record(base_ni);
2431 		up_write(&ni->runlist.lock);
2432 		/*
2433 		 * The only thing that is now wrong is the allocated size of the
2434 		 * base attribute extent which chkdsk should be able to fix.
2435 		 */
2436 		NVolSetErrors(vol);
2437 		return err;
2438 	}
2439 	ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2440 			(allocated_size >> vol->cluster_size_bits) - 1);
2441 undo_alloc:
2442 	ll = allocated_size >> vol->cluster_size_bits;
2443 	if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2444 		ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2445 				"in error code path.  Run chkdsk to recover "
2446 				"the lost cluster(s).");
2447 		NVolSetErrors(vol);
2448 	}
2449 	m = ctx->mrec;
2450 	a = ctx->attr;
2451 	/*
2452 	 * If the runlist truncation fails and/or the search context is no
2453 	 * longer valid, we cannot resize the attribute record or build the
2454 	 * mapping pairs array thus we mark the inode bad so that no access to
2455 	 * the freed clusters can happen.
2456 	 */
2457 	if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2458 		ntfs_error(vol->sb, "Failed to %s in error code path.  Run "
2459 				"chkdsk to recover.", IS_ERR(m) ?
2460 				"restore attribute search context" :
2461 				"truncate attribute runlist");
2462 		NVolSetErrors(vol);
2463 	} else if (mp_rebuilt) {
2464 		if (ntfs_attr_record_resize(m, a, attr_len)) {
2465 			ntfs_error(vol->sb, "Failed to restore attribute "
2466 					"record in error code path.  Run "
2467 					"chkdsk to recover.");
2468 			NVolSetErrors(vol);
2469 		} else /* if (success) */ {
2470 			if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2471 					a->data.non_resident.
2472 					mapping_pairs_offset), attr_len -
2473 					le16_to_cpu(a->data.non_resident.
2474 					mapping_pairs_offset), rl2, ll, -1,
2475 					NULL)) {
2476 				ntfs_error(vol->sb, "Failed to restore "
2477 						"mapping pairs array in error "
2478 						"code path.  Run chkdsk to "
2479 						"recover.");
2480 				NVolSetErrors(vol);
2481 			}
2482 			flush_dcache_mft_record_page(ctx->ntfs_ino);
2483 			mark_mft_record_dirty(ctx->ntfs_ino);
2484 		}
2485 	}
2486 err_out:
2487 	if (ctx)
2488 		ntfs_attr_put_search_ctx(ctx);
2489 	if (m)
2490 		unmap_mft_record(base_ni);
2491 	up_write(&ni->runlist.lock);
2492 conv_err_out:
2493 	ntfs_debug("Failed.  Returning error code %i.", err);
2494 	return err;
2495 }
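
/*
 * Usage sketch (illustrative only): extending the allocation ahead of a
 * write and handling a partial extension, with hypothetical @pos and @count
 * describing the write:
 *
 *	ll = ntfs_attr_extend_allocation(ni, pos + count, -1, pos);
 *	if (ll < 0)
 *		return ll;
 *	if (ll < pos + count)
 *		count = ll - pos;
 *
 * Because @data_start (@pos here) is >= 0, the allocation may only be
 * extended partially on error, so the caller must clamp the write to the
 * returned allocated size as shown.
 */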
2496 
2497 /**
2498  * ntfs_attr_set - fill (a part of) an attribute with a byte
2499  * @ni:		ntfs inode describing the attribute to fill
2500  * @ofs:	offset inside the attribute at which to start to fill
2501  * @cnt:	number of bytes to fill
2502  * @val:	the unsigned 8-bit value with which to fill the attribute
2503  *
2504  * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
2505  * byte offset @ofs inside the attribute with the constant byte @val.
2506  *
2507  * This function is effectively like memset() applied to an ntfs attribute.
2508  * Note this function actually only operates on the page cache pages belonging
2509  * to the ntfs attribute and it marks them dirty after doing the memset().
2510  * Thus it relies on the vm dirty page write code paths to cause the modified
2511  * pages to be written to the mft record/disk.
2512  *
2513  * Return 0 on success and -errno on error.  An error code of -ESPIPE means
2514  * that @ofs + @cnt were outside the end of the attribute and no write was
2515  * performed.
2516  */
2517 int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2518 {
2519 	ntfs_volume *vol = ni->vol;
2520 	struct address_space *mapping;
2521 	struct page *page;
2522 	u8 *kaddr;
2523 	pgoff_t idx, end;
2524 	unsigned start_ofs, end_ofs, size;
2525 
2526 	ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2527 			(long long)ofs, (long long)cnt, val);
2528 	BUG_ON(ofs < 0);
2529 	BUG_ON(cnt < 0);
2530 	if (!cnt)
2531 		goto done;
2532 	/*
2533 	 * FIXME: Compressed and encrypted attributes are not supported when
2534 	 * writing and we should never have gotten here for them.
2535 	 */
2536 	BUG_ON(NInoCompressed(ni));
2537 	BUG_ON(NInoEncrypted(ni));
2538 	mapping = VFS_I(ni)->i_mapping;
2539 	/* Work out the starting index and page offset. */
2540 	idx = ofs >> PAGE_SHIFT;
2541 	start_ofs = ofs & ~PAGE_MASK;
2542 	/* Work out the ending index and page offset. */
2543 	end = ofs + cnt;
2544 	end_ofs = end & ~PAGE_MASK;
2545 	/* If the end is outside the inode size return -ESPIPE. */
2546 	if (unlikely(end > i_size_read(VFS_I(ni)))) {
2547 		ntfs_error(vol->sb, "Request exceeds end of attribute.");
2548 		return -ESPIPE;
2549 	}
2550 	end >>= PAGE_SHIFT;
2551 	/* If there is a first partial page, need to do it the slow way. */
2552 	if (start_ofs) {
2553 		page = read_mapping_page(mapping, idx, NULL);
2554 		if (IS_ERR(page)) {
2555 			ntfs_error(vol->sb, "Failed to read first partial "
2556 					"page (error, index 0x%lx).", idx);
2557 			return PTR_ERR(page);
2558 		}
2559 		/*
2560 		 * If the last page is the same as the first page, need to
2561 		 * limit the write to the end offset.
2562 		 */
2563 		size = PAGE_SIZE;
2564 		if (idx == end)
2565 			size = end_ofs;
2566 		kaddr = kmap_atomic(page);
2567 		memset(kaddr + start_ofs, val, size - start_ofs);
2568 		flush_dcache_page(page);
2569 		kunmap_atomic(kaddr);
2570 		set_page_dirty(page);
2571 		put_page(page);
2572 		balance_dirty_pages_ratelimited(mapping);
2573 		cond_resched();
2574 		if (idx == end)
2575 			goto done;
2576 		idx++;
2577 	}
2578 	/* Do the whole pages the fast way. */
2579 	for (; idx < end; idx++) {
2580 		/* Find or create the current page.  (The page is locked.) */
2581 		page = grab_cache_page(mapping, idx);
2582 		if (unlikely(!page)) {
2583 			ntfs_error(vol->sb, "Insufficient memory to grab "
2584 					"page (index 0x%lx).", idx);
2585 			return -ENOMEM;
2586 		}
2587 		kaddr = kmap_atomic(page);
2588 		memset(kaddr, val, PAGE_SIZE);
2589 		flush_dcache_page(page);
2590 		kunmap_atomic(kaddr);
2591 		/*
2592 		 * If the page has buffers, mark them uptodate since buffer
2593 		 * state and not page state is definitive in 2.6 kernels.
2594 		 */
2595 		if (page_has_buffers(page)) {
2596 			struct buffer_head *bh, *head;
2597 
2598 			bh = head = page_buffers(page);
2599 			do {
2600 				set_buffer_uptodate(bh);
2601 			} while ((bh = bh->b_this_page) != head);
2602 		}
2603 		/* Now that buffers are uptodate, set the page uptodate, too. */
2604 		SetPageUptodate(page);
2605 		/*
2606 		 * Set the page and all its buffers dirty and mark the inode
2607 		 * dirty, too.  The VM will write the page later on.
2608 		 */
2609 		set_page_dirty(page);
2610 		/* Finally unlock and release the page. */
2611 		unlock_page(page);
2612 		put_page(page);
2613 		balance_dirty_pages_ratelimited(mapping);
2614 		cond_resched();
2615 	}
2616 	/* If there is a last partial page, need to do it the slow way. */
2617 	if (end_ofs) {
2618 		page = read_mapping_page(mapping, idx, NULL);
2619 		if (IS_ERR(page)) {
2620 			ntfs_error(vol->sb, "Failed to read last partial page "
2621 					"(error, index 0x%lx).", idx);
2622 			return PTR_ERR(page);
2623 		}
2624 		kaddr = kmap_atomic(page);
2625 		memset(kaddr, val, end_ofs);
2626 		flush_dcache_page(page);
2627 		kunmap_atomic(kaddr);
2628 		set_page_dirty(page);
2629 		put_page(page);
2630 		balance_dirty_pages_ratelimited(mapping);
2631 		cond_resched();
2632 	}
2633 done:
2634 	ntfs_debug("Done.");
2635 	return 0;
2636 }
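
/*
 * Usage sketch (illustrative only): zeroing a region of an attribute,
 * assuming @ofs and @len describe a range lying within i_size:
 *
 *	err = ntfs_attr_set(ni, ofs, len, 0);
 *	if (err)
 *		ntfs_error(vol->sb, "Failed to zero region, error %i.", err);
 *
 * A return value of -ESPIPE means the range extended beyond the end of the
 * attribute and nothing was written; the zeroed pages are only marked dirty
 * here and are written out later by the VM, as noted in the function comment
 * above.
 */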
2637 
2638 #endif /* NTFS_RW */
2639