/*
 * vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

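/*
 * Index of the reference inside a tree block at the given depth that leads
 * towards the entry for 'id'. Each tree block holds info->dqi_usable_bs / 4
 * references (epb), so the index at a level is obtained by dividing 'id' by
 * epb once per level still below us and taking the remainder.
 */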
static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	qid_t id = from_kqid(&init_user_ns, qid);

	return __get_index(info, id, depth);
}

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

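/*
 * Allocate a buffer for one quota block; GFP_NOFS is used because this is
 * called from filesystem context and must not recurse into fs reclaim.
 */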
static char *getdqbuf(size_t size)
{
	char *buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		printk(KERN_WARNING
		       "VFS: Not enough memory for quota buffers.\n");
	return buf;
}

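/*
 * Read one block at index 'blk' of the quota file into 'buf'; the buffer is
 * zeroed first so a short read leaves the tail zero-filled.
 */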
static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

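/* Write one block of the quota file; a short write is turned into -EIO */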
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

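/*
 * Check that a value read from the quota file lies in [min_val, max_val];
 * values outside the range indicate on-disk corruption (-EUCLEAN).
 */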
static inline int do_check_range(struct super_block *sb, const char *val_name,
				 uint val, uint min_val, uint max_val)
{
	if (val < min_val || val > max_val) {
		quota_error(sb, "Getting %s %u out of range %u-%u",
			    val_name, val, min_val, max_val);
		return -EUCLEAN;
	}

	return 0;
}

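/* Sanity-check the free-list links of a data block header read from disk */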
static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
				    struct qt_disk_dqdbheader *dh)
{
	int err = 0;

	err = do_check_range(info->dqi_sb, "dqdh_next_free",
			     le32_to_cpu(dh->dqdh_next_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
			     le32_to_cpu(dh->dqdh_prev_free), 0,
			     info->dqi_blocks - 1);

	return err;
}

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		ret = check_dquot_block_header(info, dh);
		if (ret)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether the write succeeds, the block is out of the list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
		*err = check_dquot_block_header(info, dh);
		if (*err)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}

/* Insert reference to structure into the trie */
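/*
 * One tree level is handled per call: allocate a tree block if this level
 * does not exist yet (newact), descend or allocate a leaf entry, and write
 * back the reference when a new child was attached (newson). A freshly
 * allocated tree block is returned to the free list if the insert below
 * failed.
 */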
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
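/*
 * Write dquot to disk: on the first write (dq_off == 0) a position in the
 * tree is allocated, then the in-memory limits are converted with
 * mem2disk_dqblk and written at dq_off.
 */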
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	ret = check_dquot_block_header(info, dh);
	if (ret)
		goto out_buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    newblk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)	/* Even not allocated? */
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    blk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

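/*
 * Read dquot from disk: look up its position in the tree unless dq_off is
 * already known. A missing entry is not an error - the dquot is marked
 * DQ_FAKE_B with all limits zeroed.
 */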
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);

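/*
 * Find the next used quota id at or after *id, walking the tree from 'blk'
 * at the given depth. Returns 0 with *id set to the found id, -ENOENT when
 * the subtree holds no further id, or another negative error on IO failure.
 */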
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}

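/* Return in *qid the next id, at or after *qid, that exists in the tree */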
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
	qid_t id = from_kqid(&init_user_ns, *qid);
	int ret;

	ret = find_next_id(info, &id, QT_TREEOFF, 0);
	if (ret < 0)
		return ret;
	*qid = make_kqid(&init_user_ns, qid->type, id);
	return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);