Merge branch 'stable-4.9' of git://git.infradead.org/users/pcmoore/audit
[sfrench/cifs-2.6.git] / fs / ufs / balloc.c
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9  */
10
11 #include <linux/fs.h>
12 #include <linux/stat.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/bitops.h>
18 #include <asm/byteorder.h>
19
20 #include "ufs_fs.h"
21 #include "ufs.h"
22 #include "swab.h"
23 #include "util.h"
24
25 #define INVBLOCK ((u64)-1L)
26
27 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
28 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
29 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
30 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
31 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
32 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
33
/*
 * Free 'count' fragments from fragment number 'fragment'.
 *
 * The whole run is expected to lie inside a single block of a single
 * cylinder group.  The free bits are set, the fragment summary table
 * (cg_frsum) is rebalanced, and if the containing block becomes entirely
 * free the counters are moved from fragment to block statistics.
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	
	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	/*
	 * NOTE(review): this sanity check compares against s_fpg (fragments
	 * per cylinder group); the invariant the rest of the function relies
	 * on is that the run does not cross a block boundary, i.e. s_fpb --
	 * confirm against upstream before tightening.
	 */
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);
	
	/* locate the cylinder group and the bit offset within it */
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);	/* first fragment of the containing block */
	/* retract the block's old free-run length from cg_frsum ... */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	/* ... set the free bit for every fragment being released ... */
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else 
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	/* ... and account the block's new free-run length */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		/* whole block is free now: move counts from frags to blocks */
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 also maintains per-cylinder rotational counts */
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
128
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks).
 *
 * Both 'fragment' and 'count' must be block-aligned (multiples of s_fpb).
 * The range may span cylinder-group boundaries; it is processed one group
 * at a time via the do_more loop, carrying the remainder in 'overflow'.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	/* both start and length must be whole blocks */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	/* clamp to this cylinder group; the rest is handled next iteration */
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		/* block already marked free => double free */
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 also maintains per-cylinder rotational counts */
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	/* continue with the part that spilled into the next cylinder group */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
224
/*
 * Modify the inode page cache in the following way:
 * have - buffers with b_blocknr equal to oldb...oldb+count-1
 * get  - buffers with b_blocknr equal to newb...newb+count-1
 * We also assume that blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	/* (i | mask) + 1 advances i to the first block of the next page */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;		/* index within the page of the first buffer to touch */
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* number of buffers in this page that fall inside [beg, end) */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			/* NOTE: 'pos' is reused here as the offset from 'beg' */
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
					map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				/* bring old contents in before relocating */
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			/* drop any stale alias of the new location in the bdev cache */
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
321
322 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
323                             int sync)
324 {
325         struct buffer_head *bh;
326         sector_t end = beg + n;
327
328         for (; beg < end; ++beg) {
329                 bh = sb_getblk(inode->i_sb, beg);
330                 lock_buffer(bh);
331                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
332                 set_buffer_uptodate(bh);
333                 mark_buffer_dirty(bh);
334                 unlock_buffer(bh);
335                 if (IS_SYNC(inode) || sync)
336                         sync_dirty_buffer(bh);
337                 brelse(bh);
338         }
339 }
340
341 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
342                            u64 goal, unsigned count, int *err,
343                            struct page *locked_page)
344 {
345         struct super_block * sb;
346         struct ufs_sb_private_info * uspi;
347         struct ufs_super_block_first * usb1;
348         unsigned cgno, oldcount, newcount;
349         u64 tmp, request, result;
350         
351         UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
352              inode->i_ino, (unsigned long long)fragment,
353              (unsigned long long)goal, count);
354         
355         sb = inode->i_sb;
356         uspi = UFS_SB(sb)->s_uspi;
357         usb1 = ubh_get_usb_first(uspi);
358         *err = -ENOSPC;
359
360         mutex_lock(&UFS_SB(sb)->s_lock);
361         tmp = ufs_data_ptr_to_cpu(sb, p);
362
363         if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
364                 ufs_warning(sb, "ufs_new_fragments", "internal warning"
365                             " fragment %llu, count %u",
366                             (unsigned long long)fragment, count);
367                 count = uspi->s_fpb - ufs_fragnum(fragment); 
368         }
369         oldcount = ufs_fragnum (fragment);
370         newcount = oldcount + count;
371
372         /*
373          * Somebody else has just allocated our fragments
374          */
375         if (oldcount) {
376                 if (!tmp) {
377                         ufs_error(sb, "ufs_new_fragments", "internal error, "
378                                   "fragment %llu, tmp %llu\n",
379                                   (unsigned long long)fragment,
380                                   (unsigned long long)tmp);
381                         mutex_unlock(&UFS_SB(sb)->s_lock);
382                         return INVBLOCK;
383                 }
384                 if (fragment < UFS_I(inode)->i_lastfrag) {
385                         UFSD("EXIT (ALREADY ALLOCATED)\n");
386                         mutex_unlock(&UFS_SB(sb)->s_lock);
387                         return 0;
388                 }
389         }
390         else {
391                 if (tmp) {
392                         UFSD("EXIT (ALREADY ALLOCATED)\n");
393                         mutex_unlock(&UFS_SB(sb)->s_lock);
394                         return 0;
395                 }
396         }
397
398         /*
399          * There is not enough space for user on the device
400          */
401         if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
402                 mutex_unlock(&UFS_SB(sb)->s_lock);
403                 UFSD("EXIT (FAILED)\n");
404                 return 0;
405         }
406
407         if (goal >= uspi->s_size) 
408                 goal = 0;
409         if (goal == 0) 
410                 cgno = ufs_inotocg (inode->i_ino);
411         else
412                 cgno = ufs_dtog(uspi, goal);
413          
414         /*
415          * allocate new fragment
416          */
417         if (oldcount == 0) {
418                 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
419                 if (result) {
420                         ufs_clear_frags(inode, result + oldcount,
421                                         newcount - oldcount, locked_page != NULL);
422                         write_seqlock(&UFS_I(inode)->meta_lock);
423                         ufs_cpu_to_data_ptr(sb, p, result);
424                         write_sequnlock(&UFS_I(inode)->meta_lock);
425                         *err = 0;
426                         UFS_I(inode)->i_lastfrag =
427                                 max(UFS_I(inode)->i_lastfrag, fragment + count);
428                 }
429                 mutex_unlock(&UFS_SB(sb)->s_lock);
430                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
431                 return result;
432         }
433
434         /*
435          * resize block
436          */
437         result = ufs_add_fragments(inode, tmp, oldcount, newcount);
438         if (result) {
439                 *err = 0;
440                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
441                                                 fragment + count);
442                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
443                                 locked_page != NULL);
444                 mutex_unlock(&UFS_SB(sb)->s_lock);
445                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
446                 return result;
447         }
448
449         /*
450          * allocate new block and move data
451          */
452         switch (fs32_to_cpu(sb, usb1->fs_optim)) {
453             case UFS_OPTSPACE:
454                 request = newcount;
455                 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
456                     > uspi->s_dsize * uspi->s_minfree / (2 * 100))
457                         break;
458                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
459                 break;
460             default:
461                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
462         
463             case UFS_OPTTIME:
464                 request = uspi->s_fpb;
465                 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
466                     (uspi->s_minfree - 2) / 100)
467                         break;
468                 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
469                 break;
470         }
471         result = ufs_alloc_fragments (inode, cgno, goal, request, err);
472         if (result) {
473                 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
474                                 locked_page != NULL);
475                 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
476                                    uspi->s_sbbase + tmp,
477                                    uspi->s_sbbase + result, locked_page);
478                 write_seqlock(&UFS_I(inode)->meta_lock);
479                 ufs_cpu_to_data_ptr(sb, p, result);
480                 write_sequnlock(&UFS_I(inode)->meta_lock);
481                 *err = 0;
482                 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
483                                                 fragment + count);
484                 mutex_unlock(&UFS_SB(sb)->s_lock);
485                 if (newcount < request)
486                         ufs_free_fragments (inode, result + newcount, request - newcount);
487                 ufs_free_fragments (inode, tmp, oldcount);
488                 UFSD("EXIT, result %llu\n", (unsigned long long)result);
489                 return result;
490         }
491
492         mutex_unlock(&UFS_SB(sb)->s_lock);
493         UFSD("EXIT (FAILED)\n");
494         return 0;
495 }               
496
/*
 * Try to extend an existing run of fragments in place, from 'oldcount'
 * to 'newcount' fragments, starting at 'fragment'.  Succeeds only when
 * the fragments immediately following the run are free within the same
 * block.  Returns 'fragment' on success, 0 on failure (the caller then
 * falls back to allocating a fresh run elsewhere).
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	
	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;	/* extra fragments wanted */
	
	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	/* the extended run must not cross a block boundary */
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);	/* offset of the run inside its block */
	/* every fragment we want to grow into must currently be free */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* measure the free run that follows the fragments being claimed */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* re-account the free run: its length shrinks by 'count' */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	/* mark the newly claimed fragments as allocated */
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	
	return fragment;
}
564
/*
 * Jump to cg_found when cylinder group 'cgno' can satisfy a request for
 * 'count' fragments: either a whole free block exists (cs_nbfree != 0)
 * or the cg_frsum table records a free run of at least 'count' fragments.
 * Expects sb, uspi, cgno, count, k and ucg to be in scope at the
 * expansion site (used only inside ufs_alloc_fragments).
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found; 
572
/*
 * Allocate 'count' fragments near 'goal', starting the search at
 * cylinder group 'cgno'.  Groups are tried in the classic FFS order:
 * (1) the preferred group, (2) quadratic rehash, (3) linear brute
 * force.  Returns the device-wide fragment number of the allocated run,
 * or 0 on failure; '*err' is set by the block allocator on error paths.
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	
	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;
	
	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg) 
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* exact whole-block request: take a block directly */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* find the smallest recorded free run that fits the request */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;
	
	if (allocsize == uspi->s_fpb) {
		/* no suitable partial run: break up a whole block instead */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		/* return the unneeded tail of the block to the free pool */
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	/* claim 'count' fragments out of the found run */
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
	
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	/* any leftover of the run re-enters frsum at its shorter length */
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	/* convert the group-relative fragment number to a device-wide one */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
682
/*
 * Allocate one whole block within cylinder group 'ucpi', preferring the
 * block containing 'goal' (goal == 0 means "start at the group's rotor").
 * Returns the group-relative fragment number of the block's first
 * fragment, or INVBLOCK when the group's bitmap yields nothing.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	/* round the goal down to a block boundary, group-relative */
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);
	
	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
	
norot:	
	/* no rotational preference: scan the bitmap for any free block */
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		/* UFS1 also maintains per-cylinder rotational counts */
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	
	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
740
/*
 * Scan 'size' bytes of the bitmap held in 'ubh', starting at byte offset
 * 'begin', for the first byte whose 'table' entry has a bit of 'mask'
 * set (i.e. a byte containing a suitable run of free fragments).  The
 * bitmap may span several underlying buffers of s_fsize bytes each.
 *
 * Returns the number of bytes left unscanned at the match (non-zero on
 * success, 0 when nothing was found); the caller converts that back
 * into a byte position as start + length - returned_value.
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;
	

	/* split 'begin' into a buffer index and an offset inside it */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* how much of the request fits in the current buffer */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* stop on the first byte whose table entry matches the mask */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
768
769 /*
770  * Find a block of the specified size in the specified cylinder group.
771  * @sp: pointer to super block
772  * @ucpi: pointer to cylinder group info
773  * @goal: near which block we want find new one
774  * @count: specified size
775  */
776 static u64 ufs_bitmap_search(struct super_block *sb,
777                              struct ufs_cg_private_info *ucpi,
778                              u64 goal, unsigned count)
779 {
780         /*
781          * Bit patterns for identifying fragments in the block map
782          * used as ((map & mask_arr) == want_arr)
783          */
784         static const int mask_arr[9] = {
785                 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
786         };
787         static const int want_arr[9] = {
788                 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
789         };
790         struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
791         unsigned start, length, loc;
792         unsigned pos, want, blockmap, mask, end;
793         u64 result;
794
795         UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
796              (unsigned long long)goal, count);
797
798         if (goal)
799                 start = ufs_dtogd(uspi, goal) >> 3;
800         else
801                 start = ucpi->c_frotor >> 3;
802                 
803         length = ((uspi->s_fpg + 7) >> 3) - start;
804         loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
805                 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
806                 1 << (count - 1 + (uspi->s_fpb & 7))); 
807         if (loc == 0) {
808                 length = start + 1;
809                 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
810                                 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
811                                 ufs_fragtable_other,
812                                 1 << (count - 1 + (uspi->s_fpb & 7)));
813                 if (loc == 0) {
814                         ufs_error(sb, "ufs_bitmap_search",
815                                   "bitmap corrupted on cg %u, start %u,"
816                                   " length %u, count %u, freeoff %u\n",
817                                   ucpi->c_cgx, start, length, count,
818                                   ucpi->c_freeoff);
819                         return INVBLOCK;
820                 }
821                 start = 0;
822         }
823         result = (start + length - loc) << 3;
824         ucpi->c_frotor = result;
825
826         /*
827          * found the byte in the map
828          */
829
830         for (end = result + 8; result < end; result += uspi->s_fpb) {
831                 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
832                 blockmap <<= 1;
833                 mask = mask_arr[count];
834                 want = want_arr[count];
835                 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
836                         if ((blockmap & mask) == want) {
837                                 UFSD("EXIT, result %llu\n",
838                                      (unsigned long long)result);
839                                 return result + pos;
840                         }
841                         mask <<= 1;
842                         want <<= 1;
843                 }
844         }
845
846         ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
847                   ucpi->c_cgx);
848         UFSD("EXIT (FAILED)\n");
849         return INVBLOCK;
850 }
851
/*
 * Update the cluster bitmap and the per-size free-cluster summary counts
 * of cylinder group @ucpi after the state of block @blkno changes.
 * NOTE(review): cnt > 0 sets the block's bit in the cluster map and
 * cnt < 0 clears it — presumably set == free and callers pass +1 on
 * free / -1 on allocate; confirm against the call sites.
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;
	
	uspi = UFS_SB(sb)->s_uspi;
	/* No cluster accounting configured for this filesystem */
	if (uspi->s_contigsumsize <= 0)
		return;

	/* Flip the block's bit in the cluster map */
	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;
	
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 ) 
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end) 
		i = end;
	back = start - i;
	
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	/* Combined cluster size, capped at the largest tracked size */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	/* One cluster of size i appears (cnt>0) or disappears (cnt<0)... */
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	/* ...while the old back/forward clusters change in the opposite way */
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
904
905
/*
 * Lookup table used by ubh_scanc() (via ufs_bitmap_search) when there are
 * 8 fragments per block: indexed by one byte of the free-fragment bitmap,
 * and probed there with mask 1 << (count - 1), since s_fpb & 7 == 0.
 * NOTE(review): so bit (n-1) of an entry presumably indicates that the
 * byte's free-bit pattern can satisfy an n-fragment request — confirm
 * against the historical BSD FFS fragtbl tables this mirrors.
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
924
/*
 * Lookup table used by ubh_scanc() when there are fewer than 8 fragments
 * per block (s_fpb of 1, 2 or 4): indexed by one byte of the free-fragment
 * bitmap and probed with mask 1 << (count - 1 + s_fpb), so the bit
 * positions for each run size are shifted up by s_fpb relative to the
 * 8-fpb table.  NOTE(review): exact per-fpb bit layout inferred from the
 * mask expression in ufs_bitmap_search — confirm against BSD FFS fragtbl.
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};