fs: Add helper to clean bdev aliases under a bh and use it
[sfrench/cifs-2.6.git] / fs / ufs / balloc.c
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9  */
10
11 #include <linux/fs.h>
12 #include <linux/stat.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/bitops.h>
18 #include <linux/bio.h>
19 #include <asm/byteorder.h>
20
21 #include "ufs_fs.h"
22 #include "ufs.h"
23 #include "swab.h"
24 #include "util.h"
25
26 #define INVBLOCK ((u64)-1L)
27
28 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
29 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
32 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34
/*
 * Free 'count' fragments from fragment number 'fragment'.
 *
 * Clears the corresponding bits in the cylinder group's free-fragment
 * bitmap, updates the fragment-size histogram (cg_frsum) and the
 * free-fragment counters at cg, superblock and per-cg-cache level, and,
 * if every fragment of the containing block ends up free, converts the
 * accounting from "free fragments" to "free block".
 * All modifications are serialised by UFS_SB(sb)->s_lock.
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	
	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	/*
	 * NOTE(review): this bound uses s_fpg (fragments per cylinder group)
	 * while the analogous sanity check in ufs_new_fragments() uses s_fpb
	 * (fragments per block) — confirm which limit is intended here.
	 */
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);
	
	/* Locate the cylinder group and the bit offset within it. */
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	/*
	 * Withdraw the old free-fragment pattern of this block from the
	 * cg_frsum histogram before touching the bitmap; it is re-added
	 * with the updated pattern below.
	 */
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		/* A set bit means "free"; freeing an already-free fragment is corruption. */
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else 
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		/* Whole block is free now: move the accounting from fragments to blocks. */
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		/* UFS1 additionally keeps per-cylinder/rotational-position tallies. */
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}
129
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks).
 *
 * 'fragment' and 'count' must both be block-aligned (multiples of s_fpb);
 * each block in the range is marked free in its cylinder group bitmap and
 * the block counters are updated.  A range that crosses a cylinder-group
 * boundary is processed one cg at a time via the do_more loop.
 * Serialised by UFS_SB(sb)->s_lock.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	/* Both start and length must be whole blocks. */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	/* Clamp the range to this cylinder group; the rest is handled next pass. */
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* Mark each block in the (clamped) range free and bump the counters. */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		/* UFS1 additionally keeps per-cylinder/rotational-position tallies. */
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	/* Continue with the portion that spilled into the next cylinder group. */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
225
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we suppose that oldb...oldb+count-1 blocks
 * situated at the end of file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write,
 * locked_page is argument of these functions, so we already lock it.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	/* (i | mask) + 1 advances i to the first block of the next page. */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		/* The caller's page is already locked; lock any other page ourselves. */
		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		/* Walk the page's buffer ring to the first buffer in our range. */
		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* Only the final page may be partially covered by the range. */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
					map_bh(bh, inode->i_sb, oldb + pos);
			/* Bring the old contents in before retargeting the buffer. */
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			/* Point the buffer at the new on-disk location and
			 * drop any stale block-device alias for it. */
			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
321
322 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
323                             int sync)
324 {
325         struct buffer_head *bh;
326         sector_t end = beg + n;
327
328         for (; beg < end; ++beg) {
329                 bh = sb_getblk(inode->i_sb, beg);
330                 lock_buffer(bh);
331                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
332                 set_buffer_uptodate(bh);
333                 mark_buffer_dirty(bh);
334                 unlock_buffer(bh);
335                 if (IS_SYNC(inode) || sync)
336                         sync_dirty_buffer(bh);
337                 brelse(bh);
338         }
339 }
340
/*
 * Allocate 'count' fragments for the file position 'fragment', trying in
 * order: extend the existing partial block in place (ufs_add_fragments),
 * allocate a fresh run (ufs_alloc_fragments), or allocate a new block and
 * migrate the old fragments to it (ufs_change_blocknr).
 *
 * @p:           on-disk block pointer to update on success
 * @goal:        preferred fragment number, 0 for "no preference"
 * @err:         out parameter; 0 on success, -ENOSPC on failure
 * @locked_page: page already locked by the caller (see ufs_change_blocknr)
 *
 * Returns the first allocated fragment number, 0 on failure or when the
 * fragments turn out to be allocated already, or INVBLOCK on internal error.
 */
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;
	
	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	/* A request must never cross a block boundary; clamp it if it would. */
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment); 
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		/* Extending a partial block: the pointer must already be set. */
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	/* Pick the starting cylinder group: from the goal, or from the inode. */
	if (goal >= uspi->s_size) 
		goal = 0;
	if (goal == 0) 
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);
	 
	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 *
	 * The fs_optim policy decides how much to request: OPTSPACE asks for
	 * exactly what is needed, OPTTIME rounds up to a whole block.
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through — an unknown policy is normalised to OPTTIME */
	
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		/* NOTE(review): BSD ffs switches back to OPTSPACE at this
		 * point; here OPTTIME is (re)written — confirm intended. */
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		/* Relocate the existing fragments' page-cache buffers to the new block. */
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		/* Return any over-allocation and the old fragments to the free pool. */
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}               
496
/*
 * Try to grow an existing partial block in place from 'oldcount' to
 * 'newcount' fragments, by claiming the free fragments that immediately
 * follow it within the same block.
 *
 * Returns 'fragment' on success, or 0 if the extension is not possible
 * (not enough free fragments, block boundary exceeded, cg unavailable,
 * or one of the needed fragments is already in use).
 * Caller holds UFS_SB(sb)->s_lock.
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	
	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;
	
	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	/* The fragments we want to append must all be free. */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* Measure the free run following the block to fix up cg_frsum. */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* The old free run of 'fragsize' shrinks to 'fragsize - count'. */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	/* Claim the fragments (clear bit == in use). */
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	
	return fragment;
}
564
/*
 * Test whether cylinder group 'cgno' can satisfy the current request:
 * jump to the cg_found label if it has at least one whole free block, or
 * (per the cg_frsum histogram) a run of free fragments of length >= count.
 * The macro expands in ufs_alloc_fragments() and relies on sb, uspi, cgno,
 * count, ucg, k and the cg_found label being in scope at the call site.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found; 
572
/*
 * Allocate a run of 'count' free fragments, preferring cylinder group
 * 'cgno' and the fragment number 'goal' within it.  The cg search order
 * is: preferred cg, quadratic rehash, then brute force over all cgs.
 * Once a cg is found, allocate either a whole block (splitting off the
 * unused tail as free fragments) or an existing free-fragment run.
 *
 * Returns the absolute first fragment number, or 0 on failure ('err' is
 * set by ufs_alloccg_block/ufs_bitmap_search paths via the caller's
 * convention).  Caller holds UFS_SB(sb)->s_lock.
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	
	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;
	
	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg) 
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* Exactly one block requested: allocate it directly. */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* Smallest available free-fragment run that fits the request. */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;
	
	if (allocsize == uspi->s_fpb) {
		/* No suitable run: take a whole block and free the tail. */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	/* Carve 'count' fragments out of a free run of length 'allocsize'. */
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
	
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	/* Convert the cg-relative fragment number to an absolute one. */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
682
/*
 * Allocate one whole block in cylinder group 'ucpi', at 'goal' if that
 * block is free, otherwise by scanning the bitmap starting from the
 * goal (or from the cg's rotor when goal == 0).
 *
 * Returns the cg-relative fragment number of the block's first fragment,
 * or INVBLOCK if the bitmap search fails.
 * Caller holds UFS_SB(sb)->s_lock.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* No preference: resume from where the last search left off. */
	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	/* Round down to a block boundary, cg-relative. */
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);
	
	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
	
norot:	
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	/* Mark the block in use and decrement the free-block counters. */
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	/* UFS1 additionally keeps per-cylinder/rotational-position tallies. */
	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	
	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
740
/*
 * Scan 'size' bytes of the (possibly multi-buffer) bitmap in 'ubh',
 * starting at byte offset 'begin', for the first byte b with
 * (table[b] & mask) != 0 — i.e. a byte whose free-fragment pattern can
 * satisfy the request encoded in 'mask' (see ufs_bitmap_search).
 *
 * Returns the number of bytes from the matching byte (inclusive) to the
 * end of the scanned region, or 0 if no byte matched; the caller turns
 * this into the matching byte's offset as (start + length - loc).
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;
	

	/* Split 'begin' into a buffer index and a byte offset within it. */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* Bytes to scan inside the current buffer. */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		/* On a match the && short-circuits, so rest stays > 0. */
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
768
769 /*
770  * Find a block of the specified size in the specified cylinder group.
771  * @sp: pointer to super block
772  * @ucpi: pointer to cylinder group info
773  * @goal: near which block we want find new one
774  * @count: specified size
775  */
776 static u64 ufs_bitmap_search(struct super_block *sb,
777                              struct ufs_cg_private_info *ucpi,
778                              u64 goal, unsigned count)
779 {
780         /*
781          * Bit patterns for identifying fragments in the block map
782          * used as ((map & mask_arr) == want_arr)
783          */
784         static const int mask_arr[9] = {
785                 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
786         };
787         static const int want_arr[9] = {
788                 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
789         };
790         struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
791         unsigned start, length, loc;
792         unsigned pos, want, blockmap, mask, end;
793         u64 result;
794
795         UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
796              (unsigned long long)goal, count);
797
798         if (goal)
799                 start = ufs_dtogd(uspi, goal) >> 3;
800         else
801                 start = ucpi->c_frotor >> 3;
802                 
803         length = ((uspi->s_fpg + 7) >> 3) - start;
804         loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
805                 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
806                 1 << (count - 1 + (uspi->s_fpb & 7))); 
807         if (loc == 0) {
808                 length = start + 1;
809                 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
810                                 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
811                                 ufs_fragtable_other,
812                                 1 << (count - 1 + (uspi->s_fpb & 7)));
813                 if (loc == 0) {
814                         ufs_error(sb, "ufs_bitmap_search",
815                                   "bitmap corrupted on cg %u, start %u,"
816                                   " length %u, count %u, freeoff %u\n",
817                                   ucpi->c_cgx, start, length, count,
818                                   ucpi->c_freeoff);
819                         return INVBLOCK;
820                 }
821                 start = 0;
822         }
823         result = (start + length - loc) << 3;
824         ucpi->c_frotor = result;
825
826         /*
827          * found the byte in the map
828          */
829
830         for (end = result + 8; result < end; result += uspi->s_fpb) {
831                 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
832                 blockmap <<= 1;
833                 mask = mask_arr[count];
834                 want = want_arr[count];
835                 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
836                         if ((blockmap & mask) == want) {
837                                 UFSD("EXIT, result %llu\n",
838                                      (unsigned long long)result);
839                                 return result + pos;
840                         }
841                         mask <<= 1;
842                         want <<= 1;
843                 }
844         }
845
846         ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
847                   ucpi->c_cgx);
848         UFSD("EXIT (FAILED)\n");
849         return INVBLOCK;
850 }
851
/*
 * Maintain the per-cylinder-group cluster summary counts after the state
 * of block @blkno changes: @cnt > 0 sets the block's bit in the cluster
 * map (block became free), @cnt < 0 clears it (block became allocated).
 * The summary entry for each affected cluster length is adjusted by @cnt.
 * No-op when the file system was created without contiguous-block
 * summaries (s_contigsumsize <= 0).
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;
	
	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;
	
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	/* 'end' may legitimately go below zero near block 0; clamp to -1. */
	if (end < 0 ) 
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end) 
		i = end;
	back = start - i;
	
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	/* Combined run length, capped at the largest tracked cluster size. */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	/* Adjust the count for the merged cluster length ... */
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	/* ... and undo the counts for the two runs it absorbed/split into. */
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
904
905
/*
 * Fragment-run lookup table for file systems with 8 fragments per block.
 * Indexed by one byte of the free-fragment bitmap (a set bit means the
 * fragment is free); in each entry, bit (n - 1) is set when the byte
 * contains a maximal run of exactly n consecutive free fragments
 * (e.g. entry[0x07] = 0x04: one run of 3; entry[0xFF] = 0x80: run of 8).
 * ubh_scanc() probes these entries with mask 1 << (count - 1) when
 * searching for a byte holding a free run of the wanted size.
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
924
/*
 * Fragment-run lookup table for file systems with other than 8 fragments
 * per block.  Indexed by one byte of the free-fragment bitmap; ubh_scanc()
 * probes these entries with mask 1 << (count - 1 + (s_fpb & 7)), so the
 * free-run information for the different fragment-per-block geometries is
 * packed at different bit offsets within each entry.
 * NOTE(review): the per-geometry bit layout is presumably the classic BSD
 * FFS fragtbl encoding shared by fpb = 1/2/4 — confirm against ffs_tables.
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};