Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[sfrench/cifs-2.6.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_2835_arm.c
1 /**
2  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions, and the following disclaimer,
9  *    without modification.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The names of the above-listed copyright holders may not be used
14  *    to endorse or promote products derived from this software without
15  *    specific prior written permission.
16  *
17  * ALTERNATIVELY, this software may be distributed under the terms of the
18  * GNU General Public License ("GPL") version 2, as published by the Free
19  * Software Foundation.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/interrupt.h>
38 #include <linux/pagemap.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/io.h>
41 #include <linux/platform_device.h>
42 #include <linux/uaccess.h>
43 #include <linux/mm.h>
44 #include <linux/of.h>
45 #include <soc/bcm2835/raspberrypi-firmware.h>
46
47 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
48
49 #include "vchiq_arm.h"
50 #include "vchiq_connected.h"
51 #include "vchiq_killable.h"
52 #include "vchiq_pagelist.h"
53
54 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
55
56 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
57 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
58
59 #define BELL0   0x00
60 #define BELL2   0x08
61
/* Per-VCHIQ-state private data for the BCM2835 platform layer. */
struct vchiq_2835_state {
	int inited;			/* non-zero once arm_state is valid */
	VCHIQ_ARM_STATE_T arm_state;	/* ARM-side state, see vchiq_arm.h */
};
66
/*
 * Bookkeeping for one in-flight bulk transfer: the PAGELIST_T shared with
 * the VideoCore plus everything needed to unmap and release it afterwards.
 * All of it lives inside a single coherent allocation made by
 * create_pagelist() and freed by cleanup_pagelistinfo().
 */
struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;			/* region read by the firmware */
	size_t pagelist_buffer_size;		/* size of the whole allocation */
	dma_addr_t dma_addr;			/* bus address of the pagelist */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE or DMA_FROM_DEVICE */
	unsigned int num_pages;
	unsigned int pages_need_release;	/* pages pinned via get_user_pages() */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* dma_map_sg() still outstanding */
};
78
79 static void __iomem *g_regs;
80 static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
81 static unsigned int g_fragments_size;
82 static char *g_fragments_base;
83 static char *g_free_fragments;
84 static struct semaphore g_free_fragments_sema;
85 static struct device *g_dev;
86
87 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
88
89 static irqreturn_t
90 vchiq_doorbell_irq(int irq, void *dev_id);
91
92 static struct vchiq_pagelist_info *
93 create_pagelist(char __user *buf, size_t count, unsigned short type,
94                 struct task_struct *task);
95
96 static void
97 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
98               int actual);
99
100 int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
101 {
102         struct device *dev = &pdev->dev;
103         struct rpi_firmware *fw = platform_get_drvdata(pdev);
104         VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
105         struct resource *res;
106         void *slot_mem;
107         dma_addr_t slot_phys;
108         u32 channelbase;
109         int slot_mem_size, frag_mem_size;
110         int err, irq, i;
111
112         /*
113          * VCHI messages between the CPU and firmware use
114          * 32-bit bus addresses.
115          */
116         err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
117
118         if (err < 0)
119                 return err;
120
121         err = of_property_read_u32(dev->of_node, "cache-line-size",
122                                    &g_cache_line_size);
123
124         if (err) {
125                 dev_err(dev, "Missing cache-line-size property\n");
126                 return -ENODEV;
127         }
128
129         g_fragments_size = 2 * g_cache_line_size;
130
131         /* Allocate space for the channels in coherent memory */
132         slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
133         frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
134
135         slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
136                                        &slot_phys, GFP_KERNEL);
137         if (!slot_mem) {
138                 dev_err(dev, "could not allocate DMA memory\n");
139                 return -ENOMEM;
140         }
141
142         WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
143
144         vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
145         if (!vchiq_slot_zero)
146                 return -EINVAL;
147
148         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
149                 (int)slot_phys + slot_mem_size;
150         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
151                 MAX_FRAGMENTS;
152
153         g_fragments_base = (char *)slot_mem + slot_mem_size;
154
155         g_free_fragments = g_fragments_base;
156         for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
157                 *(char **)&g_fragments_base[i*g_fragments_size] =
158                         &g_fragments_base[(i + 1)*g_fragments_size];
159         }
160         *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
161         sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
162
163         if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
164                 return -EINVAL;
165
166         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
167         g_regs = devm_ioremap_resource(&pdev->dev, res);
168         if (IS_ERR(g_regs))
169                 return PTR_ERR(g_regs);
170
171         irq = platform_get_irq(pdev, 0);
172         if (irq <= 0) {
173                 dev_err(dev, "failed to get IRQ\n");
174                 return irq;
175         }
176
177         err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
178                                "VCHIQ doorbell", state);
179         if (err) {
180                 dev_err(dev, "failed to register irq=%d\n", irq);
181                 return err;
182         }
183
184         /* Send the base address of the slots to VideoCore */
185         channelbase = slot_phys;
186         err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
187                                     &channelbase, sizeof(channelbase));
188         if (err || channelbase) {
189                 dev_err(dev, "failed to set channelbase\n");
190                 return err ? : -ENXIO;
191         }
192
193         g_dev = dev;
194         vchiq_log_info(vchiq_arm_log_level,
195                 "vchiq_init - done (slots %pK, phys %pad)",
196                 vchiq_slot_zero, &slot_phys);
197
198         vchiq_call_connected_callbacks();
199
200         return 0;
201 }
202
203 VCHIQ_STATUS_T
204 vchiq_platform_init_state(VCHIQ_STATE_T *state)
205 {
206         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
207         struct vchiq_2835_state *platform_state;
208
209         state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
210         platform_state = (struct vchiq_2835_state *)state->platform_state;
211
212         platform_state->inited = 1;
213         status = vchiq_arm_init_state(state, &platform_state->arm_state);
214
215         if (status != VCHIQ_SUCCESS)
216                 platform_state->inited = 0;
217
218         return status;
219 }
220
221 VCHIQ_ARM_STATE_T*
222 vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
223 {
224         struct vchiq_2835_state *platform_state;
225
226         platform_state   = (struct vchiq_2835_state *)state->platform_state;
227
228         if (!platform_state->inited)
229                 BUG();
230
231         return &platform_state->arm_state;
232 }
233
/*
 * Signal a remote (VideoCore-side) event.  The wmb() orders any payload
 * writes before the fired flag; the dsb(sy) ensures the flag itself is
 * visible to the VPU before the doorbell register is written.  The ordering
 * of these statements is load-bearing — do not reorder.
 */
void
remote_event_signal(REMOTE_EVENT_T *event)
{
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
246
247 VCHIQ_STATUS_T
248 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
249         void *offset, int size, int dir)
250 {
251         struct vchiq_pagelist_info *pagelistinfo;
252
253         WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
254
255         pagelistinfo = create_pagelist((char __user *)offset, size,
256                                        (dir == VCHIQ_BULK_RECEIVE)
257                                        ? PAGELIST_READ
258                                        : PAGELIST_WRITE,
259                                        current);
260
261         if (!pagelistinfo)
262                 return VCHIQ_ERROR;
263
264         bulk->handle = memhandle;
265         bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
266
267         /*
268          * Store the pagelistinfo address in remote_data,
269          * which isn't used by the slave.
270          */
271         bulk->remote_data = pagelistinfo;
272
273         return VCHIQ_SUCCESS;
274 }
275
276 void
277 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
278 {
279         if (bulk && bulk->remote_data && bulk->actual)
280                 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
281                               bulk->actual);
282 }
283
/* Master-side bulk transfer — never valid on this (slave) platform. */
void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}
293
/* Emit the one-line platform identification into the state dump. */
void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	/* +1 so the terminating NUL is included in the dump */
	vchiq_dump(dump_context, buf, len + 1);
}
304
/* Suspend is not supported on 2835 — always refuse. */
VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}
310
/* Resume is a no-op on 2835 (nothing is ever suspended). */
VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}
316
/* Pause notification — nothing to do on this platform. */
void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}
321
/* Resume notification — nothing to do on this platform. */
void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}
326
/* VideoCore is always wanted: autosuspend is not supported on 2835. */
int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	return 1; /* autosuspend not supported - videocore always wanted */
}
332
/* The suspend timer is never used on this platform. */
int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}
/* Dump suspend-timer usage — always reports "not in use" here. */
void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}
/* Suspend timeout handler — deliberately a no-op on this platform. */
void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}
348 /*
349  * Local functions
350  */
351
352 static irqreturn_t
353 vchiq_doorbell_irq(int irq, void *dev_id)
354 {
355         VCHIQ_STATE_T *state = dev_id;
356         irqreturn_t ret = IRQ_NONE;
357         unsigned int status;
358
359         /* Read (and clear) the doorbell */
360         status = readl(g_regs + BELL0);
361
362         if (status & 0x4) {  /* Was the doorbell rung? */
363                 remote_event_pollall(state);
364                 ret = IRQ_HANDLED;
365         }
366
367         return ret;
368 }
369
370 static void
371 cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
372 {
373         if (pagelistinfo->scatterlist_mapped) {
374                 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
375                              pagelistinfo->num_pages, pagelistinfo->dma_dir);
376         }
377
378         if (pagelistinfo->pages_need_release) {
379                 unsigned int i;
380
381                 for (i = 0; i < pagelistinfo->num_pages; i++)
382                         put_page(pagelistinfo->pages[i]);
383         }
384
385         dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
386                           pagelistinfo->pagelist, pagelistinfo->dma_addr);
387 }
388
389 /* There is a potential problem with partial cache lines (pages?)
390  * at the ends of the block when reading. If the CPU accessed anything in
391  * the same line (page?) then it may have pulled old data into the cache,
392  * obscuring the new data underneath. We can solve this by transferring the
393  * partial cache lines separately, and allowing the ARM to copy into the
394  * cached area.
395  */
396
/*
 * Build a PAGELIST_T describing the buffer at @buf/@count, pin its pages,
 * and DMA-map them in the direction implied by @type (PAGELIST_WRITE →
 * DMA_TO_DEVICE, otherwise DMA_FROM_DEVICE).  The pagelist, page-pointer
 * array, scatterlist and vchiq_pagelist_info bookkeeping all share one
 * coherent allocation, so a single dma_free_coherent() releases everything.
 *
 * For PAGELIST_READ transfers whose ends are not cache-line aligned, a
 * fragment pair is claimed from the shared pool and its index encoded into
 * pagelist->type (see the "partial cache lines" comment above).
 *
 * Returns the new pagelistinfo, or NULL on any failure.
 */
static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
		struct task_struct *task)
{
	PAGELIST_T *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	/* Offset of the buffer within its first page */
	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/*
	 * Layout of the single allocation: the PAGELIST_T and its addrs[]
	 * come first (read by the firmware), followed by the pages[], the
	 * scatterlist and the pagelistinfo used only on the ARM side.
	 */
	pagelist_size = sizeof(PAGELIST_T) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_zalloc_coherent(g_dev,
				       pagelist_size,
				       &dma_addr,
				       GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the allocation into its four regions (see layout above) */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr(buf)) {
		/* Kernel vmalloc buffer: translate each page directly,
		 * no pinning required.
		 */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg = vmalloc_to_page(buf + (actual_pages *
								 PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* Userspace buffer: pin the pages.  READ transfers need
		 * FOLL_WRITE because the device writes into them.
		 */
		down_read(&task->mm->mmap_sem);
		actual_pages = get_user_pages(
					  (unsigned long)buf & PAGE_MASK,
					  num_pages,
					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
					  pages,
					  NULL /*vmas */);
		up_read(&task->mm->mmap_sem);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0)
			{
				actual_pages--;
				put_page(pages[actual_pages]);
			}
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;	/* only the first entry carries an offset */
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		/* Merge with the previous entry when physically contiguous;
		 * the low bits of each entry hold (page count - 1).
		 */
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (g_cache_line_size - 1)) ||
		((pagelist->offset + pagelist->length) &
		(g_cache_line_size - 1)))) {
		char *fragments;

		/* May sleep until a fragment pair is available;
		 * interruptible so a killed process does not hang here.
		 */
		if (down_interruptible(&g_free_fragments_sema) != 0) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		/* Pop the head of the intrusive free list */
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the type field so the
		 * firmware and free_pagelist() can locate the pair.
		 */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
582
/*
 * Tear down a pagelist after a bulk transfer has completed, having moved
 * @actual bytes.  Unmaps the DMA scatterlist, copies any head/tail partial
 * cache lines back from the fragment pair into the user pages, returns the
 * fragment pair to the free pool, dirties the pages the device wrote, and
 * finally frees the whole allocation via cleanup_pagelistinfo().
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	PAGELIST_T *pagelist   = pagelistinfo->pagelist;
	struct page **pages    = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
			pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded into the type field by
		 * create_pagelist(); recover the fragment pair's address.
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		/* Bytes before the first cache-line boundary ... */
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		/* ... and after the last one */
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			/* Copy the head fragment into the first page */
			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			/* Copy the tail fragment (second half of the pair)
			 * into the last page, at the final cache-line start.
			 */
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Push the fragment pair back onto the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}