1 /**
2  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions, and the following disclaimer,
9  *    without modification.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The names of the above-listed copyright holders may not be used
14  *    to endorse or promote products derived from this software without
15  *    specific prior written permission.
16  *
17  * ALTERNATIVELY, this software may be distributed under the terms of the
18  * GNU General Public License ("GPL") version 2, as published by the Free
19  * Software Foundation.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/interrupt.h>
38 #include <linux/pagemap.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/io.h>
41 #include <linux/platform_device.h>
42 #include <linux/uaccess.h>
43 #include <linux/mm.h>
44 #include <linux/of.h>
45 #include <soc/bcm2835/raspberrypi-firmware.h>
46 
47 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
48 
49 #include "vchiq_arm.h"
50 #include "vchiq_connected.h"
51 #include "vchiq_killable.h"
52 #include "vchiq_pagelist.h"
53 
54 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
55 
56 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
57 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
58 
59 #define BELL0	0x00
60 #define BELL2	0x08
61 
/* Per-VCHIQ-state platform data for the 2835.
 * Allocated by vchiq_platform_init_state() and stored in
 * state->platform_state.
 */
struct vchiq_2835_state {
	int inited;		/* non-zero once arm_state is initialised */
	VCHIQ_ARM_STATE_T arm_state;
};
66 
/* Bookkeeping for one bulk-transfer pagelist, created by
 * create_pagelist() and torn down by free_pagelist() /
 * cleanup_pagelistinfo().  The struct itself lives at the tail of the
 * single coherent allocation it describes.
 */
struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;			/* pagelist shared with the VPU */
	size_t pagelist_buffer_size;		/* size of the coherent allocation */
	dma_addr_t dma_addr;			/* bus address of the allocation */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE or DMA_FROM_DEVICE */
	unsigned int num_pages;			/* pages covering the user buffer */
	unsigned int pages_need_release;	/* pages pinned via get_user_pages_fast() */
	struct page **pages;			/* page pointer array */
	struct scatterlist *scatterlist;	/* one entry per page */
	unsigned int scatterlist_mapped;	/* dma_map_sg() is live */
};
78 
/* Mapped doorbell registers (BELL0/BELL2). */
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;		/* 2 * g_cache_line_size */
static char *g_fragments_base;			/* base of the fragment pool */
static char *g_free_fragments;			/* head of the fragment free list */
static struct semaphore g_free_fragments_sema;	/* counts free fragments */
static struct device *g_dev;			/* device used for DMA mappings */

/* Protects g_free_fragments (binary semaphore used as a mutex). */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
98 
99 static irqreturn_t
100 vchiq_doorbell_irq(int irq, void *dev_id);
101 
102 static struct vchiq_pagelist_info *
103 create_pagelist(char __user *buf, size_t count, unsigned short type);
104 
105 static void
106 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
107 	      int actual);
108 
/* Platform probe-time initialisation: allocate the shared slot memory
 * and fragment pool, map the doorbell registers, hook up the doorbell
 * IRQ and hand the slot base address to the VideoCore firmware.
 *
 * Returns 0 on success or a negative errno.  All allocations are
 * device-managed, so error paths need no explicit cleanup.
 */
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the VPU where (in bus address space) the fragment pool is. */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	/* Thread the fragments into a singly-linked free list. */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		/* platform_get_irq() may return 0, which is not a valid
		 * IRQ; don't propagate it to the caller as success.
		 */
		return irq ? irq : -ENXIO;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* The firmware writes 0 back on success. */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %pK, phys %pad)",
		vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
203 
204 VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T * state)205 vchiq_platform_init_state(VCHIQ_STATE_T *state)
206 {
207 	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
208 	struct vchiq_2835_state *platform_state;
209 
210 	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
211 	platform_state = (struct vchiq_2835_state *)state->platform_state;
212 
213 	platform_state->inited = 1;
214 	status = vchiq_arm_init_state(state, &platform_state->arm_state);
215 
216 	if (status != VCHIQ_SUCCESS)
217 		platform_state->inited = 0;
218 
219 	return status;
220 }
221 
222 VCHIQ_ARM_STATE_T*
vchiq_platform_get_arm_state(VCHIQ_STATE_T * state)223 vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
224 {
225 	struct vchiq_2835_state *platform_state;
226 
227 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
228 
229 	WARN_ON_ONCE(!platform_state->inited);
230 
231 	return &platform_state->arm_state;
232 }
233 
/* Signal a remote (VPU-side) event and, if the VPU has armed it, ring
 * the doorbell to raise an interrupt on the VPU.
 */
void
remote_event_signal(REMOTE_EVENT_T *event)
{
	/* Ensure all prior writes to the shared slot memory are visible
	 * before the VPU can observe the event as fired.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	/* Only ring the doorbell if the VPU is waiting for this event. */
	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
246 
247 VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T * bulk,VCHI_MEM_HANDLE_T memhandle,void * offset,int size,int dir)248 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
249 	void *offset, int size, int dir)
250 {
251 	struct vchiq_pagelist_info *pagelistinfo;
252 
253 	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
254 
255 	pagelistinfo = create_pagelist((char __user *)offset, size,
256 				       (dir == VCHIQ_BULK_RECEIVE)
257 				       ? PAGELIST_READ
258 				       : PAGELIST_WRITE);
259 
260 	if (!pagelistinfo)
261 		return VCHIQ_ERROR;
262 
263 	bulk->handle = memhandle;
264 	bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
265 
266 	/*
267 	 * Store the pagelistinfo address in remote_data,
268 	 * which isn't used by the slave.
269 	 */
270 	bulk->remote_data = pagelistinfo;
271 
272 	return VCHIQ_SUCCESS;
273 }
274 
275 void
vchiq_complete_bulk(VCHIQ_BULK_T * bulk)276 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
277 {
278 	if (bulk && bulk->remote_data && bulk->actual)
279 		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
280 			      bulk->actual);
281 }
282 
void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
{
	/*
	 * This should only be called on the master (VideoCore) side, but
	 * provide an implementation to avoid the need for ifdefery.
	 */
	BUG();
}
292 
/* Emit a one-line platform identification into the dump context. */
void
vchiq_dump_platform_state(void *dump_context)
{
	char text[80];
	int text_len;

	text_len = snprintf(text, sizeof(text),
			    "  Platform: 2835 (VC master)");
	/* +1 so the terminating NUL is included in the dump. */
	vchiq_dump(dump_context, text, text_len + 1);
}
303 
/* Suspend is not supported on this platform; always fails. */
VCHIQ_STATUS_T
vchiq_platform_suspend(VCHIQ_STATE_T *state)
{
	return VCHIQ_ERROR;
}
309 
/* Resume is a no-op on this platform; always succeeds. */
VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state)
{
	return VCHIQ_SUCCESS;
}
315 
/* Pause notification: nothing to do on this platform. */
void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}
320 
/* Resume notification: nothing to do on this platform. */
void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}
325 
int
vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
{
	return 1; // autosuspend not supported - videocore always wanted
}
331 
/* No suspend timer on this platform (autosuspend unsupported). */
int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}
/* Dump use-state info: only notes that the suspend timer is unused. */
void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}
/* Suspend-timer timeout handler: nothing to do (timer unused here). */
void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}
347 /*
348  * Local functions
349  */
350 
351 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)352 vchiq_doorbell_irq(int irq, void *dev_id)
353 {
354 	VCHIQ_STATE_T *state = dev_id;
355 	irqreturn_t ret = IRQ_NONE;
356 	unsigned int status;
357 
358 	/* Read (and clear) the doorbell */
359 	status = readl(g_regs + BELL0);
360 
361 	if (status & 0x4) {  /* Was the doorbell rung? */
362 		remote_event_pollall(state);
363 		ret = IRQ_HANDLED;
364 	}
365 
366 	return ret;
367 }
368 
369 static void
cleanup_pagelistinfo(struct vchiq_pagelist_info * pagelistinfo)370 cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
371 {
372 	if (pagelistinfo->scatterlist_mapped) {
373 		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
374 			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
375 	}
376 
377 	if (pagelistinfo->pages_need_release) {
378 		unsigned int i;
379 
380 		for (i = 0; i < pagelistinfo->num_pages; i++)
381 			put_page(pagelistinfo->pages[i]);
382 	}
383 
384 	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
385 			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
386 }
387 
388 /* There is a potential problem with partial cache lines (pages?)
389  * at the ends of the block when reading. If the CPU accessed anything in
390  * the same line (page?) then it may have pulled old data into the cache,
391  * obscuring the new data underneath. We can solve this by transferring the
392  * partial cache lines separately, and allowing the ARM to copy into the
393  * cached area.
394  */
395 
396 static struct vchiq_pagelist_info *
create_pagelist(char __user * buf,size_t count,unsigned short type)397 create_pagelist(char __user *buf, size_t count, unsigned short type)
398 {
399 	PAGELIST_T *pagelist;
400 	struct vchiq_pagelist_info *pagelistinfo;
401 	struct page **pages;
402 	u32 *addrs;
403 	unsigned int num_pages, offset, i, k;
404 	int actual_pages;
405 	size_t pagelist_size;
406 	struct scatterlist *scatterlist, *sg;
407 	int dma_buffers;
408 	dma_addr_t dma_addr;
409 
410 	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
411 	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
412 
413 	pagelist_size = sizeof(PAGELIST_T) +
414 			(num_pages * sizeof(u32)) +
415 			(num_pages * sizeof(pages[0]) +
416 			(num_pages * sizeof(struct scatterlist))) +
417 			sizeof(struct vchiq_pagelist_info);
418 
419 	/* Allocate enough storage to hold the page pointers and the page
420 	 * list
421 	 */
422 	pagelist = dma_zalloc_coherent(g_dev,
423 				       pagelist_size,
424 				       &dma_addr,
425 				       GFP_KERNEL);
426 
427 	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
428 
429 	if (!pagelist)
430 		return NULL;
431 
432 	addrs		= pagelist->addrs;
433 	pages		= (struct page **)(addrs + num_pages);
434 	scatterlist	= (struct scatterlist *)(pages + num_pages);
435 	pagelistinfo	= (struct vchiq_pagelist_info *)
436 			  (scatterlist + num_pages);
437 
438 	pagelist->length = count;
439 	pagelist->type = type;
440 	pagelist->offset = offset;
441 
442 	/* Populate the fields of the pagelistinfo structure */
443 	pagelistinfo->pagelist = pagelist;
444 	pagelistinfo->pagelist_buffer_size = pagelist_size;
445 	pagelistinfo->dma_addr = dma_addr;
446 	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
447 				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
448 	pagelistinfo->num_pages = num_pages;
449 	pagelistinfo->pages_need_release = 0;
450 	pagelistinfo->pages = pages;
451 	pagelistinfo->scatterlist = scatterlist;
452 	pagelistinfo->scatterlist_mapped = 0;
453 
454 	if (is_vmalloc_addr(buf)) {
455 		unsigned long length = count;
456 		unsigned int off = offset;
457 
458 		for (actual_pages = 0; actual_pages < num_pages;
459 		     actual_pages++) {
460 			struct page *pg = vmalloc_to_page(buf + (actual_pages *
461 								 PAGE_SIZE));
462 			size_t bytes = PAGE_SIZE - off;
463 
464 			if (!pg) {
465 				cleanup_pagelistinfo(pagelistinfo);
466 				return NULL;
467 			}
468 
469 			if (bytes > length)
470 				bytes = length;
471 			pages[actual_pages] = pg;
472 			length -= bytes;
473 			off = 0;
474 		}
475 		/* do not try and release vmalloc pages */
476 	} else {
477 		actual_pages = get_user_pages_fast(
478 					  (unsigned long)buf & PAGE_MASK,
479 					  num_pages,
480 					  type == PAGELIST_READ,
481 					  pages);
482 
483 		if (actual_pages != num_pages) {
484 			vchiq_log_info(vchiq_arm_log_level,
485 				       "%s - only %d/%d pages locked",
486 				       __func__, actual_pages, num_pages);
487 
488 			/* This is probably due to the process being killed */
489 			while (actual_pages > 0) {
490 				actual_pages--;
491 				put_page(pages[actual_pages]);
492 			}
493 			cleanup_pagelistinfo(pagelistinfo);
494 			return NULL;
495 		}
496 		 /* release user pages */
497 		pagelistinfo->pages_need_release = 1;
498 	}
499 
500 	/*
501 	 * Initialize the scatterlist so that the magic cookie
502 	 *  is filled if debugging is enabled
503 	 */
504 	sg_init_table(scatterlist, num_pages);
505 	/* Now set the pages for each scatterlist */
506 	for (i = 0; i < num_pages; i++)	{
507 		unsigned int len = PAGE_SIZE - offset;
508 
509 		if (len > count)
510 			len = count;
511 		sg_set_page(scatterlist + i, pages[i], len, offset);
512 		offset = 0;
513 		count -= len;
514 	}
515 
516 	dma_buffers = dma_map_sg(g_dev,
517 				 scatterlist,
518 				 num_pages,
519 				 pagelistinfo->dma_dir);
520 
521 	if (dma_buffers == 0) {
522 		cleanup_pagelistinfo(pagelistinfo);
523 		return NULL;
524 	}
525 
526 	pagelistinfo->scatterlist_mapped = 1;
527 
528 	/* Combine adjacent blocks for performance */
529 	k = 0;
530 	for_each_sg(scatterlist, sg, dma_buffers, i) {
531 		u32 len = sg_dma_len(sg);
532 		u32 addr = sg_dma_address(sg);
533 
534 		/* Note: addrs is the address + page_count - 1
535 		 * The firmware expects blocks after the first to be page-
536 		 * aligned and a multiple of the page size
537 		 */
538 		WARN_ON(len == 0);
539 		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
540 		WARN_ON(i && (addr & ~PAGE_MASK));
541 		if (k > 0 &&
542 		    ((addrs[k - 1] & PAGE_MASK) +
543 		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
544 		    == (addr & PAGE_MASK))
545 			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
546 		else
547 			addrs[k++] = (addr & PAGE_MASK) |
548 				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
549 	}
550 
551 	/* Partial cache lines (fragments) require special measures */
552 	if ((type == PAGELIST_READ) &&
553 		((pagelist->offset & (g_cache_line_size - 1)) ||
554 		((pagelist->offset + pagelist->length) &
555 		(g_cache_line_size - 1)))) {
556 		char *fragments;
557 
558 		if (down_interruptible(&g_free_fragments_sema) != 0) {
559 			cleanup_pagelistinfo(pagelistinfo);
560 			return NULL;
561 		}
562 
563 		WARN_ON(g_free_fragments == NULL);
564 
565 		down(&g_free_fragments_mutex);
566 		fragments = g_free_fragments;
567 		WARN_ON(fragments == NULL);
568 		g_free_fragments = *(char **) g_free_fragments;
569 		up(&g_free_fragments_mutex);
570 		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
571 			(fragments - g_fragments_base) / g_fragments_size;
572 	}
573 
574 	return pagelistinfo;
575 }
576 
/* Tear down a pagelist after a bulk transfer has completed.  'actual'
 * is the number of bytes actually transferred (may be negative on
 * error).  For READ transfers that used fragments, copy the head/tail
 * partial cache lines from the fragment buffer back into the user's
 * pages before releasing everything.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	PAGELIST_T *pagelist   = pagelistinfo->pagelist;
	struct page **pages    = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* create_pagelist() encoded the fragment index into the
		 * pagelist type; recover the fragment buffer address.
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		/* Bytes before the first cache-line boundary ... */
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		/* ... and bytes past the last cache-line boundary. */
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the head fragment into the first page. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the tail fragment into the last page. */
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Return the fragment pair to the free list. */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
646