// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc_array(num_pages, sizeof(*pages), GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	while (got < num_pages) {
		rc = get_user_pages_fast(
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, pages + got);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
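
/*
 * A minimal caller sketch (not part of this file): pinning a user buffer
 * for direct I/O, then dropping the pins with ceph_put_page_vector(),
 * passing dirty=true because the read wrote into the pages.  The ubuf/len
 * names are hypothetical; calc_pages_for() is the libceph helper for
 * sizing the vector.
 *
 *	int num_pages = calc_pages_for((unsigned long)ubuf, len);
 *	struct page **pages;
 *
 *	pages = ceph_get_direct_page_vector(ubuf, num_pages, true);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... issue the read into the pinned pages ...
 *	ceph_put_page_vector(pages, num_pages, true);
 */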

void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc_array(num_pages, sizeof(*pages), flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
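
/*
 * A minimal pairing sketch (illustration only): pages from
 * ceph_alloc_page_vector() are freshly allocated, not pinned user pages,
 * so they are freed with ceph_release_page_vector() rather than
 * ceph_put_page_vector().  num_pages is assumed to be computed by the
 * caller.
 *
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... fill or consume the pages ...
 *	ceph_release_page_vector(pages, num_pages);
 */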

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
					 const void __user *data,
					 loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
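
/*
 * A hypothetical write-path sketch: stage a user buffer into kernel pages
 * before handing them to a request.  ubuf and len are placeholder names;
 * the copy returns len on success or -EFAULT on a faulting copy.
 *
 *	int num_pages = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	int ret;
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ret = ceph_copy_user_to_page_vector(pages, ubuf, 0, len);
 *	if (ret < 0) {
 *		ceph_release_page_vector(pages, num_pages);
 *		return ret;
 *	}
 *	... build and send the request with the staged pages ...
 */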

void ceph_copy_to_page_vector(struct page **pages,
				    const void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

void ceph_copy_from_page_vector(struct page **pages,
				    void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
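
/*
 * A small round-trip sketch (illustration only): copy a kernel buffer into
 * a page vector and read it back.  The offset is relative to the start of
 * the first page, and the vector is assumed to be large enough.
 *
 *	char src[64] = "hello", dst[64];
 *
 *	ceph_copy_to_page_vector(pages, src, 0, sizeof(src));
 *	ceph_copy_from_page_vector(pages, dst, 0, sizeof(dst));
 *	... dst now matches src ...
 */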

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_SHIFT;

	off &= ~PAGE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_SIZE);
		len -= PAGE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
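
/*
 * A brief sketch of a hypothetical caller: after a short read of 'read'
 * bytes into a 'len'-byte page vector, zero the unread tail so no stale
 * data is exposed to the reader.
 *
 *	if (read < len)
 *		ceph_zero_page_vector_range(read, len - read, pages);
 */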