// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *	      Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

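/*
 * Both guard tag flavours share this signature: checksum @len bytes of
 * @data and return the 16-bit result as a big-endian guard tag.
 */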
typedef __be16 (csum_fn) (void *, unsigned int);

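/* Guard flavour 1: the 16-bit CRC defined by T10 (polynomial 0x8BB7). */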
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

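/*
 * Guard flavour 2: the RFC 1071 ones' complement IP checksum, which the
 * DIX interface permits as a cheaper alternative to the T10 CRC.
 */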
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
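/*
 * For reference, the 8-byte tuple stored in each protection interval is
 * struct t10_pi_tuple from <linux/t10-pi.h>:
 *
 *	__be16 guard_tag;	checksum of the data interval
 *	__be16 app_tag;		application tag, opaque to this code
 *	__be32 ref_tag;		lower 32 bits of the LBA (Type 1/2 only)
 */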
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = fn(iter->data_buf, iter->interval);
		pi->app_tag = 0;

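		/*
		 * iter->seed is the virtual LBA of the current interval:
		 * Type 1 stamps its low 32 bits into the ref tag, Type 3
		 * leaves the ref tag unused.
		 */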
		if (type == T10_PI_TYPE1_PROTECTION)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

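	/* Type 0 means the device carries no PI; there is nothing to verify. */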
	BUG_ON(type == T10_PI_TYPE0_PROTECTION);

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;
		__be16 csum;

		if (type == T10_PI_TYPE1_PROTECTION ||
		    type == T10_PI_TYPE2_PROTECTION) {
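			/*
			 * An app tag of all ones (T10_PI_APP_ESCAPE)
			 * disables all checking of this block.
			 */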
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
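			/*
			 * Type 3 defines no ref tag to compare against;
			 * checking is skipped only when both the app and
			 * ref tags are all ones.
			 */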
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = fn(iter->data_buf, iter->interval);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += sizeof(struct t10_pi_tuple);
		iter->seed++;
	}

	return BLK_STS_OK;
}
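
/*
 * Note: a BLK_STS_PROTECTION failure is reported back to the submitter
 * as -EILSEQ when the block layer completes the request.
 */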

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
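 *
 * For example, assuming 512-byte protection intervals: a bio aimed at
 * virtual sector 0 of a partition starting at physical sector 2048
 * arrives here with ref_tags 0, 1, 2, ... and leaves with 2048, 2049,
 * 2050, ... matching the LBAs the device will actually see.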
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by
 * the block layer.
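 *
 * Note that only the protection information covering the first nr_bytes
 * is remapped: on a partial completion the tuples for the unfinished
 * tail of the request keep their physical ref_tags.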
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			void *p, *pmap;
			unsigned int j;

			pmap = kmap_atomic(iv.bv_page);
			p = pmap + iv.bv_offset;
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}

			kunmap_atomic(pmap);
		}
	}
}

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

MODULE_LICENSE("GPL");