/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

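/*
 * Return true if @bio can be merged onto the back of @req, i.e. if their
 * crypt contexts are compatible and @bio's DUN continues from where @req's
 * data ends.
 */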
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

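/*
 * Return true if @bio can be merged onto the front of @req, i.e. if @req's
 * DUN continues from where @bio's data ends.
 */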
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

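/*
 * Return true if @next can be merged onto the back of @req, using the same
 * DUN contiguity check as for a back-merged bio.
 */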
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

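/* Reset the crypto fields of a newly initialized request. */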
static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

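/*
 * With inline encryption compiled out, no bio ever has a crypt context, so
 * the merge checks trivially pass and the request helpers do nothing.
 */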
static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

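/*
 * Advance the bio's crypt context (i.e. its DUN) past @bytes of data.
 * No-op for bios without a crypt context.
 */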
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

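/* Free the bio's crypt context, if it has one. */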
void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

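/*
 * When a bio is front-merged into a request, the bio's data now comes first,
 * so the request's starting DUN must be updated to the bio's DUN.
 */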
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

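/*
 * Prepare a bio that has a crypt context for submission.  Returns true if
 * the (possibly replaced) bio should continue to be processed, or false on
 * error, in which case ->bi_status has already been set.
 */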
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

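/*
 * An encrypted request needs additional initialization before it can be
 * issued; requests without a crypt context pass through untouched.
 */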
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

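/* Release the crypto resources held by an encrypted request. */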
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, or -ENOMEM if out of memory.  -ENOMEM is only
 *	   possible if @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, or an error status on failure.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

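/*
 * Without the fallback, a crypto mode that the device doesn't natively
 * support can't be used: starting to use one fails, and any bio that
 * nevertheless reaches the fallback path is failed.
 */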
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */