1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *	Yongmyung Lee <ymhungry.lee@samsung.com>
9  *	Jinyoung Choi <j-young.choi@samsung.com>
10  */
11 
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
14 
15 #include "ufshcd.h"
16 #include "ufshpb.h"
17 #include "../sd.h"
18 
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
24 
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
/* A cache size of 2MB can hold the PPN entries covering a 1GB range. */
30 static unsigned int ufshpb_host_map_kbytes = 2048;
31 static int tot_active_srgn_pages;
32 
33 static struct workqueue_struct *ufshpb_wq;
34 
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
36 				      int srgn_idx);
37 
bool ufshpb_is_allowed(struct ufs_hba *hba)
39 {
40 	return !(hba->ufshpb_dev.hpb_disabled);
41 }
42 
/* HPB version 1.0 is called the legacy version. */
bool ufshpb_is_legacy(struct ufs_hba *hba)
45 {
46 	return hba->ufshpb_dev.is_legacy;
47 }
48 
static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
50 {
51 	return sdev->hostdata;
52 }
53 
static int ufshpb_get_state(struct ufshpb_lu *hpb)
55 {
56 	return atomic_read(&hpb->hpb_state);
57 }
58 
static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
60 {
61 	atomic_set(&hpb->hpb_state, state);
62 }
63 
static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65 				struct ufshpb_subregion *srgn)
66 {
67 	return rgn->rgn_state != HPB_RGN_INACTIVE &&
68 		srgn->srgn_state == HPB_SRGN_VALID;
69 }
70 
static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
72 {
73 	return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
74 }
75 
static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
77 {
78 	return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79 	       op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
80 }
81 
static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
83 {
84 	return transfer_len <= hpb->pre_req_max_tr_len;
85 }
86 
static bool ufshpb_is_general_lun(int lun)
88 {
89 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
90 }
91 
static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
93 {
94 	if (hpb->lu_pinned_end != PINNED_NOT_SET &&
95 	    rgn_idx >= hpb->lu_pinned_start &&
96 	    rgn_idx <= hpb->lu_pinned_end)
97 		return true;
98 
99 	return false;
100 }
101 
static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
103 {
104 	bool ret = false;
105 	unsigned long flags;
106 
107 	if (ufshpb_get_state(hpb) != HPB_PRESENT)
108 		return;
109 
110 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
111 	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
112 		ret = true;
113 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
114 
115 	if (ret)
116 		queue_work(ufshpb_wq, &hpb->map_work);
117 }
118 
static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
120 				    struct ufshcd_lrb *lrbp,
121 				    struct utp_hpb_rsp *rsp_field)
122 {
123 	/* Check HPB_UPDATE_ALERT */
124 	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
125 	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
126 		return false;
127 
128 	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
129 	    rsp_field->desc_type != DEV_DES_TYPE ||
130 	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
131 	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
132 	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
133 	    rsp_field->hpb_op == HPB_RSP_NONE ||
134 	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
135 	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
136 		return false;
137 
138 	if (!ufshpb_is_general_lun(rsp_field->lun)) {
139 		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
140 			 lrbp->lun);
141 		return false;
142 	}
143 
144 	return true;
145 }
146 
static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
148 			       int srgn_offset, int cnt, bool set_dirty)
149 {
150 	struct ufshpb_region *rgn;
151 	struct ufshpb_subregion *srgn, *prev_srgn = NULL;
152 	int set_bit_len;
153 	int bitmap_len;
154 	unsigned long flags;
155 
156 next_srgn:
157 	rgn = hpb->rgn_tbl + rgn_idx;
158 	srgn = rgn->srgn_tbl + srgn_idx;
159 
160 	if (likely(!srgn->is_last))
161 		bitmap_len = hpb->entries_per_srgn;
162 	else
163 		bitmap_len = hpb->last_srgn_entries;
164 
165 	if ((srgn_offset + cnt) > bitmap_len)
166 		set_bit_len = bitmap_len - srgn_offset;
167 	else
168 		set_bit_len = cnt;
169 
170 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
171 	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
172 		if (set_dirty) {
173 			if (srgn->srgn_state == HPB_SRGN_VALID)
174 				bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
175 					   set_bit_len);
176 		} else if (hpb->is_hcm) {
177 			 /* rewind the read timer for lru regions */
178 			rgn->read_timeout = ktime_add_ms(ktime_get(),
179 					rgn->hpb->params.read_timeout_ms);
180 			rgn->read_timeout_expiries =
181 				rgn->hpb->params.read_timeout_expiries;
182 		}
183 	}
184 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
185 
186 	if (hpb->is_hcm && prev_srgn != srgn) {
187 		bool activate = false;
188 
189 		spin_lock(&rgn->rgn_lock);
190 		if (set_dirty) {
191 			rgn->reads -= srgn->reads;
192 			srgn->reads = 0;
193 			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
194 		} else {
195 			srgn->reads++;
196 			rgn->reads++;
197 			if (srgn->reads == hpb->params.activation_thld)
198 				activate = true;
199 		}
200 		spin_unlock(&rgn->rgn_lock);
201 
202 		if (activate ||
203 		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
204 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
205 			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
206 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
207 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
208 				"activate region %d-%d\n", rgn_idx, srgn_idx);
209 		}
210 
211 		prev_srgn = srgn;
212 	}
213 
214 	srgn_offset = 0;
215 	if (++srgn_idx == hpb->srgns_per_rgn) {
216 		srgn_idx = 0;
217 		rgn_idx++;
218 	}
219 
220 	cnt -= set_bit_len;
221 	if (cnt > 0)
222 		goto next_srgn;
223 }
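/*
 * Rough example of how ufshpb_iterate_rgn() walks subregion boundaries,
 * assuming entries_per_srgn == 512: a request with srgn_offset == 500 and
 * cnt == 20 first covers 12 entries (500..511) of the current subregion and
 * then wraps to the next subregion (or the next region), starting again at
 * offset 0 for the remaining 8 entries.
 */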
224 
static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
226 				  int srgn_idx, int srgn_offset, int cnt)
227 {
228 	struct ufshpb_region *rgn;
229 	struct ufshpb_subregion *srgn;
230 	int bitmap_len;
231 	int bit_len;
232 
233 next_srgn:
234 	rgn = hpb->rgn_tbl + rgn_idx;
235 	srgn = rgn->srgn_tbl + srgn_idx;
236 
237 	if (likely(!srgn->is_last))
238 		bitmap_len = hpb->entries_per_srgn;
239 	else
240 		bitmap_len = hpb->last_srgn_entries;
241 
242 	if (!ufshpb_is_valid_srgn(rgn, srgn))
243 		return true;
244 
	/*
	 * If the region state is active, mctx must be allocated.
	 * In this case, check whether the region has been evicted or
	 * the mctx allocation failed.
	 */
250 	if (unlikely(!srgn->mctx)) {
251 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
252 			"no mctx in region %d subregion %d.\n",
253 			srgn->rgn_idx, srgn->srgn_idx);
254 		return true;
255 	}
256 
257 	if ((srgn_offset + cnt) > bitmap_len)
258 		bit_len = bitmap_len - srgn_offset;
259 	else
260 		bit_len = cnt;
261 
262 	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
263 			  srgn_offset) < bit_len + srgn_offset)
264 		return true;
265 
266 	srgn_offset = 0;
267 	if (++srgn_idx == hpb->srgns_per_rgn) {
268 		srgn_idx = 0;
269 		rgn_idx++;
270 	}
271 
272 	cnt -= bit_len;
273 	if (cnt > 0)
274 		goto next_srgn;
275 
276 	return false;
277 }
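/*
 * ufshpb_test_ppn_dirty() returns true when the cached entry cannot be used:
 * a covered subregion is not valid, its mctx is missing, or at least one
 * entry in the covered range is marked dirty. Only a false return allows
 * ufshpb_prep() to build an HPB READ from the cached L2P entries.
 */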
278 
static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
280 {
281 	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
282 }
283 
static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
285 				     struct ufshpb_map_ctx *mctx, int pos,
286 				     int len, __be64 *ppn_buf)
287 {
288 	struct page *page;
289 	int index, offset;
290 	int copied;
291 
292 	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
293 	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
294 
295 	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
296 		copied = len;
297 	else
298 		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
299 
300 	page = mctx->m_page[index];
301 	if (unlikely(!page)) {
302 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
303 			"error. cannot find page in mctx\n");
304 		return -ENOMEM;
305 	}
306 
307 	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
308 	       copied * HPB_ENTRY_SIZE);
309 
310 	return copied;
311 }
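/*
 * The index/offset math above locates an entry position within the mctx
 * pages. As a rough example, assuming PAGE_SIZE == 4096 and
 * HPB_ENTRY_SIZE == 8, each page holds 512 entries, so pos == 1000 maps to
 * m_page[1] at entry offset 488, and at most (512 - offset) entries are
 * copied from a single page per call.
 */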
312 
313 static void
ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
315 			int *srgn_idx, int *offset)
316 {
317 	int rgn_offset;
318 
319 	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
320 	rgn_offset = lpn & hpb->entries_per_rgn_mask;
321 	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
322 	*offset = rgn_offset & hpb->entries_per_srgn_mask;
323 }
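/*
 * Worked example for the LPN decomposition above, assuming a 32 MiB region
 * and a 2 MiB subregion with 4 KiB blocks (entries_per_rgn == 8192,
 * entries_per_srgn == 512): lpn == 10000 gives rgn_idx == 1,
 * rgn_offset == 1808, srgn_idx == 3 and offset == 272.
 */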
324 
325 static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
327 			    __be64 ppn, u8 transfer_len)
328 {
329 	unsigned char *cdb = lrbp->cmd->cmnd;
330 	__be64 ppn_tmp = ppn;
331 	cdb[0] = UFSHPB_READ;
332 
333 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
334 		ppn_tmp = swab64(ppn);
335 
336 	/* ppn value is stored as big-endian in the host memory */
337 	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
338 	cdb[14] = transfer_len;
339 	cdb[15] = 0;
340 
341 	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
342 }
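/*
 * Resulting CDB layout, as built above: byte 0 is the HPB READ opcode,
 * bytes 6..13 carry the 8-byte PPN (big-endian, optionally byte-swapped for
 * quirky devices), byte 14 is the transfer length and byte 15 is zero.
 * Bytes 1..5 keep whatever the original READ command placed there, since
 * only the fields above are overwritten.
 */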
343 
/*
 * This function sets up an HPB READ command using host-side L2P map data.
 */
int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
348 {
349 	struct ufshpb_lu *hpb;
350 	struct ufshpb_region *rgn;
351 	struct ufshpb_subregion *srgn;
352 	struct scsi_cmnd *cmd = lrbp->cmd;
353 	u32 lpn;
354 	__be64 ppn;
355 	unsigned long flags;
356 	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
357 	int err = 0;
358 
359 	hpb = ufshpb_get_hpb_data(cmd->device);
360 	if (!hpb)
361 		return -ENODEV;
362 
363 	if (ufshpb_get_state(hpb) == HPB_INIT)
364 		return -ENODEV;
365 
366 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
367 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
368 			   "%s: ufshpb state is not PRESENT", __func__);
369 		return -ENODEV;
370 	}
371 
372 	if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
373 	    (!ufshpb_is_write_or_discard(cmd) &&
374 	     !ufshpb_is_read_cmd(cmd)))
375 		return 0;
376 
377 	transfer_len = sectors_to_logical(cmd->device,
378 					  blk_rq_sectors(scsi_cmd_to_rq(cmd)));
379 	if (unlikely(!transfer_len))
380 		return 0;
381 
382 	lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
383 	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
384 	rgn = hpb->rgn_tbl + rgn_idx;
385 	srgn = rgn->srgn_tbl + srgn_idx;
386 
	/* If the command is WRITE or DISCARD, mark the bitmap as dirty */
388 	if (ufshpb_is_write_or_discard(cmd)) {
389 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
390 				   transfer_len, true);
391 		return 0;
392 	}
393 
394 	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
395 		return 0;
396 
397 	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
398 
399 	if (hpb->is_hcm) {
400 		/*
401 		 * in host control mode, reads are the main source for
402 		 * activation trials.
403 		 */
404 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
405 				   transfer_len, false);
406 
407 		/* keep those counters normalized */
408 		if (rgn->reads > hpb->entries_per_srgn)
409 			schedule_work(&hpb->ufshpb_normalization_work);
410 	}
411 
412 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
413 	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
414 				   transfer_len)) {
415 		hpb->stats.miss_cnt++;
416 		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
417 		return 0;
418 	}
419 
420 	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
421 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
422 	if (unlikely(err < 0)) {
		/*
		 * In this case, the region state is active,
		 * but the ppn table is not allocated.
		 * The ppn table must always be allocated while the
		 * region is in the active state.
		 */
429 		dev_err(hba->dev, "get ppn failed. err %d\n", err);
430 		return err;
431 	}
432 
433 	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
434 
435 	hpb->stats.hit_cnt++;
436 	return 0;
437 }
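/*
 * In short, ufshpb_prep() leaves the command untouched (returning 0) for
 * passthrough requests, unsupported chunk sizes, and inactive or dirty
 * ranges (counted as misses), and only rewrites the CDB into an HPB READ
 * when a clean, valid PPN could be fetched from the cached map (counted as
 * a hit). Writes and discards merely dirty the affected bitmap range.
 */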
438 
static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
440 					 int rgn_idx, enum req_opf dir,
441 					 bool atomic)
442 {
443 	struct ufshpb_req *rq;
444 	struct request *req;
445 	int retries = HPB_MAP_REQ_RETRIES;
446 
447 	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
448 	if (!rq)
449 		return NULL;
450 
451 retry:
452 	req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
453 			      BLK_MQ_REQ_NOWAIT);
454 
455 	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
456 		usleep_range(3000, 3100);
457 		goto retry;
458 	}
459 
460 	if (IS_ERR(req))
461 		goto free_rq;
462 
463 	rq->hpb = hpb;
464 	rq->req = req;
465 	rq->rb.rgn_idx = rgn_idx;
466 
467 	return rq;
468 
469 free_rq:
470 	kmem_cache_free(hpb->map_req_cache, rq);
471 	return NULL;
472 }
473 
static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
475 {
476 	blk_put_request(rq->req);
477 	kmem_cache_free(hpb->map_req_cache, rq);
478 }
479 
static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
481 					     struct ufshpb_subregion *srgn)
482 {
483 	struct ufshpb_req *map_req;
484 	struct bio *bio;
485 	unsigned long flags;
486 
487 	if (hpb->is_hcm &&
488 	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
489 		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
490 			 "map_req throttle. inflight %d throttle %d",
491 			 hpb->num_inflight_map_req,
492 			 hpb->params.inflight_map_req);
493 		return NULL;
494 	}
495 
496 	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
497 	if (!map_req)
498 		return NULL;
499 
500 	bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
501 	if (!bio) {
502 		ufshpb_put_req(hpb, map_req);
503 		return NULL;
504 	}
505 
506 	map_req->bio = bio;
507 
508 	map_req->rb.srgn_idx = srgn->srgn_idx;
509 	map_req->rb.mctx = srgn->mctx;
510 
511 	spin_lock_irqsave(&hpb->param_lock, flags);
512 	hpb->num_inflight_map_req++;
513 	spin_unlock_irqrestore(&hpb->param_lock, flags);
514 
515 	return map_req;
516 }
517 
static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
519 			       struct ufshpb_req *map_req)
520 {
521 	unsigned long flags;
522 
523 	bio_put(map_req->bio);
524 	ufshpb_put_req(hpb, map_req);
525 
526 	spin_lock_irqsave(&hpb->param_lock, flags);
527 	hpb->num_inflight_map_req--;
528 	spin_unlock_irqrestore(&hpb->param_lock, flags);
529 }
530 
static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
532 				     struct ufshpb_subregion *srgn)
533 {
534 	struct ufshpb_region *rgn;
535 	u32 num_entries = hpb->entries_per_srgn;
536 
537 	if (!srgn->mctx) {
538 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
539 			"no mctx in region %d subregion %d.\n",
540 			srgn->rgn_idx, srgn->srgn_idx);
541 		return -1;
542 	}
543 
544 	if (unlikely(srgn->is_last))
545 		num_entries = hpb->last_srgn_entries;
546 
547 	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
548 
549 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
550 	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
551 
552 	return 0;
553 }
554 
static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
556 				      int srgn_idx)
557 {
558 	struct ufshpb_region *rgn;
559 	struct ufshpb_subregion *srgn;
560 
561 	rgn = hpb->rgn_tbl + rgn_idx;
562 	srgn = rgn->srgn_tbl + srgn_idx;
563 
564 	list_del_init(&rgn->list_inact_rgn);
565 
566 	if (list_empty(&srgn->list_act_srgn))
567 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
568 
569 	hpb->stats.rb_active_cnt++;
570 }
571 
static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
573 {
574 	struct ufshpb_region *rgn;
575 	struct ufshpb_subregion *srgn;
576 	int srgn_idx;
577 
578 	rgn = hpb->rgn_tbl + rgn_idx;
579 
580 	for_each_sub_region(rgn, srgn_idx, srgn)
581 		list_del_init(&srgn->list_act_srgn);
582 
583 	if (list_empty(&rgn->list_inact_rgn))
584 		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
585 
586 	hpb->stats.rb_inactive_cnt++;
587 }
588 
static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
590 				      struct ufshpb_subregion *srgn)
591 {
592 	struct ufshpb_region *rgn;
593 
	/*
	 * If the subregion has no mctx after the HPB_READ_BUFFER I/O
	 * has completed, the region to which the subregion belongs was
	 * evicted. A region must not be evicted while its I/O is in
	 * progress.
	 */
600 	if (!srgn->mctx) {
601 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
602 			"no mctx in region %d subregion %d.\n",
603 			srgn->rgn_idx, srgn->srgn_idx);
604 		srgn->srgn_state = HPB_SRGN_INVALID;
605 		return;
606 	}
607 
608 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
609 
610 	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
611 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
612 			"region %d subregion %d evicted\n",
613 			srgn->rgn_idx, srgn->srgn_idx);
614 		srgn->srgn_state = HPB_SRGN_INVALID;
615 		return;
616 	}
617 	srgn->srgn_state = HPB_SRGN_VALID;
618 }
619 
static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
621 {
622 	struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
623 
624 	ufshpb_put_req(umap_req->hpb, umap_req);
625 }
626 
static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
628 {
629 	struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
630 	struct ufshpb_lu *hpb = map_req->hpb;
631 	struct ufshpb_subregion *srgn;
632 	unsigned long flags;
633 
634 	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
635 		map_req->rb.srgn_idx;
636 
637 	ufshpb_clear_dirty_bitmap(hpb, srgn);
638 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
639 	ufshpb_activate_subregion(hpb, srgn);
640 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
641 
642 	ufshpb_put_map_req(map_req->hpb, map_req);
643 }
644 
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
646 {
647 	cdb[0] = UFSHPB_WRITE_BUFFER;
648 	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
649 			  UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
650 	if (rgn)
651 		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
652 	cdb[9] = 0x00;
653 }
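/*
 * The unmap command above is an HPB WRITE BUFFER: with a region it uses the
 * single-region inactivation buffer ID and carries the region index in
 * bytes 2..3; with rgn == NULL it uses the inactivate-all buffer ID and no
 * region index is set.
 */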
654 
static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
656 				    int srgn_idx, int srgn_mem_size)
657 {
658 	cdb[0] = UFSHPB_READ_BUFFER;
659 	cdb[1] = UFSHPB_READ_BUFFER_ID;
660 
661 	put_unaligned_be16(rgn_idx, &cdb[2]);
662 	put_unaligned_be16(srgn_idx, &cdb[4]);
663 	put_unaligned_be24(srgn_mem_size, &cdb[6]);
664 
665 	cdb[9] = 0x00;
666 }
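/*
 * The HPB READ BUFFER CDB above carries the region index in bytes 2..3, the
 * subregion index in bytes 4..5 and the allocation length (the subregion
 * map size in bytes) in bytes 6..8, all big-endian.
 */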
667 
static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
669 				   struct ufshpb_req *umap_req,
670 				   struct ufshpb_region *rgn)
671 {
672 	struct request *req;
673 	struct scsi_request *rq;
674 
675 	req = umap_req->req;
676 	req->timeout = 0;
677 	req->end_io_data = (void *)umap_req;
678 	rq = scsi_req(req);
679 	ufshpb_set_unmap_cmd(rq->cmd, rgn);
680 	rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
681 
682 	blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
683 
684 	hpb->stats.umap_req_cnt++;
685 }
686 
static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
688 				  struct ufshpb_req *map_req, bool last)
689 {
690 	struct request_queue *q;
691 	struct request *req;
692 	struct scsi_request *rq;
693 	int mem_size = hpb->srgn_mem_size;
694 	int ret = 0;
695 	int i;
696 
697 	q = hpb->sdev_ufs_lu->request_queue;
698 	for (i = 0; i < hpb->pages_per_srgn; i++) {
699 		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
700 				      PAGE_SIZE, 0);
701 		if (ret != PAGE_SIZE) {
702 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
703 				   "bio_add_pc_page fail %d - %d\n",
704 				   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
705 			return ret;
706 		}
707 	}
708 
709 	req = map_req->req;
710 
711 	blk_rq_append_bio(req, map_req->bio);
712 
713 	req->end_io_data = map_req;
714 
715 	rq = scsi_req(req);
716 
717 	if (unlikely(last))
718 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
719 
720 	ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
721 				map_req->rb.srgn_idx, mem_size);
722 	rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
723 
724 	blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
725 
726 	hpb->stats.map_req_cnt++;
727 	return 0;
728 }
729 
static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
731 						 bool last)
732 {
733 	struct ufshpb_map_ctx *mctx;
734 	u32 num_entries = hpb->entries_per_srgn;
735 	int i, j;
736 
737 	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
738 	if (!mctx)
739 		return NULL;
740 
741 	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
742 	if (!mctx->m_page)
743 		goto release_mctx;
744 
745 	if (unlikely(last))
746 		num_entries = hpb->last_srgn_entries;
747 
748 	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
749 	if (!mctx->ppn_dirty)
750 		goto release_m_page;
751 
752 	for (i = 0; i < hpb->pages_per_srgn; i++) {
753 		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
754 		if (!mctx->m_page[i]) {
755 			for (j = 0; j < i; j++)
756 				mempool_free(mctx->m_page[j], ufshpb_page_pool);
757 			goto release_ppn_dirty;
758 		}
759 		clear_page(page_address(mctx->m_page[i]));
760 	}
761 
762 	return mctx;
763 
764 release_ppn_dirty:
765 	bitmap_free(mctx->ppn_dirty);
766 release_m_page:
767 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
768 release_mctx:
769 	mempool_free(mctx, ufshpb_mctx_pool);
770 	return NULL;
771 }
772 
static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
774 			       struct ufshpb_map_ctx *mctx)
775 {
776 	int i;
777 
778 	for (i = 0; i < hpb->pages_per_srgn; i++)
779 		mempool_free(mctx->m_page[i], ufshpb_page_pool);
780 
781 	bitmap_free(mctx->ppn_dirty);
782 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
783 	mempool_free(mctx, ufshpb_mctx_pool);
784 }
785 
static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
787 					  struct ufshpb_region *rgn)
788 {
789 	struct ufshpb_subregion *srgn;
790 	int srgn_idx;
791 
792 	for_each_sub_region(rgn, srgn_idx, srgn)
793 		if (srgn->srgn_state == HPB_SRGN_ISSUED)
794 			return -EPERM;
795 
796 	return 0;
797 }
798 
static void ufshpb_read_to_handler(struct work_struct *work)
800 {
801 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
802 					     ufshpb_read_to_work.work);
803 	struct victim_select_info *lru_info = &hpb->lru_info;
804 	struct ufshpb_region *rgn, *next_rgn;
805 	unsigned long flags;
806 	unsigned int poll;
807 	LIST_HEAD(expired_list);
808 
809 	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
810 		return;
811 
812 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
813 
814 	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
815 				 list_lru_rgn) {
816 		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
817 
818 		if (timedout) {
819 			rgn->read_timeout_expiries--;
820 			if (is_rgn_dirty(rgn) ||
821 			    rgn->read_timeout_expiries == 0)
822 				list_add(&rgn->list_expired_rgn, &expired_list);
823 			else
824 				rgn->read_timeout = ktime_add_ms(ktime_get(),
825 						hpb->params.read_timeout_ms);
826 		}
827 	}
828 
829 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
830 
831 	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
832 				 list_expired_rgn) {
833 		list_del_init(&rgn->list_expired_rgn);
834 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
835 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
836 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
837 	}
838 
839 	ufshpb_kick_map_work(hpb);
840 
841 	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
842 
843 	poll = hpb->params.timeout_polling_interval_ms;
844 	schedule_delayed_work(&hpb->ufshpb_read_to_work,
845 			      msecs_to_jiffies(poll));
846 }
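/*
 * Read timeout handling (host control mode): each active region gets a
 * read_timeout deadline and a budget of read_timeout_expiries. When the
 * deadline passes, a dirty region or one whose budget is exhausted is
 * queued for inactivation; otherwise the deadline is simply rearmed. The
 * work then reschedules itself every timeout_polling_interval_ms.
 */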
847 
static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
849 				struct ufshpb_region *rgn)
850 {
851 	rgn->rgn_state = HPB_RGN_ACTIVE;
852 	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
853 	atomic_inc(&lru_info->active_cnt);
854 	if (rgn->hpb->is_hcm) {
855 		rgn->read_timeout =
856 			ktime_add_ms(ktime_get(),
857 				     rgn->hpb->params.read_timeout_ms);
858 		rgn->read_timeout_expiries =
859 			rgn->hpb->params.read_timeout_expiries;
860 	}
861 }
862 
static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
864 				struct ufshpb_region *rgn)
865 {
866 	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
867 }
868 
static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
870 {
871 	struct victim_select_info *lru_info = &hpb->lru_info;
872 	struct ufshpb_region *rgn, *victim_rgn = NULL;
873 
874 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
875 		if (!rgn) {
876 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
877 				"%s: no region allocated\n",
878 				__func__);
879 			return NULL;
880 		}
881 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
882 			continue;
883 
		/*
		 * In host control mode, only evict a region whose read
		 * count has dropped to or below the exit threshold
		 * (eviction_thld_exit).
		 */
888 		if (hpb->is_hcm &&
889 		    rgn->reads > hpb->params.eviction_thld_exit)
890 			continue;
891 
892 		victim_rgn = rgn;
893 		break;
894 	}
895 
896 	return victim_rgn;
897 }
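/*
 * Victim selection scans the LRU list from its least recently used end and
 * skips regions that still have subregions in the ISSUED state; in host
 * control mode it also skips regions whose read count is still above
 * eviction_thld_exit. NULL is returned when no eligible victim exists.
 */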
898 
static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
900 				    struct ufshpb_region *rgn)
901 {
902 	list_del_init(&rgn->list_lru_rgn);
903 	rgn->rgn_state = HPB_RGN_INACTIVE;
904 	atomic_dec(&lru_info->active_cnt);
905 }
906 
static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
908 					  struct ufshpb_subregion *srgn)
909 {
910 	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
911 		ufshpb_put_map_ctx(hpb, srgn->mctx);
912 		srgn->srgn_state = HPB_SRGN_UNUSED;
913 		srgn->mctx = NULL;
914 	}
915 }
916 
static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
918 				 struct ufshpb_region *rgn,
919 				 bool atomic)
920 {
921 	struct ufshpb_req *umap_req;
922 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
923 
924 	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
925 	if (!umap_req)
926 		return -ENOMEM;
927 
928 	ufshpb_execute_umap_req(hpb, umap_req, rgn);
929 
930 	return 0;
931 }
932 
static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
934 					struct ufshpb_region *rgn)
935 {
936 	return ufshpb_issue_umap_req(hpb, rgn, true);
937 }
938 
static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
940 {
941 	return ufshpb_issue_umap_req(hpb, NULL, false);
942 }
943 
static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
945 				 struct ufshpb_region *rgn)
946 {
947 	struct victim_select_info *lru_info;
948 	struct ufshpb_subregion *srgn;
949 	int srgn_idx;
950 
951 	lru_info = &hpb->lru_info;
952 
953 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
954 
955 	ufshpb_cleanup_lru_info(lru_info, rgn);
956 
957 	for_each_sub_region(rgn, srgn_idx, srgn)
958 		ufshpb_purge_active_subregion(hpb, srgn);
959 }
960 
static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
962 {
963 	unsigned long flags;
964 	int ret = 0;
965 
966 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
967 	if (rgn->rgn_state == HPB_RGN_PINNED) {
968 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
969 			 "pinned region cannot drop-out. region %d\n",
970 			 rgn->rgn_idx);
971 		goto out;
972 	}
973 
974 	if (!list_empty(&rgn->list_lru_rgn)) {
975 		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
976 			ret = -EBUSY;
977 			goto out;
978 		}
979 
980 		if (hpb->is_hcm) {
981 			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
982 			ret = ufshpb_issue_umap_single_req(hpb, rgn);
983 			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
984 			if (ret)
985 				goto out;
986 		}
987 
988 		__ufshpb_evict_region(hpb, rgn);
989 	}
990 out:
991 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
992 	return ret;
993 }
994 
static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
996 				struct ufshpb_region *rgn,
997 				struct ufshpb_subregion *srgn)
998 {
999 	struct ufshpb_req *map_req;
1000 	unsigned long flags;
1001 	int ret;
1002 	int err = -EAGAIN;
1003 	bool alloc_required = false;
1004 	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1005 
1006 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1007 
1008 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1009 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1010 			   "%s: ufshpb state is not PRESENT\n", __func__);
1011 		goto unlock_out;
1012 	}
1013 
1014 	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1015 	    (srgn->srgn_state == HPB_SRGN_INVALID)) {
1016 		err = 0;
1017 		goto unlock_out;
1018 	}
1019 
1020 	if (srgn->srgn_state == HPB_SRGN_UNUSED)
1021 		alloc_required = true;
1022 
	/*
	 * If the subregion is already in the ISSUED state, a device-side
	 * event (e.g. GC or wear-leveling) occurred and an HPB response
	 * requesting a map reload was received. In that case, after the
	 * current HPB_READ_BUFFER finishes, the next HPB_READ_BUFFER is
	 * issued again to obtain the latest map data.
	 */
1031 	if (srgn->srgn_state == HPB_SRGN_ISSUED)
1032 		goto unlock_out;
1033 
1034 	srgn->srgn_state = HPB_SRGN_ISSUED;
1035 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1036 
1037 	if (alloc_required) {
1038 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1039 		if (!srgn->mctx) {
1040 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1041 			    "get map_ctx failed. region %d - %d\n",
1042 			    rgn->rgn_idx, srgn->srgn_idx);
1043 			state = HPB_SRGN_UNUSED;
1044 			goto change_srgn_state;
1045 		}
1046 	}
1047 
1048 	map_req = ufshpb_get_map_req(hpb, srgn);
1049 	if (!map_req)
1050 		goto change_srgn_state;
1051 
1052 
1053 	ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1054 	if (ret) {
1055 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1056 			   "%s: issue map_req failed: %d, region %d - %d\n",
1057 			   __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1058 		goto free_map_req;
1059 	}
1060 	return 0;
1061 
1062 free_map_req:
1063 	ufshpb_put_map_req(hpb, map_req);
1064 change_srgn_state:
1065 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1066 	srgn->srgn_state = state;
1067 unlock_out:
1068 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1069 	return err;
1070 }
1071 
static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1073 {
1074 	struct ufshpb_region *victim_rgn = NULL;
1075 	struct victim_select_info *lru_info = &hpb->lru_info;
1076 	unsigned long flags;
1077 	int ret = 0;
1078 
1079 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	/*
	 * If the region is already on the lru_list, it is already in the
	 * active state, so just move it to the most-recently-used end of
	 * the LRU list.
	 */
1085 	if (!list_empty(&rgn->list_lru_rgn)) {
1086 		ufshpb_hit_lru_info(lru_info, rgn);
1087 		goto out;
1088 	}
1089 
1090 	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1091 		if (atomic_read(&lru_info->active_cnt) ==
1092 		    lru_info->max_lru_active_cnt) {
			/*
			 * If the maximum number of active regions is
			 * reached, evict the least recently used region.
			 * This case may occur when the device responds
			 * to the eviction information late.
			 * It is okay to evict the least recently used
			 * region, because the device can detect that the
			 * region is no longer used by the absence of
			 * HPB_READ commands for it.
			 *
			 * In host control mode, also verify that the
			 * entering region has enough reads.
			 */
1105 			if (hpb->is_hcm &&
1106 			    rgn->reads < hpb->params.eviction_thld_enter) {
1107 				ret = -EACCES;
1108 				goto out;
1109 			}
1110 
1111 			victim_rgn = ufshpb_victim_lru_info(hpb);
1112 			if (!victim_rgn) {
1113 				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1114 				    "cannot get victim region %s\n",
1115 				    hpb->is_hcm ? "" : "error");
1116 				ret = -ENOMEM;
1117 				goto out;
1118 			}
1119 
1120 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1121 				"LRU full (%d), choose victim %d\n",
1122 				atomic_read(&lru_info->active_cnt),
1123 				victim_rgn->rgn_idx);
1124 
1125 			if (hpb->is_hcm) {
1126 				spin_unlock_irqrestore(&hpb->rgn_state_lock,
1127 						       flags);
1128 				ret = ufshpb_issue_umap_single_req(hpb,
1129 								victim_rgn);
1130 				spin_lock_irqsave(&hpb->rgn_state_lock,
1131 						  flags);
1132 				if (ret)
1133 					goto out;
1134 			}
1135 
1136 			__ufshpb_evict_region(hpb, victim_rgn);
1137 		}
1138 
		/*
		 * When a region is added to the lru_info list, it is
		 * guaranteed that all of its subregions have been assigned
		 * an mctx. If that fails, the mctx is requested again later
		 * without the region being added to the lru_info list.
		 */
1145 		ufshpb_add_lru_info(lru_info, rgn);
1146 	}
1147 out:
1148 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1149 	return ret;
1150 }
1151 
static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1153 					 struct utp_hpb_rsp *rsp_field)
1154 {
1155 	struct ufshpb_region *rgn;
1156 	struct ufshpb_subregion *srgn;
1157 	int i, rgn_i, srgn_i;
1158 
1159 	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
	/*
	 * If the same region is reported as both active and inactive,
	 * the region is inactivated. The device can detect this (the
	 * region was inactivated) and will respond with the proper
	 * active region information.
	 */
1166 	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1167 		rgn_i =
1168 			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1169 		srgn_i =
1170 			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1171 
1172 		rgn = hpb->rgn_tbl + rgn_i;
1173 		if (hpb->is_hcm &&
1174 		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
			/*
			 * In host control mode, subregion activation
			 * recommendations are only honored for active
			 * regions. Also, recommendations for dirty regions
			 * are ignored - the host makes decisions concerning
			 * those by itself.
			 */
1181 			continue;
1182 		}
1183 
1184 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1185 			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1186 
1187 		spin_lock(&hpb->rsp_list_lock);
1188 		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1189 		spin_unlock(&hpb->rsp_list_lock);
1190 
1191 		srgn = rgn->srgn_tbl + srgn_i;
1192 
1193 		/* blocking HPB_READ */
1194 		spin_lock(&hpb->rgn_state_lock);
1195 		if (srgn->srgn_state == HPB_SRGN_VALID)
1196 			srgn->srgn_state = HPB_SRGN_INVALID;
1197 		spin_unlock(&hpb->rgn_state_lock);
1198 	}
1199 
1200 	if (hpb->is_hcm) {
1201 		/*
1202 		 * in host control mode the device is not allowed to inactivate
1203 		 * regions
1204 		 */
1205 		goto out;
1206 	}
1207 
1208 	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1209 		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1210 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1211 			"inactivate(%d) region %d\n", i, rgn_i);
1212 
1213 		spin_lock(&hpb->rsp_list_lock);
1214 		ufshpb_update_inactive_info(hpb, rgn_i);
1215 		spin_unlock(&hpb->rsp_list_lock);
1216 
1217 		rgn = hpb->rgn_tbl + rgn_i;
1218 
1219 		spin_lock(&hpb->rgn_state_lock);
1220 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1221 			for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1222 				srgn = rgn->srgn_tbl + srgn_i;
1223 				if (srgn->srgn_state == HPB_SRGN_VALID)
1224 					srgn->srgn_state = HPB_SRGN_INVALID;
1225 			}
1226 		}
1227 		spin_unlock(&hpb->rgn_state_lock);
1228 
1229 	}
1230 
1231 out:
1232 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1233 		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1234 
1235 	if (ufshpb_get_state(hpb) == HPB_PRESENT)
1236 		queue_work(ufshpb_wq, &hpb->map_work);
1237 }
1238 
static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1240 {
1241 	struct victim_select_info *lru_info = &hpb->lru_info;
1242 	struct ufshpb_region *rgn;
1243 	unsigned long flags;
1244 
1245 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1246 
1247 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1248 		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1249 
1250 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1251 }
1252 
/*
 * This function parses the recommended active subregion information in the
 * sense data field of a response UPIU with SAM_STAT_GOOD status.
 */
void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1258 {
1259 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1260 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1261 	int data_seg_len;
1262 
1263 	if (unlikely(lrbp->lun != rsp_field->lun)) {
1264 		struct scsi_device *sdev;
1265 		bool found = false;
1266 
1267 		__shost_for_each_device(sdev, hba->host) {
1268 			hpb = ufshpb_get_hpb_data(sdev);
1269 
1270 			if (!hpb)
1271 				continue;
1272 
1273 			if (rsp_field->lun == hpb->lun) {
1274 				found = true;
1275 				break;
1276 			}
1277 		}
1278 
1279 		if (!found)
1280 			return;
1281 	}
1282 
1283 	if (!hpb)
1284 		return;
1285 
1286 	if (ufshpb_get_state(hpb) == HPB_INIT)
1287 		return;
1288 
1289 	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1290 	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1291 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1292 			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
1293 			   __func__);
1294 		return;
1295 	}
1296 
1297 	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1298 		& MASK_RSP_UPIU_DATA_SEG_LEN;
1299 
	/* To flush the remaining rsp_list, queue the map_work task */
1301 	if (!data_seg_len) {
1302 		if (!ufshpb_is_general_lun(hpb->lun))
1303 			return;
1304 
1305 		ufshpb_kick_map_work(hpb);
1306 		return;
1307 	}
1308 
1309 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1310 
1311 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1312 		return;
1313 
1314 	hpb->stats.rb_noti_cnt++;
1315 
1316 	switch (rsp_field->hpb_op) {
1317 	case HPB_RSP_REQ_REGION_UPDATE:
1318 		if (data_seg_len != DEV_DATA_SEG_LEN)
1319 			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1320 				 "%s: data seg length is not same.\n",
1321 				 __func__);
1322 		ufshpb_rsp_req_region_update(hpb, rsp_field);
1323 		break;
1324 	case HPB_RSP_DEV_RESET:
1325 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1326 			 "UFS device lost HPB information during PM.\n");
1327 
1328 		if (hpb->is_hcm) {
1329 			struct scsi_device *sdev;
1330 
1331 			__shost_for_each_device(sdev, hba->host) {
1332 				struct ufshpb_lu *h = sdev->hostdata;
1333 
1334 				if (h)
1335 					ufshpb_dev_reset_handler(h);
1336 			}
1337 		}
1338 
1339 		break;
1340 	default:
1341 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1342 			   "hpb_op is not available: %d\n",
1343 			   rsp_field->hpb_op);
1344 		break;
1345 	}
1346 }
1347 
static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1349 				   struct ufshpb_region *rgn,
1350 				   struct ufshpb_subregion *srgn)
1351 {
1352 	if (!list_empty(&rgn->list_inact_rgn))
1353 		return;
1354 
1355 	if (!list_empty(&srgn->list_act_srgn)) {
1356 		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1357 		return;
1358 	}
1359 
1360 	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1361 }
1362 
static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1364 					  struct ufshpb_region *rgn,
1365 					  struct list_head *pending_list)
1366 {
1367 	struct ufshpb_subregion *srgn;
1368 	int srgn_idx;
1369 
1370 	if (!list_empty(&rgn->list_inact_rgn))
1371 		return;
1372 
1373 	for_each_sub_region(rgn, srgn_idx, srgn)
1374 		if (!list_empty(&srgn->list_act_srgn))
1375 			return;
1376 
1377 	list_add_tail(&rgn->list_inact_rgn, pending_list);
1378 }
1379 
static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1381 {
1382 	struct ufshpb_region *rgn;
1383 	struct ufshpb_subregion *srgn;
1384 	unsigned long flags;
1385 	int ret = 0;
1386 
1387 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1388 	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1389 						struct ufshpb_subregion,
1390 						list_act_srgn))) {
1391 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1392 			break;
1393 
1394 		list_del_init(&srgn->list_act_srgn);
1395 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1396 
1397 		rgn = hpb->rgn_tbl + srgn->rgn_idx;
1398 		ret = ufshpb_add_region(hpb, rgn);
1399 		if (ret)
1400 			goto active_failed;
1401 
1402 		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1403 		if (ret) {
1404 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1405 			    "issue map_req failed. ret %d, region %d - %d\n",
1406 			    ret, rgn->rgn_idx, srgn->srgn_idx);
1407 			goto active_failed;
1408 		}
1409 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1410 	}
1411 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1412 	return;
1413 
1414 active_failed:
1415 	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1416 		   rgn->rgn_idx, srgn->srgn_idx);
1417 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1418 	ufshpb_add_active_list(hpb, rgn, srgn);
1419 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1420 }
1421 
static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1423 {
1424 	struct ufshpb_region *rgn;
1425 	unsigned long flags;
1426 	int ret;
1427 	LIST_HEAD(pending_list);
1428 
1429 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1430 	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1431 					       struct ufshpb_region,
1432 					       list_inact_rgn))) {
1433 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1434 			break;
1435 
1436 		list_del_init(&rgn->list_inact_rgn);
1437 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1438 
1439 		ret = ufshpb_evict_region(hpb, rgn);
1440 		if (ret) {
1441 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1442 			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1443 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1444 		}
1445 
1446 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1447 	}
1448 
1449 	list_splice(&pending_list, &hpb->lh_inact_rgn);
1450 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1451 }
1452 
static void ufshpb_normalization_work_handler(struct work_struct *work)
1454 {
1455 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1456 					     ufshpb_normalization_work);
1457 	int rgn_idx;
1458 	u8 factor = hpb->params.normalization_factor;
1459 
1460 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1461 		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1462 		int srgn_idx;
1463 
1464 		spin_lock(&rgn->rgn_lock);
1465 		rgn->reads = 0;
1466 		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1467 			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1468 
1469 			srgn->reads >>= factor;
1470 			rgn->reads += srgn->reads;
1471 		}
1472 		spin_unlock(&rgn->rgn_lock);
1473 
1474 		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1475 			continue;
1476 
1477 		/* if region is active but has no reads - inactivate it */
1478 		spin_lock(&hpb->rsp_list_lock);
1479 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1480 		spin_unlock(&hpb->rsp_list_lock);
1481 	}
1482 }
1483 
static void ufshpb_map_work_handler(struct work_struct *work)
1485 {
1486 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1487 
1488 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1489 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1490 			   "%s: ufshpb state is not PRESENT\n", __func__);
1491 		return;
1492 	}
1493 
1494 	ufshpb_run_inactive_region_list(hpb);
1495 	ufshpb_run_active_subregion_list(hpb);
1496 }
1497 
/*
 * This function does not need to hold any locks (rgn_state_lock,
 * rsp_list_lock, etc.) because it is only called during initialization.
 */
static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1503 					    struct ufshpb_lu *hpb,
1504 					    struct ufshpb_region *rgn)
1505 {
1506 	struct ufshpb_subregion *srgn;
1507 	int srgn_idx, i;
1508 	int err = 0;
1509 
1510 	for_each_sub_region(rgn, srgn_idx, srgn) {
1511 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1512 		srgn->srgn_state = HPB_SRGN_INVALID;
1513 		if (!srgn->mctx) {
1514 			err = -ENOMEM;
1515 			dev_err(hba->dev,
1516 				"alloc mctx for pinned region failed\n");
1517 			goto release;
1518 		}
1519 
1520 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1521 	}
1522 
1523 	rgn->rgn_state = HPB_RGN_PINNED;
1524 	return 0;
1525 
1526 release:
1527 	for (i = 0; i < srgn_idx; i++) {
1528 		srgn = rgn->srgn_tbl + i;
1529 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1530 	}
1531 	return err;
1532 }
1533 
static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1535 				      struct ufshpb_region *rgn, bool last)
1536 {
1537 	int srgn_idx;
1538 	struct ufshpb_subregion *srgn;
1539 
1540 	for_each_sub_region(rgn, srgn_idx, srgn) {
1541 		INIT_LIST_HEAD(&srgn->list_act_srgn);
1542 
1543 		srgn->rgn_idx = rgn->rgn_idx;
1544 		srgn->srgn_idx = srgn_idx;
1545 		srgn->srgn_state = HPB_SRGN_UNUSED;
1546 	}
1547 
1548 	if (unlikely(last && hpb->last_srgn_entries))
1549 		srgn->is_last = true;
1550 }
1551 
static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1553 				      struct ufshpb_region *rgn, int srgn_cnt)
1554 {
1555 	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1556 				 GFP_KERNEL);
1557 	if (!rgn->srgn_tbl)
1558 		return -ENOMEM;
1559 
1560 	rgn->srgn_cnt = srgn_cnt;
1561 	return 0;
1562 }
1563 
static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1565 				     struct ufshpb_lu *hpb,
1566 				     struct ufshpb_dev_info *hpb_dev_info,
1567 				     struct ufshpb_lu_info *hpb_lu_info)
1568 {
1569 	u32 entries_per_rgn;
1570 	u64 rgn_mem_size, tmp;
1571 
1572 	if (ufshpb_is_legacy(hba))
1573 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1574 	else
1575 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
1576 
1577 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1578 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1579 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1580 		: PINNED_NOT_SET;
1581 	hpb->lru_info.max_lru_active_cnt =
1582 		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1583 
1584 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1585 			* HPB_ENTRY_SIZE;
1586 	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1587 	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1588 		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1589 
1590 	tmp = rgn_mem_size;
1591 	do_div(tmp, HPB_ENTRY_SIZE);
1592 	entries_per_rgn = (u32)tmp;
1593 	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1594 	hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1595 
1596 	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1597 	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1598 	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1599 
1600 	tmp = rgn_mem_size;
1601 	do_div(tmp, hpb->srgn_mem_size);
1602 	hpb->srgns_per_rgn = (int)tmp;
1603 
1604 	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1605 				entries_per_rgn);
1606 	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1607 				(hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1608 	hpb->last_srgn_entries = hpb_lu_info->num_blocks
1609 				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1610 
1611 	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1612 
1613 	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1614 		hpb->is_hcm = true;
1615 }
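/*
 * Rough sizing example for the math above, assuming HPB_RGN_SIZE_UNIT of
 * 512, HPB_ENTRY_BLOCK_SIZE of 4 KiB and HPB_ENTRY_SIZE of 8 bytes, with
 * descriptor values rgn_size == 16 and srgn_size == 12: a region covers
 * 32 MiB and needs 64 KiB of map memory (8192 entries), a subregion covers
 * 2 MiB and needs 4 KiB (512 entries), giving 16 subregions per region and
 * one 4 KiB page per subregion.
 */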
1616 
static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1618 {
1619 	struct ufshpb_region *rgn_table, *rgn;
1620 	int rgn_idx, i;
1621 	int ret = 0;
1622 
1623 	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1624 			    GFP_KERNEL);
1625 	if (!rgn_table)
1626 		return -ENOMEM;
1627 
1628 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1629 		int srgn_cnt = hpb->srgns_per_rgn;
1630 		bool last_srgn = false;
1631 
1632 		rgn = rgn_table + rgn_idx;
1633 		rgn->rgn_idx = rgn_idx;
1634 
1635 		spin_lock_init(&rgn->rgn_lock);
1636 
1637 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
1638 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
1639 		INIT_LIST_HEAD(&rgn->list_expired_rgn);
1640 
1641 		if (rgn_idx == hpb->rgns_per_lu - 1) {
1642 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
1643 				    hpb->srgns_per_rgn) + 1;
1644 			last_srgn = true;
1645 		}
1646 
1647 		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1648 		if (ret)
1649 			goto release_srgn_table;
1650 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1651 
1652 		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1653 			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1654 			if (ret)
1655 				goto release_srgn_table;
1656 		} else {
1657 			rgn->rgn_state = HPB_RGN_INACTIVE;
1658 		}
1659 
1660 		rgn->rgn_flags = 0;
1661 		rgn->hpb = hpb;
1662 	}
1663 
1664 	hpb->rgn_tbl = rgn_table;
1665 
1666 	return 0;
1667 
1668 release_srgn_table:
1669 	for (i = 0; i <= rgn_idx; i++)
1670 		kvfree(rgn_table[i].srgn_tbl);
1671 
1672 	kvfree(rgn_table);
1673 	return ret;
1674 }
1675 
static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1677 					 struct ufshpb_region *rgn)
1678 {
1679 	int srgn_idx;
1680 	struct ufshpb_subregion *srgn;
1681 
1682 	for_each_sub_region(rgn, srgn_idx, srgn)
1683 		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1684 			srgn->srgn_state = HPB_SRGN_UNUSED;
1685 			ufshpb_put_map_ctx(hpb, srgn->mctx);
1686 		}
1687 }
1688 
static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1690 {
1691 	int rgn_idx;
1692 
1693 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1694 		struct ufshpb_region *rgn;
1695 
1696 		rgn = hpb->rgn_tbl + rgn_idx;
1697 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1698 			rgn->rgn_state = HPB_RGN_INACTIVE;
1699 
1700 			ufshpb_destroy_subregion_tbl(hpb, rgn);
1701 		}
1702 
1703 		kvfree(rgn->srgn_tbl);
1704 	}
1705 
1706 	kvfree(hpb->rgn_tbl);
1707 }
1708 
1709 /* SYSFS functions */
1710 #define ufshpb_sysfs_attr_show_func(__name)				\
1711 static ssize_t __name##_show(struct device *dev,			\
1712 	struct device_attribute *attr, char *buf)			\
1713 {									\
1714 	struct scsi_device *sdev = to_scsi_device(dev);			\
1715 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
1716 									\
1717 	if (!hpb)							\
1718 		return -ENODEV;						\
1719 									\
1720 	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
1721 }									\
1722 \
1723 static DEVICE_ATTR_RO(__name)
1724 
1725 ufshpb_sysfs_attr_show_func(hit_cnt);
1726 ufshpb_sysfs_attr_show_func(miss_cnt);
1727 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1728 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1729 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1730 ufshpb_sysfs_attr_show_func(map_req_cnt);
1731 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1732 
1733 static struct attribute *hpb_dev_stat_attrs[] = {
1734 	&dev_attr_hit_cnt.attr,
1735 	&dev_attr_miss_cnt.attr,
1736 	&dev_attr_rb_noti_cnt.attr,
1737 	&dev_attr_rb_active_cnt.attr,
1738 	&dev_attr_rb_inactive_cnt.attr,
1739 	&dev_attr_map_req_cnt.attr,
1740 	&dev_attr_umap_req_cnt.attr,
1741 	NULL,
1742 };
1743 
1744 struct attribute_group ufs_sysfs_hpb_stat_group = {
1745 	.name = "hpb_stats",
1746 	.attrs = hpb_dev_stat_attrs,
1747 };
1748 
/* SYSFS functions - HPB parameters */
1750 #define ufshpb_sysfs_param_show_func(__name)				\
1751 static ssize_t __name##_show(struct device *dev,			\
1752 	struct device_attribute *attr, char *buf)			\
1753 {									\
1754 	struct scsi_device *sdev = to_scsi_device(dev);			\
1755 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
1756 									\
1757 	if (!hpb)							\
1758 		return -ENODEV;						\
1759 									\
1760 	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
1761 }
1762 
1763 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1764 static ssize_t
1765 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1766 			 const char *buf, size_t count)
1767 {
1768 	struct scsi_device *sdev = to_scsi_device(dev);
1769 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1770 	int val;
1771 
1772 	if (!hpb)
1773 		return -ENODEV;
1774 
1775 	if (kstrtouint(buf, 0, &val))
1776 		return -EINVAL;
1777 
1778 	if (val < 0)
1779 		return -EINVAL;
1780 
1781 	hpb->params.requeue_timeout_ms = val;
1782 
1783 	return count;
1784 }
1785 static DEVICE_ATTR_RW(requeue_timeout_ms);
1786 
1787 ufshpb_sysfs_param_show_func(activation_thld);
1788 static ssize_t
1789 activation_thld_store(struct device *dev, struct device_attribute *attr,
1790 		      const char *buf, size_t count)
1791 {
1792 	struct scsi_device *sdev = to_scsi_device(dev);
1793 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1794 	int val;
1795 
1796 	if (!hpb)
1797 		return -ENODEV;
1798 
1799 	if (!hpb->is_hcm)
1800 		return -EOPNOTSUPP;
1801 
1802 	if (kstrtouint(buf, 0, &val))
1803 		return -EINVAL;
1804 
1805 	if (val <= 0)
1806 		return -EINVAL;
1807 
1808 	hpb->params.activation_thld = val;
1809 
1810 	return count;
1811 }
1812 static DEVICE_ATTR_RW(activation_thld);
1813 
1814 ufshpb_sysfs_param_show_func(normalization_factor);
1815 static ssize_t
1816 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1817 			   const char *buf, size_t count)
1818 {
1819 	struct scsi_device *sdev = to_scsi_device(dev);
1820 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1821 	int val;
1822 
1823 	if (!hpb)
1824 		return -ENODEV;
1825 
1826 	if (!hpb->is_hcm)
1827 		return -EOPNOTSUPP;
1828 
1829 	if (kstrtouint(buf, 0, &val))
1830 		return -EINVAL;
1831 
1832 	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1833 		return -EINVAL;
1834 
1835 	hpb->params.normalization_factor = val;
1836 
1837 	return count;
1838 }
1839 static DEVICE_ATTR_RW(normalization_factor);
1840 
1841 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1842 static ssize_t
1843 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1844 			  const char *buf, size_t count)
1845 {
1846 	struct scsi_device *sdev = to_scsi_device(dev);
1847 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1848 	int val;
1849 
1850 	if (!hpb)
1851 		return -ENODEV;
1852 
1853 	if (!hpb->is_hcm)
1854 		return -EOPNOTSUPP;
1855 
1856 	if (kstrtouint(buf, 0, &val))
1857 		return -EINVAL;
1858 
1859 	if (val <= hpb->params.eviction_thld_exit)
1860 		return -EINVAL;
1861 
1862 	hpb->params.eviction_thld_enter = val;
1863 
1864 	return count;
1865 }
1866 static DEVICE_ATTR_RW(eviction_thld_enter);
1867 
1868 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1869 static ssize_t
1870 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1871 			 const char *buf, size_t count)
1872 {
1873 	struct scsi_device *sdev = to_scsi_device(dev);
1874 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1875 	int val;
1876 
1877 	if (!hpb)
1878 		return -ENODEV;
1879 
1880 	if (!hpb->is_hcm)
1881 		return -EOPNOTSUPP;
1882 
1883 	if (kstrtouint(buf, 0, &val))
1884 		return -EINVAL;
1885 
1886 	if (val <= hpb->params.activation_thld)
1887 		return -EINVAL;
1888 
1889 	hpb->params.eviction_thld_exit = val;
1890 
1891 	return count;
1892 }
1893 static DEVICE_ATTR_RW(eviction_thld_exit);
1894 
1895 ufshpb_sysfs_param_show_func(read_timeout_ms);
1896 static ssize_t
1897 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1898 		      const char *buf, size_t count)
1899 {
1900 	struct scsi_device *sdev = to_scsi_device(dev);
1901 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1902 	int val;
1903 
1904 	if (!hpb)
1905 		return -ENODEV;
1906 
1907 	if (!hpb->is_hcm)
1908 		return -EOPNOTSUPP;
1909 
1910 	if (kstrtouint(buf, 0, &val))
1911 		return -EINVAL;
1912 
1913 	/* read_timeout must be much greater than timeout_polling_interval */
1914 	if (val < hpb->params.timeout_polling_interval_ms * 2)
1915 		return -EINVAL;
1916 
1917 	hpb->params.read_timeout_ms = val;
1918 
1919 	return count;
1920 }
1921 static DEVICE_ATTR_RW(read_timeout_ms);
1922 
1923 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1924 static ssize_t
1925 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1926 			    const char *buf, size_t count)
1927 {
1928 	struct scsi_device *sdev = to_scsi_device(dev);
1929 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1930 	int val;
1931 
1932 	if (!hpb)
1933 		return -ENODEV;
1934 
1935 	if (!hpb->is_hcm)
1936 		return -EOPNOTSUPP;
1937 
1938 	if (kstrtouint(buf, 0, &val))
1939 		return -EINVAL;
1940 
1941 	if (val <= 0)
1942 		return -EINVAL;
1943 
1944 	hpb->params.read_timeout_expiries = val;
1945 
1946 	return count;
1947 }
1948 static DEVICE_ATTR_RW(read_timeout_expiries);
1949 
1950 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1951 static ssize_t
1952 timeout_polling_interval_ms_store(struct device *dev,
1953 				  struct device_attribute *attr,
1954 				  const char *buf, size_t count)
1955 {
1956 	struct scsi_device *sdev = to_scsi_device(dev);
1957 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1958 	int val;
1959 
1960 	if (!hpb)
1961 		return -ENODEV;
1962 
1963 	if (!hpb->is_hcm)
1964 		return -EOPNOTSUPP;
1965 
1966 	if (kstrtouint(buf, 0, &val))
1967 		return -EINVAL;
1968 
1969 	/* timeout_polling_interval must be much smaller than read_timeout */
1970 	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1971 		return -EINVAL;
1972 
1973 	hpb->params.timeout_polling_interval_ms = val;
1974 
1975 	return count;
1976 }
1977 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1978 
1979 ufshpb_sysfs_param_show_func(inflight_map_req);
1980 static ssize_t inflight_map_req_store(struct device *dev,
1981 				      struct device_attribute *attr,
1982 				      const char *buf, size_t count)
1983 {
1984 	struct scsi_device *sdev = to_scsi_device(dev);
1985 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1986 	int val;
1987 
1988 	if (!hpb)
1989 		return -ENODEV;
1990 
1991 	if (!hpb->is_hcm)
1992 		return -EOPNOTSUPP;
1993 
1994 	if (kstrtouint(buf, 0, &val))
1995 		return -EINVAL;
1996 
1997 	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1998 		return -EINVAL;
1999 
2000 	hpb->params.inflight_map_req = val;
2001 
2002 	return count;
2003 }
2004 static DEVICE_ATTR_RW(inflight_map_req);
2005 
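/*
 * Default tunables for host control mode. With the constants defined at the
 * top of this file this yields: activation_thld 8, eviction_thld_enter 256
 * (ACTIVATION_THRESHOLD << 5), eviction_thld_exit 128 (ACTIVATION_THRESHOLD
 * << 4), a 1000 ms read timeout with 100 allowed expiries, a 200 ms polling
 * interval and at most one in-flight map request.
 */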
2006 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2007 {
2008 	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2009 	hpb->params.normalization_factor = 1;
2010 	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2011 	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2012 	hpb->params.read_timeout_ms = READ_TO_MS;
2013 	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2014 	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2015 	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2016 }
2017 
2018 static struct attribute *hpb_dev_param_attrs[] = {
2019 	&dev_attr_requeue_timeout_ms.attr,
2020 	&dev_attr_activation_thld.attr,
2021 	&dev_attr_normalization_factor.attr,
2022 	&dev_attr_eviction_thld_enter.attr,
2023 	&dev_attr_eviction_thld_exit.attr,
2024 	&dev_attr_read_timeout_ms.attr,
2025 	&dev_attr_read_timeout_expiries.attr,
2026 	&dev_attr_timeout_polling_interval_ms.attr,
2027 	&dev_attr_inflight_map_req.attr,
2028 	NULL,
2029 };
2030 
2031 struct attribute_group ufs_sysfs_hpb_param_group = {
2032 	.name = "hpb_params",
2033 	.attrs = hpb_dev_param_attrs,
2034 };
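
/*
 * Example of tuning a parameter from user space (sketch only; the path may
 * vary, but the "hpb_params" group sits next to "hpb_stats" on the
 * scsi_device node). Attributes guarded by hpb->is_hcm return -EOPNOTSUPP
 * unless the LU runs in host control mode:
 *
 *   # echo 16 > /sys/class/scsi_device/0:0:0:0/device/hpb_params/activation_thld
 */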
2035 
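/*
 * Pre-allocate the pre-request pool: one ufshpb_req with a bio and a
 * zero-filled data page for every two queue-depth slots of the LU
 * (queue_depth / 2 entries in total). Free entries live on
 * lh_pre_req_free; throttle_pre_req caps how many may be in flight.
 */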
2036 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2037 {
2038 	struct ufshpb_req *pre_req = NULL, *t;
2039 	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2040 	int i;
2041 
2042 	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2043 
2044 	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2045 	hpb->throttle_pre_req = qd;
2046 	hpb->num_inflight_pre_req = 0;
2047 
2048 	if (!hpb->pre_req)
2049 		goto release_mem;
2050 
2051 	for (i = 0; i < qd; i++) {
2052 		pre_req = hpb->pre_req + i;
2053 		INIT_LIST_HEAD(&pre_req->list_req);
2054 		pre_req->req = NULL;
2055 
2056 		pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2057 		if (!pre_req->bio)
2058 			goto release_mem;
2059 
2060 		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2061 		if (!pre_req->wb.m_page) {
2062 			bio_put(pre_req->bio);
2063 			goto release_mem;
2064 		}
2065 
2066 		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2067 	}
2068 
2069 	return 0;
2070 release_mem:
2071 	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2072 		list_del_init(&pre_req->list_req);
2073 		bio_put(pre_req->bio);
2074 		__free_page(pre_req->wb.m_page);
2075 	}
2076 
2077 	kfree(hpb->pre_req);
2078 	return -ENOMEM;
2079 }
2080 
2081 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2082 {
2083 	struct ufshpb_req *pre_req = NULL;
2084 	int i;
2085 
2086 	for (i = 0; i < hpb->throttle_pre_req; i++) {
2087 		pre_req = hpb->pre_req + i;
2088 		bio_put(hpb->pre_req[i].bio);
2089 		if (pre_req->wb.m_page) /* only free pages that were allocated */
2090 			__free_page(hpb->pre_req[i].wb.m_page);
2091 		list_del_init(&pre_req->list_req);
2092 	}
2093 
2094 	kfree(hpb->pre_req);
2095 }
2096 
2097 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2098 {
2099 	hpb->stats.hit_cnt = 0;
2100 	hpb->stats.miss_cnt = 0;
2101 	hpb->stats.rb_noti_cnt = 0;
2102 	hpb->stats.rb_active_cnt = 0;
2103 	hpb->stats.rb_inactive_cnt = 0;
2104 	hpb->stats.map_req_cnt = 0;
2105 	hpb->stats.umap_req_cnt = 0;
2106 }
2107 
2108 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2109 {
2110 	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2111 	if (hpb->is_hcm)
2112 		ufshpb_hcm_param_init(hpb);
2113 }
2114 
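/*
 * Per-LU initialization: spinlocks, the LRU/active/inactive lists and the
 * work items are set up first, then the map-request and map-page slab
 * caches, the pre-request pool and the region/subregion tables are
 * allocated. Host control mode LUs additionally start the periodic
 * read-timeout worker. Any failure unwinds in reverse order.
 */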
2115 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2116 {
2117 	int ret;
2118 
2119 	spin_lock_init(&hpb->rgn_state_lock);
2120 	spin_lock_init(&hpb->rsp_list_lock);
2121 	spin_lock_init(&hpb->param_lock);
2122 
2123 	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2124 	INIT_LIST_HEAD(&hpb->lh_act_srgn);
2125 	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2126 	INIT_LIST_HEAD(&hpb->list_hpb_lu);
2127 
2128 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2129 	if (hpb->is_hcm) {
2130 		INIT_WORK(&hpb->ufshpb_normalization_work,
2131 			  ufshpb_normalization_work_handler);
2132 		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2133 				  ufshpb_read_to_handler);
2134 	}
2135 
2136 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2137 			  sizeof(struct ufshpb_req), 0, 0, NULL);
2138 	if (!hpb->map_req_cache) {
2139 		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2140 			hpb->lun);
2141 		return -ENOMEM;
2142 	}
2143 
2144 	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2145 			  sizeof(struct page *) * hpb->pages_per_srgn,
2146 			  0, 0, NULL);
2147 	if (!hpb->m_page_cache) {
2148 		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2149 			hpb->lun);
2150 		ret = -ENOMEM;
2151 		goto release_req_cache;
2152 	}
2153 
2154 	ret = ufshpb_pre_req_mempool_init(hpb);
2155 	if (ret) {
2156 		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2157 			hpb->lun);
2158 		goto release_m_page_cache;
2159 	}
2160 
2161 	ret = ufshpb_alloc_region_tbl(hba, hpb);
2162 	if (ret)
2163 		goto release_pre_req_mempool;
2164 
2165 	ufshpb_stat_init(hpb);
2166 	ufshpb_param_init(hpb);
2167 
2168 	if (hpb->is_hcm) {
2169 		unsigned int poll;
2170 
2171 		poll = hpb->params.timeout_polling_interval_ms;
2172 		schedule_delayed_work(&hpb->ufshpb_read_to_work,
2173 				      msecs_to_jiffies(poll));
2174 	}
2175 
2176 	return 0;
2177 
2178 release_pre_req_mempool:
2179 	ufshpb_pre_req_mempool_destroy(hpb);
2180 release_m_page_cache:
2181 	kmem_cache_destroy(hpb->m_page_cache);
2182 release_req_cache:
2183 	kmem_cache_destroy(hpb->map_req_cache);
2184 	return ret;
2185 }
2186 
2187 static struct ufshpb_lu *
2188 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2189 		    struct ufshpb_dev_info *hpb_dev_info,
2190 		    struct ufshpb_lu_info *hpb_lu_info)
2191 {
2192 	struct ufshpb_lu *hpb;
2193 	int ret;
2194 
2195 	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2196 	if (!hpb)
2197 		return NULL;
2198 
2199 	hpb->lun = sdev->lun;
2200 	hpb->sdev_ufs_lu = sdev;
2201 
2202 	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2203 
2204 	ret = ufshpb_lu_hpb_init(hba, hpb);
2205 	if (ret) {
2206 		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2207 		goto release_hpb;
2208 	}
2209 
2210 	sdev->hostdata = hpb;
2211 	return hpb;
2212 
2213 release_hpb:
2214 	kfree(hpb);
2215 	return NULL;
2216 }
2217 
2218 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2219 {
2220 	struct ufshpb_region *rgn, *next_rgn;
2221 	struct ufshpb_subregion *srgn, *next_srgn;
2222 	unsigned long flags;
2223 
2224 	/*
2225 	 * If a device reset occurred, the remaining HPB region information
2226 	 * may be stale. Discard the HPB response lists that were queued
2227 	 * before the reset so that no work is wasted on stale regions.
2228 	 */
2229 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2230 	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2231 				 list_inact_rgn)
2232 		list_del_init(&rgn->list_inact_rgn);
2233 
2234 	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2235 				 list_act_srgn)
2236 		list_del_init(&srgn->list_act_srgn);
2237 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2238 }
2239 
2240 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2241 {
2242 	if (hpb->is_hcm) {
2243 		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2244 		cancel_work_sync(&hpb->ufshpb_normalization_work);
2245 	}
2246 	cancel_work_sync(&hpb->map_work);
2247 }
2248 
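/*
 * Poll the fHpbReset flag until the device clears it, retrying up to
 * HPB_RESET_REQ_RETRIES times with a ~1 ms pause between reads. Returns
 * false once the flag is cleared (the device finished its HPB reset) and
 * true when the flag cannot be confirmed as cleared (query error or
 * retries exhausted).
 */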
2249 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2250 {
2251 	int err = 0;
2252 	bool flag_res = true;
2253 	int try;
2254 
2255 	/* wait for the device to complete HPB reset query */
2256 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2257 		dev_dbg(hba->dev,
2258 			"%s start flag reset polling %d times\n",
2259 			__func__, try);
2260 
2261 		/* Poll fHpbReset flag to be cleared */
2262 		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2263 				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2264 
2265 		if (err) {
2266 			dev_err(hba->dev,
2267 				"%s reading fHpbReset flag failed with error %d\n",
2268 				__func__, err);
2269 			return flag_res;
2270 		}
2271 
2272 		if (!flag_res)
2273 			goto out;
2274 
2275 		usleep_range(1000, 1100);
2276 	}
2277 	if (flag_res) {
2278 		dev_err(hba->dev,
2279 			"%s fHpbReset was not cleared by the device\n",
2280 			__func__);
2281 	}
2282 out:
2283 	return flag_res;
2284 }
2285 
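/*
 * Host reset handling: ufshpb_reset_host() below moves every HPB_PRESENT
 * LU into HPB_RESET, cancels outstanding work and drops the stale response
 * lists; once the host controller has recovered, ufshpb_reset() switches
 * those LUs back to HPB_PRESENT.
 */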
2286 void ufshpb_reset(struct ufs_hba *hba)
2287 {
2288 	struct ufshpb_lu *hpb;
2289 	struct scsi_device *sdev;
2290 
2291 	shost_for_each_device(sdev, hba->host) {
2292 		hpb = ufshpb_get_hpb_data(sdev);
2293 		if (!hpb)
2294 			continue;
2295 
2296 		if (ufshpb_get_state(hpb) != HPB_RESET)
2297 			continue;
2298 
2299 		ufshpb_set_state(hpb, HPB_PRESENT);
2300 	}
2301 }
2302 
2303 void ufshpb_reset_host(struct ufs_hba *hba)
2304 {
2305 	struct ufshpb_lu *hpb;
2306 	struct scsi_device *sdev;
2307 
2308 	shost_for_each_device(sdev, hba->host) {
2309 		hpb = ufshpb_get_hpb_data(sdev);
2310 		if (!hpb)
2311 			continue;
2312 
2313 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2314 			continue;
2315 		ufshpb_set_state(hpb, HPB_RESET);
2316 		ufshpb_cancel_jobs(hpb);
2317 		ufshpb_discard_rsp_lists(hpb);
2318 	}
2319 }
2320 
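/*
 * Power management: suspend parks every HPB_PRESENT LU in HPB_SUSPEND and
 * cancels its workers; resume returns present or suspended LUs to
 * HPB_PRESENT, kicks the map worker and, for host control mode, restarts
 * the read-timeout polling work.
 */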
2321 void ufshpb_suspend(struct ufs_hba *hba)
2322 {
2323 	struct ufshpb_lu *hpb;
2324 	struct scsi_device *sdev;
2325 
2326 	shost_for_each_device(sdev, hba->host) {
2327 		hpb = ufshpb_get_hpb_data(sdev);
2328 		if (!hpb)
2329 			continue;
2330 
2331 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2332 			continue;
2333 		ufshpb_set_state(hpb, HPB_SUSPEND);
2334 		ufshpb_cancel_jobs(hpb);
2335 	}
2336 }
2337 
2338 void ufshpb_resume(struct ufs_hba *hba)
2339 {
2340 	struct ufshpb_lu *hpb;
2341 	struct scsi_device *sdev;
2342 
2343 	shost_for_each_device(sdev, hba->host) {
2344 		hpb = ufshpb_get_hpb_data(sdev);
2345 		if (!hpb)
2346 			continue;
2347 
2348 		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2349 		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
2350 			continue;
2351 		ufshpb_set_state(hpb, HPB_PRESENT);
2352 		ufshpb_kick_map_work(hpb);
2353 		if (hpb->is_hcm) {
2354 			unsigned int poll =
2355 				hpb->params.timeout_polling_interval_ms;
2356 
2357 			schedule_delayed_work(&hpb->ufshpb_read_to_work,
2358 				msecs_to_jiffies(poll));
2359 		}
2360 	}
2361 }
2362 
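/*
 * Read the unit descriptor (with the device runtime-resumed) and extract
 * the HPB-related fields: the LU must report LU_ENABLED_HPB_FUNC and a
 * non-zero number of max active regions; the logical block count and the
 * pinned region start/count are captured for later geometry setup.
 */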
2363 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2364 			      struct ufshpb_lu_info *hpb_lu_info)
2365 {
2366 	u16 max_active_rgns;
2367 	u8 lu_enable;
2368 	int size;
2369 	int ret;
2370 	char desc_buf[QUERY_DESC_MAX_SIZE];
2371 
2372 	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2373 
2374 	pm_runtime_get_sync(hba->dev);
2375 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2376 					    QUERY_DESC_IDN_UNIT, lun, 0,
2377 					    desc_buf, &size);
2378 	pm_runtime_put_sync(hba->dev);
2379 
2380 	if (ret) {
2381 		dev_err(hba->dev,
2382 			"%s: idn: %d lun: %d  query request failed",
2383 			__func__, QUERY_DESC_IDN_UNIT, lun);
2384 		return ret;
2385 	}
2386 
2387 	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2388 	if (lu_enable != LU_ENABLED_HPB_FUNC)
2389 		return -ENODEV;
2390 
2391 	max_active_rgns = get_unaligned_be16(
2392 			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2393 	if (!max_active_rgns) {
2394 		dev_err(hba->dev,
2395 			"lun %d wrong number of max active regions\n", lun);
2396 		return -ENODEV;
2397 	}
2398 
2399 	hpb_lu_info->num_blocks = get_unaligned_be64(
2400 			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2401 	hpb_lu_info->pinned_start = get_unaligned_be16(
2402 			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2403 	hpb_lu_info->num_pinned = get_unaligned_be16(
2404 			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2405 	hpb_lu_info->max_active_rgns = max_active_rgns;
2406 
2407 	return 0;
2408 }
2409 
2410 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2411 {
2412 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2413 
2414 	if (!hpb)
2415 		return;
2416 
2417 	ufshpb_set_state(hpb, HPB_FAILED);
2418 
2419 	sdev = hpb->sdev_ufs_lu;
2420 	sdev->hostdata = NULL;
2421 
2422 	ufshpb_cancel_jobs(hpb);
2423 
2424 	ufshpb_pre_req_mempool_destroy(hpb);
2425 	ufshpb_destroy_region_tbl(hpb);
2426 
2427 	kmem_cache_destroy(hpb->map_req_cache);
2428 	kmem_cache_destroy(hpb->m_page_cache);
2429 
2430 	list_del_init(&hpb->list_hpb_lu);
2431 
2432 	kfree(hpb);
2433 }
2434 
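/*
 * Runs once every LU has gone through ufshpb_init_hpb_lu(). If the default
 * host map pool is larger than the pages actually needed, the global
 * mempools are shrunk to tot_active_srgn_pages. When the fHpbReset query
 * completed successfully, each LU is switched to HPB_PRESENT (kicking map
 * work when pinned regions are configured and, depending on the control
 * mode, issuing an initial unmap-all request); otherwise the LUs and the
 * global resources are torn down.
 */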
2435 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2436 {
2437 	int pool_size;
2438 	struct ufshpb_lu *hpb;
2439 	struct scsi_device *sdev;
2440 	bool init_success;
2441 
2442 	if (tot_active_srgn_pages == 0) {
2443 		ufshpb_remove(hba);
2444 		return;
2445 	}
2446 
2447 	init_success = !ufshpb_check_hpb_reset_query(hba);
2448 
2449 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2450 	if (pool_size > tot_active_srgn_pages) {
2451 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2452 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2453 	}
2454 
2455 	shost_for_each_device(sdev, hba->host) {
2456 		hpb = ufshpb_get_hpb_data(sdev);
2457 		if (!hpb)
2458 			continue;
2459 
2460 		if (init_success) {
2461 			ufshpb_set_state(hpb, HPB_PRESENT);
2462 			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2463 				queue_work(ufshpb_wq, &hpb->map_work);
2464 			if (!hpb->is_hcm)
2465 				ufshpb_issue_umap_all_req(hpb);
2466 		} else {
2467 			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2468 			ufshpb_destroy_lu(hba, sdev);
2469 		}
2470 	}
2471 
2472 	if (!init_success)
2473 		ufshpb_remove(hba);
2474 }
2475 
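/*
 * Per-LU entry point called during slave configuration. LUs within the
 * supported range that advertise HPB get their ufshpb_lu allocated here
 * and contribute to tot_active_srgn_pages; when the last LU drops
 * slave_conf_cnt to zero, ufshpb_hpb_lu_prepared() finalizes setup for
 * the whole host.
 */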
2476 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2477 {
2478 	struct ufshpb_lu *hpb;
2479 	int ret;
2480 	struct ufshpb_lu_info hpb_lu_info = { 0 };
2481 	int lun = sdev->lun;
2482 
2483 	if (lun >= hba->dev_info.max_lu_supported)
2484 		goto out;
2485 
2486 	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2487 	if (ret)
2488 		goto out;
2489 
2490 	hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2491 				  &hpb_lu_info);
2492 	if (!hpb)
2493 		goto out;
2494 
2495 	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2496 			hpb->srgns_per_rgn * hpb->pages_per_srgn;
2497 
2498 out:
2499 	/* All LUs are initialized */
2500 	if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2501 		ufshpb_hpb_lu_prepared(hba);
2502 }
2503 
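/*
 * Global resources: the map-context slab cache, the mctx and page mempools
 * sized from ufshpb_host_map_kbytes, and the unbound "ufshpb-wq" workqueue.
 * As a rough sketch of the sizing, the default of 2048 KiB corresponds to
 * 512 pool entries on a 4 KiB-page system (PAGE_ALIGN(2048 * 1024) / 4096).
 */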
2504 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2505 {
2506 	int ret;
2507 	unsigned int pool_size;
2508 
2509 	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2510 					sizeof(struct ufshpb_map_ctx),
2511 					0, 0, NULL);
2512 	if (!ufshpb_mctx_cache) {
2513 		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2514 		return -ENOMEM;
2515 	}
2516 
2517 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2518 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2519 	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2520 
2521 	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2522 						    ufshpb_mctx_cache);
2523 	if (!ufshpb_mctx_pool) {
2524 		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2525 		ret = -ENOMEM;
2526 		goto release_mctx_cache;
2527 	}
2528 
2529 	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2530 	if (!ufshpb_page_pool) {
2531 		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2532 		ret = -ENOMEM;
2533 		goto release_mctx_pool;
2534 	}
2535 
2536 	ufshpb_wq = alloc_workqueue("ufshpb-wq",
2537 					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2538 	if (!ufshpb_wq) {
2539 		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2540 		ret = -ENOMEM;
2541 		goto release_page_pool;
2542 	}
2543 
2544 	return 0;
2545 
2546 release_page_pool:
2547 	mempool_destroy(ufshpb_page_pool);
2548 release_mctx_pool:
2549 	mempool_destroy(ufshpb_mctx_pool);
2550 release_mctx_cache:
2551 	kmem_cache_destroy(ufshpb_mctx_cache);
2552 	return ret;
2553 }
2554 
2555 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2556 {
2557 	struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2558 	int max_active_rgns = 0;
2559 	int hpb_num_lu;
2560 
2561 	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2562 	if (hpb_num_lu == 0) {
2563 		dev_err(hba->dev, "No HPB LU supported\n");
2564 		hpb_info->hpb_disabled = true;
2565 		return;
2566 	}
2567 
2568 	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2569 	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2570 	max_active_rgns = get_unaligned_be16(geo_buf +
2571 			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2572 
2573 	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2574 	    max_active_rgns == 0) {
2575 		dev_err(hba->dev, "No HPB supported device\n");
2576 		hpb_info->hpb_disabled = true;
2577 		return;
2578 	}
2579 }
2580 
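/*
 * Parse the HPB fields of the device descriptor: the control mode (host
 * vs. device), the HPB version (only the supported version and the 1.0
 * legacy version are accepted), the maximum single HPB command size
 * attribute queried from the device, and the number of user LUs used to
 * track slave configuration completion.
 */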
2581 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2582 {
2583 	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2584 	int version, ret;
2585 	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
2586 
2587 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2588 
2589 	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2590 	if ((version != HPB_SUPPORT_VERSION) &&
2591 	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
2592 		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2593 			__func__, version);
2594 		hpb_dev_info->hpb_disabled = true;
2595 		return;
2596 	}
2597 
2598 	if (version == HPB_SUPPORT_LEGACY_VERSION)
2599 		hpb_dev_info->is_legacy = true;
2600 
2601 	pm_runtime_get_sync(hba->dev);
2602 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2603 		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
2604 	pm_runtime_put_sync(hba->dev);
2605 
2606 	if (ret)
2607 		dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
2608 			__func__);
2609 	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
2610 
2611 	/*
2612 	 * Get the number of user logical units so that we can tell when
2613 	 * every scsi_device has finished its initialization.
2614 	 */
2615 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2616 }
2617 
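/*
 * Top-level HPB init: allocate the global memory pools and workqueue,
 * remember how many LUs still need slave configuration, then ask the
 * device to rebuild its HPB state by setting the fHpbReset flag (retried
 * up to HPB_RESET_REQ_RETRIES times).
 */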
2618 void ufshpb_init(struct ufs_hba *hba)
2619 {
2620 	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2621 	int try;
2622 	int ret;
2623 
2624 	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2625 		return;
2626 
2627 	if (ufshpb_init_mem_wq(hba)) {
2628 		hpb_dev_info->hpb_disabled = true;
2629 		return;
2630 	}
2631 
2632 	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2633 	tot_active_srgn_pages = 0;
2634 	/* issue HPB reset query */
2635 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2636 		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2637 					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2638 		if (!ret)
2639 			break;
2640 	}
2641 }
2642 
2643 void ufshpb_remove(struct ufs_hba *hba)
2644 {
2645 	mempool_destroy(ufshpb_page_pool);
2646 	mempool_destroy(ufshpb_mctx_pool);
2647 	kmem_cache_destroy(ufshpb_mctx_cache);
2648 
2649 	destroy_workqueue(ufshpb_wq);
2650 }
2651 
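/*
 * ufshpb_host_map_kbytes sizes the global map pools created in
 * ufshpb_init_mem_wq() and is only consulted at initialization time, so
 * changing it later (e.g. via the module parameter, typically exposed
 * under /sys/module/.../parameters/ depending on how the driver is built)
 * only affects subsequent probes.
 */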
2652 module_param(ufshpb_host_map_kbytes, uint, 0644);
2653 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2654 	"ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
2655