Lines Matching +full:cmdq +full:- +full:sync

1 // SPDX-License-Identifier: GPL-2.0
19 #include <linux/io-pgtable.h>
27 #include <linux/pci-ats.h>
30 #include "arm-smmu-v3.h"
31 #include "../../dma-iommu.h"
32 #include "../../iommu-sva-lib.h"
42 "Disable MSI-based polling for CMD_SYNC completion.");
84 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
85 { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
94 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
96 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
97 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
103 /* Low-level queue manipulation functions */
108 prod = Q_IDX(q, q->prod); in queue_has_space()
109 cons = Q_IDX(q, q->cons); in queue_has_space()
111 if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) in queue_has_space()
112 space = (1 << q->max_n_shift) - (prod - cons); in queue_has_space()
114 space = cons - prod; in queue_has_space()
121 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_full()
122 Q_WRP(q, q->prod) != Q_WRP(q, q->cons); in queue_full()
127 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_empty()
128 Q_WRP(q, q->prod) == Q_WRP(q, q->cons); in queue_empty()
133 return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && in queue_consumed()
134 (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || in queue_consumed()
135 ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && in queue_consumed()
136 (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); in queue_consumed()
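The queue_has_space()/queue_full()/queue_empty()/queue_consumed() fragments above all compare an index against a wrap bit kept just above the index bits; that is how a full ring is told apart from an empty one when prod and cons point at the same slot. A minimal stand-alone sketch of that wrap-bit trick, using hypothetical q_idx()/q_wrp() helpers and an illustrative 16-entry ring rather than the driver's Q_* macros:

```c
#include <assert.h>
#include <stdint.h>

#define SHIFT     4u                     /* 16-entry ring (illustrative) */
#define IDX_MASK  ((1u << SHIFT) - 1)    /* low bits: entry index        */
#define WRP_BIT   (1u << SHIFT)          /* next bit up: wrap flag       */

static uint32_t q_idx(uint32_t p) { return p & IDX_MASK; }
static uint32_t q_wrp(uint32_t p) { return p & WRP_BIT;  }

/* Empty: same index and same wrap. Full: same index, different wrap. */
static int ring_empty(uint32_t prod, uint32_t cons)
{
	return q_idx(prod) == q_idx(cons) && q_wrp(prod) == q_wrp(cons);
}

static int ring_full(uint32_t prod, uint32_t cons)
{
	return q_idx(prod) == q_idx(cons) && q_wrp(prod) != q_wrp(cons);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;
	unsigned int i;

	assert(ring_empty(prod, cons) && !ring_full(prod, cons));

	/* Produce one full ring's worth: the index wraps, the wrap bit flips. */
	for (i = 0; i < (1u << SHIFT); i++)
		prod = (q_wrp(prod) | q_idx(prod)) + 1;
	assert(ring_full(prod, cons));

	/* Consume everything again: both wrap bits now match, so it is empty. */
	for (i = 0; i < (1u << SHIFT); i++)
		cons = (q_wrp(cons) | q_idx(cons)) + 1;
	assert(ring_empty(prod, cons));
	return 0;
}
```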
146 writel_relaxed(q->llq.cons, q->cons_reg); in queue_sync_cons_out()
151 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; in queue_inc_cons()
152 q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); in queue_inc_cons()
165 prod = readl(q->prod_reg); in queue_sync_prod_in()
167 if (Q_OVF(prod) != Q_OVF(q->llq.prod)) in queue_sync_prod_in()
168 ret = -EOVERFLOW; in queue_sync_prod_in()
170 q->llq.prod = prod; in queue_sync_prod_in()
176 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; in queue_inc_prod_n()
177 return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); in queue_inc_prod_n()
183 qp->delay = 1; in queue_poll_init()
184 qp->spin_cnt = 0; in queue_poll_init()
185 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); in queue_poll_init()
186 qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); in queue_poll_init()
191 if (ktime_compare(ktime_get(), qp->timeout) > 0) in queue_poll()
192 return -ETIMEDOUT; in queue_poll()
194 if (qp->wfe) { in queue_poll()
196 } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { in queue_poll()
199 udelay(qp->delay); in queue_poll()
200 qp->delay *= 2; in queue_poll()
201 qp->spin_cnt = 0; in queue_poll()
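queue_poll() (fragments above) enforces a hard deadline and escalates from busy spinning to exponentially growing delays, or waits for an event when WFE is available. A rough user-space approximation of that policy, with made-up constants and usleep() standing in for udelay()/WFE:

```c
#include <errno.h>
#include <time.h>
#include <unistd.h>

#define POLL_SPIN_COUNT  10       /* spins before sleeping (illustrative) */
#define POLL_TIMEOUT_US  1000000  /* 1s deadline (illustrative)           */

struct poll_state {
	unsigned int spin_cnt;
	unsigned int delay_us;
	struct timespec deadline;
};

static void poll_init(struct poll_state *p)
{
	clock_gettime(CLOCK_MONOTONIC, &p->deadline);
	p->deadline.tv_sec += POLL_TIMEOUT_US / 1000000;
	p->spin_cnt = 0;
	p->delay_us = 1;
}

/* Returns 0 to keep polling, -ETIMEDOUT once the deadline has passed. */
static int poll_step(struct poll_state *p)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > p->deadline.tv_sec ||
	    (now.tv_sec == p->deadline.tv_sec && now.tv_nsec > p->deadline.tv_nsec))
		return -ETIMEDOUT;

	if (++p->spin_cnt < POLL_SPIN_COUNT)
		return 0;                 /* keep spinning for a while    */

	usleep(p->delay_us);              /* then back off exponentially  */
	p->delay_us *= 2;
	p->spin_cnt = 0;
	return 0;
}
```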
225 if (queue_empty(&q->llq)) in queue_remove_raw()
226 return -EAGAIN; in queue_remove_raw()
228 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); in queue_remove_raw()
229 queue_inc_cons(&q->llq); in queue_remove_raw()
234 /* High-level queue accessors */
238 cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); in arm_smmu_cmdq_build_cmd()
240 switch (ent->opcode) { in arm_smmu_cmdq_build_cmd()
245 cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid); in arm_smmu_cmdq_build_cmd()
248 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); in arm_smmu_cmdq_build_cmd()
251 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
252 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); in arm_smmu_cmdq_build_cmd()
255 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
262 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
265 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
266 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
267 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
268 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
269 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
270 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
271 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; in arm_smmu_cmdq_build_cmd()
274 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
275 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
276 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
277 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
278 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
279 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
280 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; in arm_smmu_cmdq_build_cmd()
283 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
286 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
289 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
292 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
293 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global); in arm_smmu_cmdq_build_cmd()
294 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid); in arm_smmu_cmdq_build_cmd()
295 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid); in arm_smmu_cmdq_build_cmd()
296 cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size); in arm_smmu_cmdq_build_cmd()
297 cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK; in arm_smmu_cmdq_build_cmd()
300 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
301 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid); in arm_smmu_cmdq_build_cmd()
302 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid); in arm_smmu_cmdq_build_cmd()
303 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid); in arm_smmu_cmdq_build_cmd()
304 switch (ent->pri.resp) { in arm_smmu_cmdq_build_cmd()
310 return -EINVAL; in arm_smmu_cmdq_build_cmd()
312 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); in arm_smmu_cmdq_build_cmd()
315 cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid); in arm_smmu_cmdq_build_cmd()
316 cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp); in arm_smmu_cmdq_build_cmd()
317 cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag); in arm_smmu_cmdq_build_cmd()
320 if (ent->sync.msiaddr) { in arm_smmu_cmdq_build_cmd()
322 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; in arm_smmu_cmdq_build_cmd()
330 return -ENOENT; in arm_smmu_cmdq_build_cmd()
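arm_smmu_cmdq_build_cmd() assembles every command as two 64-bit words, OR-ing FIELD_PREP() values into per-field masks and returning -ENOENT for unknown opcodes. A self-contained sketch of the same packing style with invented field masks (the real CMDQ_* layouts live in arm-smmu-v3.h):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: opcode in bits [7:0] of word 0, SID in bits [63:32]. */
#define CMD_0_OP_MASK   0x00000000000000ffULL
#define CMD_0_SID_MASK  0xffffffff00000000ULL

/* Minimal FIELD_PREP(): shift the value up to the mask's lowest set bit. */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

static void build_cmd(uint64_t cmd[2], uint8_t opcode, uint32_t sid)
{
	cmd[0] = field_prep(CMD_0_OP_MASK, opcode) |
		 field_prep(CMD_0_SID_MASK, sid);
	cmd[1] = 0;
}

int main(void)
{
	uint64_t cmd[2];

	build_cmd(cmd, 0x01, 0x42);
	printf("cmd[0]=%#llx\n", (unsigned long long)cmd[0]);
	return 0;
}
```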
338 return &smmu->cmdq; in arm_smmu_get_cmdq()
352 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) { in arm_smmu_cmdq_build_sync_cmd()
353 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * in arm_smmu_cmdq_build_sync_cmd()
354 q->ent_dwords * 8; in arm_smmu_cmdq_build_sync_cmd()
372 u32 cons = readl_relaxed(q->cons_reg); in __arm_smmu_cmdq_skip_err()
378 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in __arm_smmu_cmdq_skip_err()
383 dev_err(smmu->dev, "retrying command fetch\n"); in __arm_smmu_cmdq_skip_err()
402 * not to touch any of the shadow cmdq state. in __arm_smmu_cmdq_skip_err()
404 queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); in __arm_smmu_cmdq_skip_err()
405 dev_err(smmu->dev, "skipping command in error state:\n"); in __arm_smmu_cmdq_skip_err()
407 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); in __arm_smmu_cmdq_skip_err()
412 queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); in __arm_smmu_cmdq_skip_err()
417 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q); in arm_smmu_cmdq_skip_err()
424 * - The only LOCK routines are exclusive_trylock() and shared_lock().
428 * - The UNLOCK routines are supplemented with shared_tryunlock(), which
432 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_lock() argument
442 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
446 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
447 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
450 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_unlock() argument
452 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
455 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_tryunlock() argument
457 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
460 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_shared_tryunlock()
464 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ argument
468 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
474 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ argument
476 atomic_set_release(&cmdq->lock, 0); \
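The comments around line 424 describe the cmdq lock discipline: the lock is an atomic counter in which a non-negative value counts shared holders, the exclusive path parks INT_MIN in it via trylock, and the exclusive unlock simply resets the counter to zero, discarding any speculative reader increments made in the meantime. A simplified C11-atomics sketch of that convention; it does not reproduce the driver's exact memory-ordering choices or the irqsave handling:

```c
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* >= 0: number of shared holders; INT_MIN: held exclusively. */
static atomic_int cmdq_lock;

static void shared_lock(void)
{
	int val;

	/* Fast path: the speculative increment sticks unless an exclusive
	 * holder has driven the counter negative. */
	if (atomic_fetch_add_explicit(&cmdq_lock, 1, memory_order_acquire) >= 0)
		return;

	/* Slow path: wait for the counter to go non-negative, then retry. */
	do {
		do {
			val = atomic_load_explicit(&cmdq_lock, memory_order_relaxed);
		} while (val < 0);
	} while (!atomic_compare_exchange_weak_explicit(&cmdq_lock, &val, val + 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static void shared_unlock(void)
{
	atomic_fetch_sub_explicit(&cmdq_lock, 1, memory_order_release);
}

static bool exclusive_trylock(void)
{
	int expected = 0;

	/* Only succeeds when nobody holds the lock at all. */
	return atomic_compare_exchange_strong_explicit(&cmdq_lock, &expected,
						       INT_MIN,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void exclusive_unlock(void)
{
	/* Wipes out speculative reader increments, as the comment describes. */
	atomic_store_explicit(&cmdq_lock, 0, memory_order_release);
}
```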
485 * you like mixed-size concurrency, dependency ordering and relaxed atomics,
517 * SYNC completion and freeing up space in the queue before we think that it is
520 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_set_valid_map() argument
525 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
540 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
545 mask = GENMASK(limit - 1, sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
549 * that a zero-initialised queue is invalid and, after marking in __arm_smmu_cmdq_poll_set_valid_map()
562 llq.prod = queue_inc_prod_n(&llq, limit - sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
567 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_set_valid_map() argument
570 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); in arm_smmu_cmdq_set_valid_map()
574 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_valid_map() argument
577 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); in arm_smmu_cmdq_poll_valid_map()
580 /* Wait for the command queue to become non-full */
586 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_poll_until_not_full() local
590 * Try to update our copy of cons by grabbing exclusive cmdq access. If in arm_smmu_cmdq_poll_until_not_full()
593 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { in arm_smmu_cmdq_poll_until_not_full()
594 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
595 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); in arm_smmu_cmdq_poll_until_not_full()
596 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
602 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
614 * Must be called with the cmdq lock held in some capacity.
621 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_msi() local
622 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
632 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); in __arm_smmu_cmdq_poll_until_msi()
637 * Wait until the SMMU cons index passes llq->prod.
638 * Must be called with the cmdq lock held in some capacity.
644 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_consumed() local
645 u32 prod = llq->prod; in __arm_smmu_cmdq_poll_until_consumed()
649 llq->val = READ_ONCE(cmdq->q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
664 * cmdq->q.llq.cons. Roughly speaking: in __arm_smmu_cmdq_poll_until_consumed()
668 * if (sync) in __arm_smmu_cmdq_poll_until_consumed()
684 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
693 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) in arm_smmu_cmdq_poll_until_sync()
699 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, in arm_smmu_cmdq_write_entries() argument
704 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
712 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
720 * - There is a dma_wmb() before publishing any commands to the queue.
724 * - On completion of a CMD_SYNC, there is a control dependency.
728 * - Command insertion is totally ordered, so if two CPUs each race to
733 u64 *cmds, int n, bool sync) in arm_smmu_cmdq_issue_cmdlist() argument
739 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_issue_cmdlist() local
743 llq.max_n_shift = cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_issue_cmdlist()
747 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
751 while (!queue_has_space(&llq, n + sync)) { in arm_smmu_cmdq_issue_cmdlist()
754 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
759 head.prod = queue_inc_prod_n(&llq, n + sync) | in arm_smmu_cmdq_issue_cmdlist()
762 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
776 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); in arm_smmu_cmdq_issue_cmdlist()
777 if (sync) { in arm_smmu_cmdq_issue_cmdlist()
779 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); in arm_smmu_cmdq_issue_cmdlist()
780 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
785 * We achieve that by taking the cmdq lock as shared before in arm_smmu_cmdq_issue_cmdlist()
788 arm_smmu_cmdq_shared_lock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
793 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); in arm_smmu_cmdq_issue_cmdlist()
798 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
802 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
810 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); in arm_smmu_cmdq_issue_cmdlist()
816 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
823 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
827 if (sync) { in arm_smmu_cmdq_issue_cmdlist()
831 dev_err_ratelimited(smmu->dev, in arm_smmu_cmdq_issue_cmdlist()
834 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
835 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
839 * Try to unlock the cmdq lock. This will fail if we're the last in arm_smmu_cmdq_issue_cmdlist()
840 * reader, in which case we can safely update cmdq->q.llq.cons in arm_smmu_cmdq_issue_cmdlist()
842 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { in arm_smmu_cmdq_issue_cmdlist()
843 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
844 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
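arm_smmu_cmdq_issue_cmdlist() reserves queue space without a spinlock: prod and cons live together in the 64-bit llq.val, and a producer claims n + sync slots by recomputing prod and publishing it with a single cmpxchg (lines 747-762 above). One producer additionally becomes the batch owner, via a flag in prod that is not among the matched lines, and later writes the hardware prod register on behalf of everyone. A stripped-down sketch of just the space-reservation step, assuming free-running 32-bit prod/cons counters packed into one word and omitting the owner protocol entirely:

```c
#include <stdatomic.h>
#include <stdint.h>

#define Q_SHIFT   8u                        /* 256-entry queue (illustrative) */
#define Q_ENTRIES (1u << Q_SHIFT)

/* prod in bits [31:0], cons in bits [63:32] (hypothetical packing). */
static _Atomic uint64_t llq_val;

static uint32_t q_space(uint32_t prod, uint32_t cons)
{
	return Q_ENTRIES - (uint32_t)(prod - cons);
}

/*
 * Reserve @n slots: returns the prod index where the caller may write its
 * commands, or -1 if the queue is currently too full.
 */
static int64_t reserve_slots(uint32_t n)
{
	uint64_t old, new;
	uint32_t prod, cons;

	old = atomic_load_explicit(&llq_val, memory_order_relaxed);
	do {
		prod = (uint32_t)old;
		cons = (uint32_t)(old >> 32);
		if (q_space(prod, cons) < n)
			return -1;	/* caller should refresh cons and retry */

		new = ((uint64_t)cons << 32) | (uint32_t)(prod + n);
	} while (!atomic_compare_exchange_weak_explicit(&llq_val, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));
	return prod;
}
```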
854 bool sync) in __arm_smmu_cmdq_issue_cmd() argument
859 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in __arm_smmu_cmdq_issue_cmd()
860 ent->opcode); in __arm_smmu_cmdq_issue_cmd()
861 return -EINVAL; in __arm_smmu_cmdq_issue_cmd()
864 return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync); in __arm_smmu_cmdq_issue_cmd()
885 if (cmds->num == CMDQ_BATCH_ENTRIES) { in arm_smmu_cmdq_batch_add()
886 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); in arm_smmu_cmdq_batch_add()
887 cmds->num = 0; in arm_smmu_cmdq_batch_add()
890 index = cmds->num * CMDQ_ENT_DWORDS; in arm_smmu_cmdq_batch_add()
891 if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) { in arm_smmu_cmdq_batch_add()
892 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_batch_add()
893 cmd->opcode); in arm_smmu_cmdq_batch_add()
897 cmds->num++; in arm_smmu_cmdq_batch_add()
903 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); in arm_smmu_cmdq_batch_submit()
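The batch helpers above accumulate commands in a caller-provided buffer, flush it without a sync when it reaches CMDQ_BATCH_ENTRIES, and let the final submit push the remaining commands together with a CMD_SYNC. A hedged sketch of that pattern with hypothetical types and a stubbed-out issue path:

```c
#include <stdint.h>
#include <string.h>

#define BATCH_ENTRIES 64            /* flush threshold (illustrative)    */
#define ENT_DWORDS    2             /* two 64-bit words per command      */

/* Stand-in for the real issue path: queue @n commands, optionally + sync. */
static void issue_cmdlist(const uint64_t *cmds, int n, int sync)
{
	(void)cmds; (void)n; (void)sync;
}

struct cmd_batch {
	uint64_t cmds[BATCH_ENTRIES * ENT_DWORDS];
	int num;
};

static void batch_add(struct cmd_batch *b, const uint64_t cmd[ENT_DWORDS])
{
	if (b->num == BATCH_ENTRIES) {		/* buffer full: flush, no sync */
		issue_cmdlist(b->cmds, b->num, 0);
		b->num = 0;
	}
	memcpy(&b->cmds[b->num * ENT_DWORDS], cmd, ENT_DWORDS * sizeof(uint64_t));
	b->num++;
}

static void batch_submit(struct cmd_batch *b)
{
	/* The last flush carries the sync so completion is awaited only once. */
	issue_cmdlist(b->cmds, b->num, 1);
	b->num = 0;
}

int main(void)
{
	struct cmd_batch b = { .num = 0 };
	uint64_t cmd[ENT_DWORDS] = { 0x12, 0 };
	int i;

	for (i = 0; i < 100; i++)	/* forces one intermediate flush */
		batch_add(&b, cmd);
	batch_submit(&b);
	return 0;
}
```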
912 int sid = master->streams[0].id; in arm_smmu_page_response()
914 if (master->stall_enabled) { in arm_smmu_page_response()
917 cmd.resume.stag = resp->grpid; in arm_smmu_page_response()
918 switch (resp->code) { in arm_smmu_page_response()
927 return -EINVAL; in arm_smmu_page_response()
930 return -ENODEV; in arm_smmu_page_response()
933 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); in arm_smmu_page_response()
935 * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP. in arm_smmu_page_response()
948 .opcode = smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_asid()
963 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_sync_cd()
974 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
975 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_sync_cd()
976 for (i = 0; i < master->num_streams; i++) { in arm_smmu_sync_cd()
977 cmd.cfgi.sid = master->streams[i].id; in arm_smmu_sync_cd()
981 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
991 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, in arm_smmu_alloc_cd_leaf_table()
992 &l1_desc->l2ptr_dma, GFP_KERNEL); in arm_smmu_alloc_cd_leaf_table()
993 if (!l1_desc->l2ptr) { in arm_smmu_alloc_cd_leaf_table()
994 dev_warn(smmu->dev, in arm_smmu_alloc_cd_leaf_table()
996 return -ENOMEM; in arm_smmu_alloc_cd_leaf_table()
1004 u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | in arm_smmu_write_cd_l1_desc()
1017 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_get_cd_ptr()
1018 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_get_cd_ptr()
1020 if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR) in arm_smmu_get_cd_ptr()
1021 return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS; in arm_smmu_get_cd_ptr()
1024 l1_desc = &cdcfg->l1_desc[idx]; in arm_smmu_get_cd_ptr()
1025 if (!l1_desc->l2ptr) { in arm_smmu_get_cd_ptr()
1029 l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS; in arm_smmu_get_cd_ptr()
1034 idx = ssid & (CTXDESC_L2_ENTRIES - 1); in arm_smmu_get_cd_ptr()
1035 return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS; in arm_smmu_get_cd_ptr()
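arm_smmu_get_cd_ptr() resolves an SSID either linearly or through a two-level table: the upper SSID bits select an L1 descriptor (allocating the leaf on demand), and the lower bits select the context descriptor inside that leaf. A small arithmetic sketch of that split, with an assumed leaf size of 1024 CDs of 8 dwords each; the real CTXDESC_* constants may differ:

```c
#include <stdint.h>
#include <stdio.h>

#define L2_ENTRIES 1024u   /* CDs per leaf table (assumed)            */
#define CD_DWORDS  8u      /* 64-bit words per context descriptor     */

static void split_ssid(uint32_t ssid, uint32_t *l1_idx, uint32_t *l2_off)
{
	*l1_idx = ssid / L2_ENTRIES;                      /* which leaf table */
	*l2_off = (ssid & (L2_ENTRIES - 1)) * CD_DWORDS;  /* dword offset     */
}

int main(void)
{
	uint32_t l1, l2;

	split_ssid(0x1234, &l1, &l2);
	printf("ssid 0x1234 -> L1 index %u, leaf dword offset %u\n", l1, l2);
	return 0;
}
```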
1056 if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax))) in arm_smmu_write_ctx_desc()
1057 return -E2BIG; in arm_smmu_write_ctx_desc()
1061 return -ENOMEM; in arm_smmu_write_ctx_desc()
1072 val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); in arm_smmu_write_ctx_desc()
1078 cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); in arm_smmu_write_ctx_desc()
1080 cdptr[3] = cpu_to_le64(cd->mair); in arm_smmu_write_ctx_desc()
1089 val = cd->tcr | in arm_smmu_write_ctx_desc()
1094 (cd->mm ? 0 : CTXDESC_CD_0_ASET) | in arm_smmu_write_ctx_desc()
1096 FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | in arm_smmu_write_ctx_desc()
1099 if (smmu_domain->stall_enabled) in arm_smmu_write_ctx_desc()
1104 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 in arm_smmu_write_ctx_desc()
1107 * The size of single-copy atomic reads made by the SMMU is in arm_smmu_write_ctx_desc()
1109 * field within an aligned 64-bit span of a structure can be altered in arm_smmu_write_ctx_desc()
1122 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_alloc_cd_tables()
1123 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_alloc_cd_tables()
1124 struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg; in arm_smmu_alloc_cd_tables()
1126 max_contexts = 1 << cfg->s1cdmax; in arm_smmu_alloc_cd_tables()
1128 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || in arm_smmu_alloc_cd_tables()
1130 cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; in arm_smmu_alloc_cd_tables()
1131 cdcfg->num_l1_ents = max_contexts; in arm_smmu_alloc_cd_tables()
1135 cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; in arm_smmu_alloc_cd_tables()
1136 cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts, in arm_smmu_alloc_cd_tables()
1139 cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents, in arm_smmu_alloc_cd_tables()
1140 sizeof(*cdcfg->l1_desc), in arm_smmu_alloc_cd_tables()
1142 if (!cdcfg->l1_desc) in arm_smmu_alloc_cd_tables()
1143 return -ENOMEM; in arm_smmu_alloc_cd_tables()
1145 l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); in arm_smmu_alloc_cd_tables()
1148 cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma, in arm_smmu_alloc_cd_tables()
1150 if (!cdcfg->cdtab) { in arm_smmu_alloc_cd_tables()
1151 dev_warn(smmu->dev, "failed to allocate context descriptor\n"); in arm_smmu_alloc_cd_tables()
1152 ret = -ENOMEM; in arm_smmu_alloc_cd_tables()
1159 if (cdcfg->l1_desc) { in arm_smmu_alloc_cd_tables()
1160 devm_kfree(smmu->dev, cdcfg->l1_desc); in arm_smmu_alloc_cd_tables()
1161 cdcfg->l1_desc = NULL; in arm_smmu_alloc_cd_tables()
1170 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_free_cd_tables()
1171 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_free_cd_tables()
1173 if (cdcfg->l1_desc) { in arm_smmu_free_cd_tables()
1176 for (i = 0; i < cdcfg->num_l1_ents; i++) { in arm_smmu_free_cd_tables()
1177 if (!cdcfg->l1_desc[i].l2ptr) in arm_smmu_free_cd_tables()
1180 dmam_free_coherent(smmu->dev, size, in arm_smmu_free_cd_tables()
1181 cdcfg->l1_desc[i].l2ptr, in arm_smmu_free_cd_tables()
1182 cdcfg->l1_desc[i].l2ptr_dma); in arm_smmu_free_cd_tables()
1184 devm_kfree(smmu->dev, cdcfg->l1_desc); in arm_smmu_free_cd_tables()
1185 cdcfg->l1_desc = NULL; in arm_smmu_free_cd_tables()
1187 l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); in arm_smmu_free_cd_tables()
1189 l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3); in arm_smmu_free_cd_tables()
1192 dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma); in arm_smmu_free_cd_tables()
1193 cdcfg->cdtab_dma = 0; in arm_smmu_free_cd_tables()
1194 cdcfg->cdtab = NULL; in arm_smmu_free_cd_tables()
1202 if (!cd->asid) in arm_smmu_free_asid()
1205 free = refcount_dec_and_test(&cd->refs); in arm_smmu_free_asid()
1207 old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid); in arm_smmu_free_asid()
1219 val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); in arm_smmu_write_strtab_l1_desc()
1220 val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; in arm_smmu_write_strtab_l1_desc()
1246 * 1. Invalid (all zero) -> bypass/fault (init) in arm_smmu_write_strtab_ent()
1247 * 2. Bypass/fault -> translation/bypass (attach) in arm_smmu_write_strtab_ent()
1248 * 3. Translation/bypass -> bypass/fault (detach) in arm_smmu_write_strtab_ent()
1255 * 2. Write everything apart from dword 0, sync, write dword 0, sync in arm_smmu_write_strtab_ent()
1256 * 3. Update Config, sync in arm_smmu_write_strtab_ent()
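The numbered comment above gives the STE update discipline: fill in every dword except dword 0 (which carries the valid bit and config), make those writes visible, then publish dword 0 and sync again so the SMMU can never fetch a half-initialised entry. A generic user-space sketch of that publish-last pattern using a release store; the layout is illustrative only, and the driver's additional CFGI/CMD_SYNC steps are left out:

```c
#include <stdatomic.h>
#include <stdint.h>

#define STE_DWORDS 8
#define STE_VALID  (1ULL << 0)

/* Word 0 carries the valid bit, so it is the last thing we publish. */
static _Atomic uint64_t ste[STE_DWORDS];

static void install_entry(const uint64_t val[STE_DWORDS])
{
	int i;

	/* 1. Fill in everything except word 0 while the entry is invalid. */
	for (i = 1; i < STE_DWORDS; i++)
		atomic_store_explicit(&ste[i], val[i], memory_order_relaxed);

	/*
	 * 2. Release store on word 0: the valid bit only becomes observable
	 *    after the other words have been written.
	 */
	atomic_store_explicit(&ste[0], val[0] | STE_VALID, memory_order_release);
}
```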
1272 smmu_domain = master->domain; in arm_smmu_write_strtab_ent()
1273 smmu = master->smmu; in arm_smmu_write_strtab_ent()
1277 switch (smmu_domain->stage) { in arm_smmu_write_strtab_ent()
1279 s1_cfg = &smmu_domain->s1_cfg; in arm_smmu_write_strtab_ent()
1283 s2_cfg = &smmu_domain->s2_cfg; in arm_smmu_write_strtab_ent()
1321 * The SMMU can perform negative caching, so we must sync in arm_smmu_write_strtab_ent()
1330 u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_write_strtab_ent()
1341 if (smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_write_strtab_ent()
1342 !master->stall_enabled) in arm_smmu_write_strtab_ent()
1345 val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | in arm_smmu_write_strtab_ent()
1347 FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) | in arm_smmu_write_strtab_ent()
1348 FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt); in arm_smmu_write_strtab_ent()
1354 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | in arm_smmu_write_strtab_ent()
1355 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | in arm_smmu_write_strtab_ent()
1362 dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); in arm_smmu_write_strtab_ent()
1367 if (master->ats_enabled) in arm_smmu_write_strtab_ent()
1377 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) in arm_smmu_write_strtab_ent()
1404 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l2_strtab()
1405 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; in arm_smmu_init_l2_strtab()
1407 if (desc->l2ptr) in arm_smmu_init_l2_strtab()
1411 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; in arm_smmu_init_l2_strtab()
1413 desc->span = STRTAB_SPLIT + 1; in arm_smmu_init_l2_strtab()
1414 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, in arm_smmu_init_l2_strtab()
1416 if (!desc->l2ptr) { in arm_smmu_init_l2_strtab()
1417 dev_err(smmu->dev, in arm_smmu_init_l2_strtab()
1420 return -ENOMEM; in arm_smmu_init_l2_strtab()
1423 arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false); in arm_smmu_init_l2_strtab()
1434 lockdep_assert_held(&smmu->streams_mutex); in arm_smmu_find_master()
1436 node = smmu->streams.rb_node; in arm_smmu_find_master()
1439 if (stream->id < sid) in arm_smmu_find_master()
1440 node = node->rb_right; in arm_smmu_find_master()
1441 else if (stream->id > sid) in arm_smmu_find_master()
1442 node = node->rb_left; in arm_smmu_find_master()
1444 return stream->master; in arm_smmu_find_master()
1476 return -EOPNOTSUPP; in arm_smmu_handle_evt()
1479 /* Stage-2 is always pinned at the moment */ in arm_smmu_handle_evt()
1481 return -EFAULT; in arm_smmu_handle_evt()
1495 flt->type = IOMMU_FAULT_PAGE_REQ; in arm_smmu_handle_evt()
1496 flt->prm = (struct iommu_fault_page_request) { in arm_smmu_handle_evt()
1504 flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; in arm_smmu_handle_evt()
1505 flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); in arm_smmu_handle_evt()
1508 flt->type = IOMMU_FAULT_DMA_UNRECOV; in arm_smmu_handle_evt()
1509 flt->event = (struct iommu_fault_unrecoverable) { in arm_smmu_handle_evt()
1517 flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID; in arm_smmu_handle_evt()
1518 flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); in arm_smmu_handle_evt()
1522 mutex_lock(&smmu->streams_mutex); in arm_smmu_handle_evt()
1525 ret = -EINVAL; in arm_smmu_handle_evt()
1529 ret = iommu_report_device_fault(master->dev, &fault_evt); in arm_smmu_handle_evt()
1530 if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) { in arm_smmu_handle_evt()
1533 .pasid = flt->prm.pasid, in arm_smmu_handle_evt()
1534 .grpid = flt->prm.grpid, in arm_smmu_handle_evt()
1537 arm_smmu_page_response(master->dev, &fault_evt, &resp); in arm_smmu_handle_evt()
1541 mutex_unlock(&smmu->streams_mutex); in arm_smmu_handle_evt()
1549 struct arm_smmu_queue *q = &smmu->evtq.q; in arm_smmu_evtq_thread()
1550 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_evtq_thread()
1563 dev_info(smmu->dev, "event 0x%02x received:\n", id); in arm_smmu_evtq_thread()
1565 dev_info(smmu->dev, "\t0x%016llx\n", in arm_smmu_evtq_thread()
1575 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_evtq_thread()
1576 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); in arm_smmu_evtq_thread()
1579 /* Sync our overflow flag, as we believe we're up to speed */ in arm_smmu_evtq_thread()
1580 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in arm_smmu_evtq_thread()
1581 Q_IDX(llq, llq->cons); in arm_smmu_evtq_thread()
1597 dev_info(smmu->dev, "unexpected PRI request received:\n"); in arm_smmu_handle_ppr()
1598 dev_info(smmu->dev, in arm_smmu_handle_ppr()
1626 struct arm_smmu_queue *q = &smmu->priq.q; in arm_smmu_priq_thread()
1627 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_priq_thread()
1634 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_priq_thread()
1635 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); in arm_smmu_priq_thread()
1638 /* Sync our overflow flag, as we believe we're up to speed */ in arm_smmu_priq_thread()
1639 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in arm_smmu_priq_thread()
1640 Q_IDX(llq, llq->cons); in arm_smmu_priq_thread()
1652 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); in arm_smmu_gerror_handler()
1653 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1659 dev_warn(smmu->dev, in arm_smmu_gerror_handler()
1664 dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); in arm_smmu_gerror_handler()
1669 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); in arm_smmu_gerror_handler()
1672 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1675 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1678 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1681 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1684 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1689 writel(gerror, smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1698 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_combined_irq_thread()
1716 /* ATC invalidates are always on 4096-byte pages */ in arm_smmu_atc_inv_to_cmd()
1728 * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID in arm_smmu_atc_inv_to_cmd()
1731 * This has the unpleasant side-effect of invalidating all PASID-tagged in arm_smmu_atc_inv_to_cmd()
1741 cmd->atc.size = ATC_INV_SIZE_ALL; in arm_smmu_atc_inv_to_cmd()
1746 page_end = (iova + size - 1) >> inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1751 * thus have to choose between grossly over-invalidating the region, or in arm_smmu_atc_inv_to_cmd()
1769 span_mask = (1ULL << log2_span) - 1; in arm_smmu_atc_inv_to_cmd()
1773 cmd->atc.addr = page_start << inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1774 cmd->atc.size = log2_span; in arm_smmu_atc_inv_to_cmd()
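arm_smmu_atc_inv_to_cmd() encodes the invalidation window as an aligned power-of-two span of 4 KiB pages: it finds the most significant bit in which the first and last page numbers differ, rounds the span up to that power of two, and aligns the start address down, accepting some over-invalidation. A worked stand-alone version of that computation (hypothetical function name, same arithmetic as lines 1746-1774 above):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GRAIN_SHIFT 12	/* ATC invalidations operate on 4096-byte pages */

static void atc_inv_range(uint64_t iova, size_t size,
			  uint64_t *addr, unsigned int *log2_span)
{
	uint64_t page_start = iova >> GRAIN_SHIFT;
	uint64_t page_end = (iova + size - 1) >> GRAIN_SHIFT;
	unsigned int span;
	uint64_t mask;

	/* Highest differing bit between the first and last page number. */
	span = page_start == page_end ? 0 :
	       64 - __builtin_clzll(page_start ^ page_end);

	mask = (1ULL << span) - 1;
	*addr = (page_start & ~mask) << GRAIN_SHIFT; /* align start down     */
	*log2_span = span;                           /* invalidate 2^span pages */
}

int main(void)
{
	uint64_t addr;
	unsigned int span;

	/* 8 KiB at 0x7000 straddles pages 7 and 8 -> a 16-page aligned span. */
	atc_inv_range(0x7000, 0x2000, &addr, &span);
	printf("addr=%#llx, pages=%u\n", (unsigned long long)addr, 1u << span);
	return 0;
}
```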
1786 for (i = 0; i < master->num_streams; i++) { in arm_smmu_atc_inv_master()
1787 cmd.atc.sid = master->streams[i].id; in arm_smmu_atc_inv_master()
1788 arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd); in arm_smmu_atc_inv_master()
1791 return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); in arm_smmu_atc_inv_master()
1803 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
1812 * TLBI+SYNC atomic_inc(&nr_ats_masters); in arm_smmu_atc_inv_domain()
1820 if (!atomic_read(&smmu_domain->nr_ats_masters)) in arm_smmu_atc_inv_domain()
1827 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1828 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_atc_inv_domain()
1829 if (!master->ats_enabled) in arm_smmu_atc_inv_domain()
1832 for (i = 0; i < master->num_streams; i++) { in arm_smmu_atc_inv_domain()
1833 cmd.atc.sid = master->streams[i].id; in arm_smmu_atc_inv_domain()
1834 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
1837 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1839 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
1846 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context()
1850 * NOTE: when io-pgtable is in non-strict mode, we may get here with in arm_smmu_tlb_inv_context()
1856 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_context()
1857 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid); in arm_smmu_tlb_inv_context()
1860 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_context()
1871 struct arm_smmu_device *smmu = smmu_domain->smmu; in __arm_smmu_tlb_inv_range()
1879 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
1881 tg = __ffs(smmu_domain->domain.pgsize_bitmap); in __arm_smmu_tlb_inv_range()
1884 cmd->tlbi.tg = (tg - 10) / 2; in __arm_smmu_tlb_inv_range()
1887 cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in __arm_smmu_tlb_inv_range()
1895 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
1907 cmd->tlbi.scale = scale; in __arm_smmu_tlb_inv_range()
1911 cmd->tlbi.num = num - 1; in __arm_smmu_tlb_inv_range()
1917 num_pages -= num << scale; in __arm_smmu_tlb_inv_range()
1920 cmd->tlbi.addr = iova; in __arm_smmu_tlb_inv_range()
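With ARM_SMMU_FEAT_RANGE_INV, __arm_smmu_tlb_inv_range() covers the remaining pages with commands of num * 2^scale pages each, where scale is the number of trailing zero bits left in the page count (lines 1895-1917 above). A hedged sketch of that chunking loop, assuming a 5-bit NUM field (maximum 31) and printing the chunks instead of queueing TLBI commands:

```c
#include <stdint.h>
#include <stdio.h>

#define RANGE_NUM_MAX 31u	/* assumed width of the NUM field (5 bits) */

/*
 * Print the (num, scale) chunks a range-capable TLBI would use to cover
 * @num_pages pages of @pgshift-sized granules starting at @iova.
 */
static void tlbi_range(uint64_t iova, uint64_t num_pages, unsigned int pgshift)
{
	while (num_pages) {
		/* Largest power-of-two multiple dividing what is left. */
		unsigned int scale = __builtin_ctzll(num_pages);
		uint64_t num = (num_pages >> scale) & RANGE_NUM_MAX;
		uint64_t range = num << (scale + pgshift);

		printf("TLBI addr=%#llx num=%llu scale=%u (covers %#llx bytes)\n",
		       (unsigned long long)iova, (unsigned long long)num,
		       scale, (unsigned long long)range);

		num_pages -= num << scale;
		iova += range;
	}
}

int main(void)
{
	/* 0x123 pages of 4 KiB are emitted as a few num*2^scale chunks. */
	tlbi_range(0x100000, 0x123, 12);
	return 0;
}
```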
1937 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_range_domain()
1938 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_domain()
1940 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; in arm_smmu_tlb_inv_range_domain()
1943 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_range_domain()
1948 * Unfortunately, this can't be leaf-only since we may have in arm_smmu_tlb_inv_range_domain()
1959 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_asid()
1975 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_tlb_inv_page_nosync()
2000 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_capable()
2027 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
2028 INIT_LIST_HEAD(&smmu_domain->devices); in arm_smmu_domain_alloc()
2029 spin_lock_init(&smmu_domain->devices_lock); in arm_smmu_domain_alloc()
2030 INIT_LIST_HEAD(&smmu_domain->mmu_notifiers); in arm_smmu_domain_alloc()
2032 return &smmu_domain->domain; in arm_smmu_domain_alloc()
2042 return -ENOSPC; in arm_smmu_bitmap_alloc()
2056 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free()
2058 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_domain_free()
2061 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_domain_free()
2062 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_free()
2066 if (cfg->cdcfg.cdtab) in arm_smmu_domain_free()
2068 arm_smmu_free_asid(&cfg->cd); in arm_smmu_domain_free()
2071 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_free()
2072 if (cfg->vmid) in arm_smmu_domain_free()
2073 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid); in arm_smmu_domain_free()
2085 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s1()
2086 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_finalise_s1()
2087 typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; in arm_smmu_domain_finalise_s1()
2089 refcount_set(&cfg->cd.refs, 1); in arm_smmu_domain_finalise_s1()
2093 ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd, in arm_smmu_domain_finalise_s1()
2094 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_domain_finalise_s1()
2098 cfg->s1cdmax = master->ssid_bits; in arm_smmu_domain_finalise_s1()
2100 smmu_domain->stall_enabled = master->stall_enabled; in arm_smmu_domain_finalise_s1()
2106 cfg->cd.asid = (u16)asid; in arm_smmu_domain_finalise_s1()
2107 cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_domain_finalise_s1()
2108 cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | in arm_smmu_domain_finalise_s1()
2109 FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | in arm_smmu_domain_finalise_s1()
2110 FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | in arm_smmu_domain_finalise_s1()
2111 FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | in arm_smmu_domain_finalise_s1()
2112 FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | in arm_smmu_domain_finalise_s1()
2113 FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | in arm_smmu_domain_finalise_s1()
2115 cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_domain_finalise_s1()
2122 ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd); in arm_smmu_domain_finalise_s1()
2132 arm_smmu_free_asid(&cfg->cd); in arm_smmu_domain_finalise_s1()
2143 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s2()
2144 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_finalise_s2()
2145 typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; in arm_smmu_domain_finalise_s2()
2147 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); in arm_smmu_domain_finalise_s2()
2151 vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; in arm_smmu_domain_finalise_s2()
2152 cfg->vmid = (u16)vmid; in arm_smmu_domain_finalise_s2()
2153 cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_domain_finalise_s2()
2154 cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | in arm_smmu_domain_finalise_s2()
2155 FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | in arm_smmu_domain_finalise_s2()
2156 FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | in arm_smmu_domain_finalise_s2()
2157 FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | in arm_smmu_domain_finalise_s2()
2158 FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | in arm_smmu_domain_finalise_s2()
2159 FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | in arm_smmu_domain_finalise_s2()
2160 FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); in arm_smmu_domain_finalise_s2()
2176 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise()
2178 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_domain_finalise()
2179 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_domain_finalise()
2184 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_domain_finalise()
2185 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_finalise()
2186 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_domain_finalise()
2187 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_finalise()
2189 switch (smmu_domain->stage) { in arm_smmu_domain_finalise()
2191 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; in arm_smmu_domain_finalise()
2193 oas = smmu->ias; in arm_smmu_domain_finalise()
2199 ias = smmu->ias; in arm_smmu_domain_finalise()
2200 oas = smmu->oas; in arm_smmu_domain_finalise()
2205 return -EINVAL; in arm_smmu_domain_finalise()
2209 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_domain_finalise()
2212 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, in arm_smmu_domain_finalise()
2214 .iommu_dev = smmu->dev, in arm_smmu_domain_finalise()
2219 return -ENOMEM; in arm_smmu_domain_finalise()
2221 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_domain_finalise()
2222 domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; in arm_smmu_domain_finalise()
2223 domain->geometry.force_aperture = true; in arm_smmu_domain_finalise()
2231 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_domain_finalise()
2238 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_get_step_for_sid()
2240 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_get_step_for_sid()
2244 /* Two-level walk */ in arm_smmu_get_step_for_sid()
2246 l1_desc = &cfg->l1_desc[idx]; in arm_smmu_get_step_for_sid()
2247 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS; in arm_smmu_get_step_for_sid()
2248 step = &l1_desc->l2ptr[idx]; in arm_smmu_get_step_for_sid()
2251 step = &cfg->strtab[sid * STRTAB_STE_DWORDS]; in arm_smmu_get_step_for_sid()
2260 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_install_ste_for_dev()
2262 for (i = 0; i < master->num_streams; ++i) { in arm_smmu_install_ste_for_dev()
2263 u32 sid = master->streams[i].id; in arm_smmu_install_ste_for_dev()
2268 if (master->streams[j].id == sid) in arm_smmu_install_ste_for_dev()
2279 struct device *dev = master->dev; in arm_smmu_ats_supported()
2280 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_ats_supported()
2283 if (!(smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_ats_supported()
2286 if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS)) in arm_smmu_ats_supported()
2296 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_enable_ats()
2297 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_enable_ats()
2300 if (!master->ats_enabled) in arm_smmu_enable_ats()
2304 stu = __ffs(smmu->pgsize_bitmap); in arm_smmu_enable_ats()
2305 pdev = to_pci_dev(master->dev); in arm_smmu_enable_ats()
2307 atomic_inc(&smmu_domain->nr_ats_masters); in arm_smmu_enable_ats()
2310 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); in arm_smmu_enable_ats()
2315 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_disable_ats()
2317 if (!master->ats_enabled) in arm_smmu_disable_ats()
2320 pci_disable_ats(to_pci_dev(master->dev)); in arm_smmu_disable_ats()
2327 atomic_dec(&smmu_domain->nr_ats_masters); in arm_smmu_disable_ats()
2337 if (!dev_is_pci(master->dev)) in arm_smmu_enable_pasid()
2338 return -ENODEV; in arm_smmu_enable_pasid()
2340 pdev = to_pci_dev(master->dev); in arm_smmu_enable_pasid()
2352 dev_err(&pdev->dev, "Failed to enable PASID\n"); in arm_smmu_enable_pasid()
2356 master->ssid_bits = min_t(u8, ilog2(num_pasids), in arm_smmu_enable_pasid()
2357 master->smmu->ssid_bits); in arm_smmu_enable_pasid()
2365 if (!dev_is_pci(master->dev)) in arm_smmu_disable_pasid()
2368 pdev = to_pci_dev(master->dev); in arm_smmu_disable_pasid()
2370 if (!pdev->pasid_enabled) in arm_smmu_disable_pasid()
2373 master->ssid_bits = 0; in arm_smmu_disable_pasid()
2380 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_detach_dev()
2387 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2388 list_del(&master->domain_head); in arm_smmu_detach_dev()
2389 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2391 master->domain = NULL; in arm_smmu_detach_dev()
2392 master->ats_enabled = false; in arm_smmu_detach_dev()
2406 return -ENOENT; in arm_smmu_attach_dev()
2409 smmu = master->smmu; in arm_smmu_attach_dev()
2417 dev_err(dev, "cannot attach - SVA enabled\n"); in arm_smmu_attach_dev()
2418 return -EBUSY; in arm_smmu_attach_dev()
2423 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2425 if (!smmu_domain->smmu) { in arm_smmu_attach_dev()
2426 smmu_domain->smmu = smmu; in arm_smmu_attach_dev()
2429 smmu_domain->smmu = NULL; in arm_smmu_attach_dev()
2432 } else if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
2435 dev_name(smmu_domain->smmu->dev), in arm_smmu_attach_dev()
2436 dev_name(smmu->dev)); in arm_smmu_attach_dev()
2437 ret = -ENXIO; in arm_smmu_attach_dev()
2439 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && in arm_smmu_attach_dev()
2440 master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { in arm_smmu_attach_dev()
2443 smmu_domain->s1_cfg.s1cdmax, master->ssid_bits); in arm_smmu_attach_dev()
2444 ret = -EINVAL; in arm_smmu_attach_dev()
2446 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && in arm_smmu_attach_dev()
2447 smmu_domain->stall_enabled != master->stall_enabled) { in arm_smmu_attach_dev()
2448 dev_err(dev, "cannot attach to stall-%s domain\n", in arm_smmu_attach_dev()
2449 smmu_domain->stall_enabled ? "enabled" : "disabled"); in arm_smmu_attach_dev()
2450 ret = -EINVAL; in arm_smmu_attach_dev()
2454 master->domain = smmu_domain; in arm_smmu_attach_dev()
2456 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_attach_dev()
2457 master->ats_enabled = arm_smmu_ats_supported(master); in arm_smmu_attach_dev()
2461 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2462 list_add(&master->domain_head, &smmu_domain->devices); in arm_smmu_attach_dev()
2463 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2468 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2476 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map_pages()
2479 return -ENODEV; in arm_smmu_map_pages()
2481 return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); in arm_smmu_map_pages()
2489 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_unmap_pages()
2494 return ops->unmap_pages(ops, iova, pgsize, pgcount, gather); in arm_smmu_unmap_pages()
2501 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
2510 if (!gather->pgsize) in arm_smmu_iotlb_sync()
2513 arm_smmu_tlb_inv_range_domain(gather->start, in arm_smmu_iotlb_sync()
2514 gather->end - gather->start + 1, in arm_smmu_iotlb_sync()
2515 gather->pgsize, true, smmu_domain); in arm_smmu_iotlb_sync()
2521 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_iova_to_phys()
2526 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
2542 unsigned long limit = smmu->strtab_cfg.num_l1_ents; in arm_smmu_sid_in_range()
2544 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_sid_in_range()
2554 return -ERANGE; in arm_smmu_init_sid_strtab()
2557 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_sid_strtab()
2570 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); in arm_smmu_insert_master()
2572 master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams), in arm_smmu_insert_master()
2574 if (!master->streams) in arm_smmu_insert_master()
2575 return -ENOMEM; in arm_smmu_insert_master()
2576 master->num_streams = fwspec->num_ids; in arm_smmu_insert_master()
2578 mutex_lock(&smmu->streams_mutex); in arm_smmu_insert_master()
2579 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_insert_master()
2580 u32 sid = fwspec->ids[i]; in arm_smmu_insert_master()
2582 new_stream = &master->streams[i]; in arm_smmu_insert_master()
2583 new_stream->id = sid; in arm_smmu_insert_master()
2584 new_stream->master = master; in arm_smmu_insert_master()
2591 new_node = &(smmu->streams.rb_node); in arm_smmu_insert_master()
2596 if (cur_stream->id > new_stream->id) { in arm_smmu_insert_master()
2597 new_node = &((*new_node)->rb_left); in arm_smmu_insert_master()
2598 } else if (cur_stream->id < new_stream->id) { in arm_smmu_insert_master()
2599 new_node = &((*new_node)->rb_right); in arm_smmu_insert_master()
2601 dev_warn(master->dev, in arm_smmu_insert_master()
2603 cur_stream->id); in arm_smmu_insert_master()
2604 ret = -EINVAL; in arm_smmu_insert_master()
2611 rb_link_node(&new_stream->node, parent_node, new_node); in arm_smmu_insert_master()
2612 rb_insert_color(&new_stream->node, &smmu->streams); in arm_smmu_insert_master()
2616 for (i--; i >= 0; i--) in arm_smmu_insert_master()
2617 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_insert_master()
2618 kfree(master->streams); in arm_smmu_insert_master()
2620 mutex_unlock(&smmu->streams_mutex); in arm_smmu_insert_master()
2628 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_remove_master()
2629 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); in arm_smmu_remove_master()
2631 if (!smmu || !master->streams) in arm_smmu_remove_master()
2634 mutex_lock(&smmu->streams_mutex); in arm_smmu_remove_master()
2635 for (i = 0; i < fwspec->num_ids; i++) in arm_smmu_remove_master()
2636 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_remove_master()
2637 mutex_unlock(&smmu->streams_mutex); in arm_smmu_remove_master()
2639 kfree(master->streams); in arm_smmu_remove_master()
2651 if (!fwspec || fwspec->ops != &arm_smmu_ops) in arm_smmu_probe_device()
2652 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
2655 return ERR_PTR(-EBUSY); in arm_smmu_probe_device()
2657 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
2659 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
2663 return ERR_PTR(-ENOMEM); in arm_smmu_probe_device()
2665 master->dev = dev; in arm_smmu_probe_device()
2666 master->smmu = smmu; in arm_smmu_probe_device()
2667 INIT_LIST_HEAD(&master->bonds); in arm_smmu_probe_device()
2674 device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits); in arm_smmu_probe_device()
2675 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits); in arm_smmu_probe_device()
2679 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register in arm_smmu_probe_device()
2687 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) in arm_smmu_probe_device()
2688 master->ssid_bits = min_t(u8, master->ssid_bits, in arm_smmu_probe_device()
2691 if ((smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_probe_device()
2692 device_property_read_bool(dev, "dma-can-stall")) || in arm_smmu_probe_device()
2693 smmu->features & ARM_SMMU_FEAT_STALL_FORCE) in arm_smmu_probe_device()
2694 master->stall_enabled = true; in arm_smmu_probe_device()
2696 return &smmu->iommu; in arm_smmu_probe_device()
2709 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); in arm_smmu_release_device()
2722 * aliases, since the necessary ID-to-device lookup becomes rather in arm_smmu_device_group()
2723 * impractical given a potential sparse 32-bit stream ID space. in arm_smmu_device_group()
2738 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
2739 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
2740 ret = -EPERM; in arm_smmu_enable_nesting()
2742 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_enable_nesting()
2743 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
2750 return iommu_fwspec_add_ids(dev, args->args, 1); in arm_smmu_of_xlate()
2764 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
2775 return -ENODEV; in arm_smmu_dev_enable_feature()
2780 return -EINVAL; in arm_smmu_dev_enable_feature()
2781 if (master->iopf_enabled) in arm_smmu_dev_enable_feature()
2782 return -EBUSY; in arm_smmu_dev_enable_feature()
2783 master->iopf_enabled = true; in arm_smmu_dev_enable_feature()
2787 return -EINVAL; in arm_smmu_dev_enable_feature()
2789 return -EBUSY; in arm_smmu_dev_enable_feature()
2792 return -EINVAL; in arm_smmu_dev_enable_feature()
2802 return -EINVAL; in arm_smmu_dev_disable_feature()
2806 if (!master->iopf_enabled) in arm_smmu_dev_disable_feature()
2807 return -EINVAL; in arm_smmu_dev_disable_feature()
2808 if (master->sva_enabled) in arm_smmu_dev_disable_feature()
2809 return -EBUSY; in arm_smmu_dev_disable_feature()
2810 master->iopf_enabled = false; in arm_smmu_dev_disable_feature()
2814 return -EINVAL; in arm_smmu_dev_disable_feature()
2817 return -EINVAL; in arm_smmu_dev_disable_feature()
2826 #define IS_HISI_PTT_DEVICE(pdev) ((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
2827 (pdev)->device == 0xa12e)
2856 .pgsize_bitmap = -1UL, /* Restricted during device attach */
2881 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; in arm_smmu_init_one_queue()
2882 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, in arm_smmu_init_one_queue()
2884 if (q->base || qsz < PAGE_SIZE) in arm_smmu_init_one_queue()
2887 q->llq.max_n_shift--; in arm_smmu_init_one_queue()
2890 if (!q->base) { in arm_smmu_init_one_queue()
2891 dev_err(smmu->dev, in arm_smmu_init_one_queue()
2894 return -ENOMEM; in arm_smmu_init_one_queue()
2897 if (!WARN_ON(q->base_dma & (qsz - 1))) { in arm_smmu_init_one_queue()
2898 dev_info(smmu->dev, "allocated %u entries for %s\n", in arm_smmu_init_one_queue()
2899 1 << q->llq.max_n_shift, name); in arm_smmu_init_one_queue()
2902 q->prod_reg = page + prod_off; in arm_smmu_init_one_queue()
2903 q->cons_reg = page + cons_off; in arm_smmu_init_one_queue()
2904 q->ent_dwords = dwords; in arm_smmu_init_one_queue()
2906 q->q_base = Q_BASE_RWA; in arm_smmu_init_one_queue()
2907 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; in arm_smmu_init_one_queue()
2908 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); in arm_smmu_init_one_queue()
2910 q->llq.prod = q->llq.cons = 0; in arm_smmu_init_one_queue()
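arm_smmu_init_one_queue() retries its DMA allocation with progressively smaller queues, halving max_n_shift until the allocation succeeds or the queue size would drop below a page, and only then gives up. A user-space approximation of that sizing loop, with malloc() standing in for dmam_alloc_coherent():

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Try progressively smaller powers of two until the allocation succeeds. */
static void *alloc_queue(unsigned int *max_n_shift, size_t ent_bytes)
{
	void *base;
	size_t qsz;

	do {
		qsz = ((size_t)1 << *max_n_shift) * ent_bytes;
		base = malloc(qsz);	/* dmam_alloc_coherent() in the driver */
		if (base || qsz < PAGE_SIZE)
			break;
		(*max_n_shift)--;	/* halve the queue and retry */
	} while (1);

	if (!base)
		fprintf(stderr, "failed to allocate %zu bytes for queue\n", qsz);
	return base;
}
```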
2916 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_init() local
2917 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
2919 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
2920 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
2922 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, in arm_smmu_cmdq_init()
2924 if (!cmdq->valid_map) in arm_smmu_cmdq_init()
2925 return -ENOMEM; in arm_smmu_cmdq_init()
2934 /* cmdq */ in arm_smmu_init_queues()
2935 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base, in arm_smmu_init_queues()
2937 CMDQ_ENT_DWORDS, "cmdq"); in arm_smmu_init_queues()
2946 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1, in arm_smmu_init_queues()
2952 if ((smmu->features & ARM_SMMU_FEAT_SVA) && in arm_smmu_init_queues()
2953 (smmu->features & ARM_SMMU_FEAT_STALLS)) { in arm_smmu_init_queues()
2954 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev)); in arm_smmu_init_queues()
2955 if (!smmu->evtq.iopf) in arm_smmu_init_queues()
2956 return -ENOMEM; in arm_smmu_init_queues()
2960 if (!(smmu->features & ARM_SMMU_FEAT_PRI)) in arm_smmu_init_queues()
2963 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1, in arm_smmu_init_queues()
2971 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l1_strtab()
2972 void *strtab = smmu->strtab_cfg.strtab; in arm_smmu_init_l1_strtab()
2974 cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, in arm_smmu_init_l1_strtab()
2975 sizeof(*cfg->l1_desc), GFP_KERNEL); in arm_smmu_init_l1_strtab()
2976 if (!cfg->l1_desc) in arm_smmu_init_l1_strtab()
2977 return -ENOMEM; in arm_smmu_init_l1_strtab()
2979 for (i = 0; i < cfg->num_l1_ents; ++i) { in arm_smmu_init_l1_strtab()
2980 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]); in arm_smmu_init_l1_strtab()
2992 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_2lvl()
2995 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); in arm_smmu_init_strtab_2lvl()
2996 size = min(size, smmu->sid_bits - STRTAB_SPLIT); in arm_smmu_init_strtab_2lvl()
2997 cfg->num_l1_ents = 1 << size; in arm_smmu_init_strtab_2lvl()
3000 if (size < smmu->sid_bits) in arm_smmu_init_strtab_2lvl()
3001 dev_warn(smmu->dev, in arm_smmu_init_strtab_2lvl()
3002 "2-level strtab only covers %u/%u bits of SID\n", in arm_smmu_init_strtab_2lvl()
3003 size, smmu->sid_bits); in arm_smmu_init_strtab_2lvl()
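arm_smmu_init_strtab_2lvl() bounds the L1 table: its index may consume at most STRTAB_L1_SZ_SHIFT worth of descriptor bytes and at most sid_bits - STRTAB_SPLIT bits of the StreamID, and the warning above fires when that still does not cover the full SID space. A worked example with assumed values (8 low SID bits resolved per leaf, 8-byte L1 descriptors, a 2^20-byte L1 budget; the driver's STRTAB_* constants may differ):

```c
#include <stdio.h>

/* Assumed values; the driver's STRTAB_* constants may differ. */
#define L1_SZ_SHIFT	20	/* L1 table limited to 2^20 bytes      */
#define L1_DESC_BYTES	8	/* one 64-bit descriptor per leaf      */
#define SPLIT		8	/* low SID bits resolved in the leaf   */

int main(void)
{
	unsigned int sid_bits = 32;
	unsigned int size = L1_SZ_SHIFT - __builtin_ctz(L1_DESC_BYTES);
	unsigned int l1_bits = size < sid_bits - SPLIT ? size : sid_bits - SPLIT;

	/* Prints: L1 entries: 131072, covering 25/32 SID bits */
	printf("L1 entries: %u, covering %u/%u SID bits\n",
	       1u << l1_bits, l1_bits + SPLIT, sid_bits);
	return 0;
}
```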
3005 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); in arm_smmu_init_strtab_2lvl()
3006 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, in arm_smmu_init_strtab_2lvl()
3009 dev_err(smmu->dev, in arm_smmu_init_strtab_2lvl()
3012 return -ENOMEM; in arm_smmu_init_strtab_2lvl()
3014 cfg->strtab = strtab; in arm_smmu_init_strtab_2lvl()
3020 cfg->strtab_base_cfg = reg; in arm_smmu_init_strtab_2lvl()
3030 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_linear()
3032 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); in arm_smmu_init_strtab_linear()
3033 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, in arm_smmu_init_strtab_linear()
3036 dev_err(smmu->dev, in arm_smmu_init_strtab_linear()
3039 return -ENOMEM; in arm_smmu_init_strtab_linear()
3041 cfg->strtab = strtab; in arm_smmu_init_strtab_linear()
3042 cfg->num_l1_ents = 1 << smmu->sid_bits; in arm_smmu_init_strtab_linear()
3046 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); in arm_smmu_init_strtab_linear()
3047 cfg->strtab_base_cfg = reg; in arm_smmu_init_strtab_linear()
3049 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false); in arm_smmu_init_strtab_linear()
3058 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_strtab()
3067 reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK; in arm_smmu_init_strtab()
3069 smmu->strtab_cfg.strtab_base = reg; in arm_smmu_init_strtab()
3071 /* Allocate the first VMID for stage-2 bypass STEs */ in arm_smmu_init_strtab()
3072 set_bit(0, smmu->vmid_map); in arm_smmu_init_strtab()
3080 mutex_init(&smmu->streams_mutex); in arm_smmu_init_structures()
3081 smmu->streams = RB_ROOT; in arm_smmu_init_structures()
3095 writel_relaxed(val, smmu->base + reg_off); in arm_smmu_write_reg_sync()
3096 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val, in arm_smmu_write_reg_sync()
3104 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; in arm_smmu_update_gbpa()
3118 dev_err(smmu->dev, "GBPA not responding to update\n"); in arm_smmu_update_gbpa()
3133 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index]; in arm_smmu_write_msi_msg()
3135 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; in arm_smmu_write_msi_msg()
3138 writeq_relaxed(doorbell, smmu->base + cfg[0]); in arm_smmu_write_msi_msg()
3139 writel_relaxed(msg->data, smmu->base + cfg[1]); in arm_smmu_write_msi_msg()
3140 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); in arm_smmu_write_msi_msg()
3146 struct device *dev = smmu->dev; in arm_smmu_setup_msis()
3149 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); in arm_smmu_setup_msis()
3150 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); in arm_smmu_setup_msis()
3152 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_msis()
3153 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); in arm_smmu_setup_msis()
3155 nvec--; in arm_smmu_setup_msis()
3157 if (!(smmu->features & ARM_SMMU_FEAT_MSI)) in arm_smmu_setup_msis()
3160 if (!dev->msi.domain) { in arm_smmu_setup_msis()
3161 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); in arm_smmu_setup_msis()
3165 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ in arm_smmu_setup_msis()
3168 dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); in arm_smmu_setup_msis()
3172 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX); in arm_smmu_setup_msis()
3173 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); in arm_smmu_setup_msis()
3174 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX); in arm_smmu_setup_msis()
3187 irq = smmu->evtq.q.irq; in arm_smmu_setup_unique_irqs()
3189 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3192 "arm-smmu-v3-evtq", smmu); in arm_smmu_setup_unique_irqs()
3194 dev_warn(smmu->dev, "failed to enable evtq irq\n"); in arm_smmu_setup_unique_irqs()
3196 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3199 irq = smmu->gerr_irq; in arm_smmu_setup_unique_irqs()
3201 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, in arm_smmu_setup_unique_irqs()
3202 0, "arm-smmu-v3-gerror", smmu); in arm_smmu_setup_unique_irqs()
3204 dev_warn(smmu->dev, "failed to enable gerror irq\n"); in arm_smmu_setup_unique_irqs()
3206 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3209 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_setup_unique_irqs()
3210 irq = smmu->priq.q.irq; in arm_smmu_setup_unique_irqs()
3212 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3215 "arm-smmu-v3-priq", in arm_smmu_setup_unique_irqs()
3218 dev_warn(smmu->dev, in arm_smmu_setup_unique_irqs()
3221 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); in arm_smmu_setup_unique_irqs()
3235 dev_err(smmu->dev, "failed to disable irqs\n"); in arm_smmu_setup_irqs()
3239 irq = smmu->combined_irq; in arm_smmu_setup_irqs()
3245 ret = devm_request_threaded_irq(smmu->dev, irq, in arm_smmu_setup_irqs()
3249 "arm-smmu-v3-combined-irq", smmu); in arm_smmu_setup_irqs()
3251 dev_warn(smmu->dev, "failed to enable combined irq\n"); in arm_smmu_setup_irqs()
3255 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_irqs()
3262 dev_warn(smmu->dev, "failed to enable irqs\n"); in arm_smmu_setup_irqs()
3273 dev_err(smmu->dev, "failed to clear cr0\n"); in arm_smmu_device_disable()
3284 /* Clear CR0 and sync (disables SMMU and queue processing) */ in arm_smmu_device_reset()
3285 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); in arm_smmu_device_reset()
3287 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n"); in arm_smmu_device_reset()
3303 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1); in arm_smmu_device_reset()
3308 if (smmu->features & ARM_SMMU_FEAT_E2H) in arm_smmu_device_reset()
3311 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2); in arm_smmu_device_reset()
3314 writeq_relaxed(smmu->strtab_cfg.strtab_base, in arm_smmu_device_reset()
3315 smmu->base + ARM_SMMU_STRTAB_BASE); in arm_smmu_device_reset()
3316 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg, in arm_smmu_device_reset()
3317 smmu->base + ARM_SMMU_STRTAB_BASE_CFG); in arm_smmu_device_reset()
3320 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
3321 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
3322 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
3328 dev_err(smmu->dev, "failed to enable command queue\n"); in arm_smmu_device_reset()
3337 if (smmu->features & ARM_SMMU_FEAT_HYP) { in arm_smmu_device_reset()
3346 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); in arm_smmu_device_reset()
3347 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD); in arm_smmu_device_reset()
3348 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS); in arm_smmu_device_reset()
3354 dev_err(smmu->dev, "failed to enable event queue\n"); in arm_smmu_device_reset()
3359 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_device_reset()
3360 writeq_relaxed(smmu->priq.q.q_base, in arm_smmu_device_reset()
3361 smmu->base + ARM_SMMU_PRIQ_BASE); in arm_smmu_device_reset()
3362 writel_relaxed(smmu->priq.q.llq.prod, in arm_smmu_device_reset()
3363 smmu->page1 + ARM_SMMU_PRIQ_PROD); in arm_smmu_device_reset()
3364 writel_relaxed(smmu->priq.q.llq.cons, in arm_smmu_device_reset()
3365 smmu->page1 + ARM_SMMU_PRIQ_CONS); in arm_smmu_device_reset()
3371 dev_err(smmu->dev, "failed to enable PRI queue\n"); in arm_smmu_device_reset()
3376 if (smmu->features & ARM_SMMU_FEAT_ATS) { in arm_smmu_device_reset()
3381 dev_err(smmu->dev, "failed to enable ATS check\n"); in arm_smmu_device_reset()
3388 dev_err(smmu->dev, "failed to setup irqs\n"); in arm_smmu_device_reset()
3406 dev_err(smmu->dev, "failed to enable SMMU interface\n"); in arm_smmu_device_reset()
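/*
 * Illustrative sketch (not part of the driver): the q_base value written to
 * ARM_SMMU_CMDQ_BASE (and the EVTQ/PRIQ equivalents) during the reset
 * sequence above packs the queue's DMA address together with its log2 size.
 * Q_BASE_RWA, Q_BASE_ADDR_MASK and Q_BASE_LOG2SIZE are assumed to match the
 * definitions in arm-smmu-v3.h.
 */
static u64 example_queue_base(dma_addr_t base_dma, u32 max_n_shift)
{
	u64 q_base = Q_BASE_RWA;

	q_base |= base_dma & Q_BASE_ADDR_MASK;
	q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, max_n_shift);
	return q_base;
}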
3416 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_hw_probe()
3419 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0); in arm_smmu_device_hw_probe()
3421 /* 2-level structures */ in arm_smmu_device_hw_probe()
3423 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
3426 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB; in arm_smmu_device_hw_probe()
3435 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
3439 smmu->features |= ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
3443 smmu->features |= ARM_SMMU_FEAT_TT_LE; in arm_smmu_device_hw_probe()
3447 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n"); in arm_smmu_device_hw_probe()
3448 return -ENXIO; in arm_smmu_device_hw_probe()
3453 smmu->features |= ARM_SMMU_FEAT_PRI; in arm_smmu_device_hw_probe()
3456 smmu->features |= ARM_SMMU_FEAT_ATS; in arm_smmu_device_hw_probe()
3459 smmu->features |= ARM_SMMU_FEAT_SEV; in arm_smmu_device_hw_probe()
3462 smmu->features |= ARM_SMMU_FEAT_MSI; in arm_smmu_device_hw_probe()
3464 smmu->options |= ARM_SMMU_OPT_MSIPOLL; in arm_smmu_device_hw_probe()
3468 smmu->features |= ARM_SMMU_FEAT_HYP; in arm_smmu_device_hw_probe()
3470 smmu->features |= ARM_SMMU_FEAT_E2H; in arm_smmu_device_hw_probe()
3478 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n", in arm_smmu_device_hw_probe()
3483 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE; in arm_smmu_device_hw_probe()
3486 smmu->features |= ARM_SMMU_FEAT_STALLS; in arm_smmu_device_hw_probe()
3490 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_hw_probe()
3493 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_hw_probe()
3496 dev_err(smmu->dev, "no translation support!\n"); in arm_smmu_device_hw_probe()
3497 return -ENXIO; in arm_smmu_device_hw_probe()
3503 smmu->ias = 40; in arm_smmu_device_hw_probe()
3508 dev_err(smmu->dev, "AArch64 table format not supported!\n"); in arm_smmu_device_hw_probe()
3509 return -ENXIO; in arm_smmu_device_hw_probe()
3513 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8; in arm_smmu_device_hw_probe()
3514 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8; in arm_smmu_device_hw_probe()
3517 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1); in arm_smmu_device_hw_probe()
3519 dev_err(smmu->dev, "embedded implementation not supported\n"); in arm_smmu_device_hw_probe()
3520 return -ENXIO; in arm_smmu_device_hw_probe()
3524 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3526 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
3529 * commands plus an extra sync needs to fit inside the command in arm_smmu_device_hw_probe()
3531 * restrictions on the base pointer for a unit-length queue. in arm_smmu_device_hw_probe()
3533 dev_err(smmu->dev, "command queue size <= %d entries not supported\n", in arm_smmu_device_hw_probe()
3535 return -ENXIO; in arm_smmu_device_hw_probe()
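/*
 * Illustrative sketch (not part of the driver): the minimum command queue
 * size enforced above. With CMDQ_BATCH_ENTRIES typically BITS_PER_LONG
 * (64), a full batch plus its trailing CMD_SYNC needs 65 slots, so
 * max_n_shift must exceed ilog2(64) == 6, i.e. at least 128 entries.
 */
static bool example_cmdq_large_enough(u32 max_n_shift)
{
	const u32 batch_entries = 64;	/* assumed CMDQ_BATCH_ENTRIES */

	return max_n_shift > ilog2(batch_entries);
}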
3538 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3540 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3544 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); in arm_smmu_device_hw_probe()
3545 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg); in arm_smmu_device_hw_probe()
3551 if (smmu->sid_bits <= STRTAB_SPLIT) in arm_smmu_device_hw_probe()
3552 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
3555 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); in arm_smmu_device_hw_probe()
3557 smmu->features |= ARM_SMMU_FEAT_RANGE_INV; in arm_smmu_device_hw_probe()
3560 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); in arm_smmu_device_hw_probe()
3563 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg); in arm_smmu_device_hw_probe()
3567 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_hw_probe()
3569 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_hw_probe()
3571 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_hw_probe()
3575 smmu->features |= ARM_SMMU_FEAT_VAX; in arm_smmu_device_hw_probe()
3580 smmu->oas = 32; in arm_smmu_device_hw_probe()
3583 smmu->oas = 36; in arm_smmu_device_hw_probe()
3586 smmu->oas = 40; in arm_smmu_device_hw_probe()
3589 smmu->oas = 42; in arm_smmu_device_hw_probe()
3592 smmu->oas = 44; in arm_smmu_device_hw_probe()
3595 smmu->oas = 52; in arm_smmu_device_hw_probe()
3596 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */ in arm_smmu_device_hw_probe()
3599 dev_info(smmu->dev, in arm_smmu_device_hw_probe()
3600 "unknown output address size. Truncating to 48-bit\n"); in arm_smmu_device_hw_probe()
3603 smmu->oas = 48; in arm_smmu_device_hw_probe()
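/*
 * Illustrative sketch (not part of the driver): the IDR5.OAS decode done by
 * the switch above, assuming the PARange-style encodings from the SMMUv3
 * architecture (0b000..0b110, with 0b111 reserved).
 */
static unsigned int example_decode_oas(u32 oas_field)
{
	static const unsigned int oas_bits[] = { 32, 36, 40, 42, 44, 48, 52 };

	if (oas_field < ARRAY_SIZE(oas_bits))
		return oas_bits[oas_field];
	return 48;	/* reserved encoding: truncate to 48 bits, as the driver warns */
}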
3606 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_hw_probe()
3607 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
3609 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
3612 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas))) in arm_smmu_device_hw_probe()
3613 dev_warn(smmu->dev, in arm_smmu_device_hw_probe()
3616 smmu->ias = max(smmu->ias, smmu->oas); in arm_smmu_device_hw_probe()
3619 smmu->features |= ARM_SMMU_FEAT_SVA; in arm_smmu_device_hw_probe()
3621 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", in arm_smmu_device_hw_probe()
3622 smmu->ias, smmu->oas, smmu->features); in arm_smmu_device_hw_probe()
3631 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; in acpi_smmu_get_options()
3634 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; in acpi_smmu_get_options()
3638 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); in acpi_smmu_get_options()
3645 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
3651 iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_device_acpi_probe()
3653 acpi_smmu_get_options(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
3655 if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) in arm_smmu_device_acpi_probe()
3656 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_acpi_probe()
3664 return -ENODEV; in arm_smmu_device_acpi_probe()
3671 struct device *dev = &pdev->dev; in arm_smmu_device_dt_probe()
3673 int ret = -EINVAL; in arm_smmu_device_dt_probe()
3675 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells)) in arm_smmu_device_dt_probe()
3676 dev_err(dev, "missing #iommu-cells property\n"); in arm_smmu_device_dt_probe()
3678 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); in arm_smmu_device_dt_probe()
3684 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
3685 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_dt_probe()
3692 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY) in arm_smmu_resource_size()
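/*
 * Illustrative sketch (not part of the driver): the PAGE0_REGS_ONLY quirk
 * tested above. On parts where only register page 0 is usable, a single
 * 64K page is mapped; otherwise the usual two 64K register pages are.
 * SZ_64K and SZ_128K come from <linux/sizes.h>.
 */
static resource_size_t example_resource_size(bool page0_regs_only)
{
	return page0_regs_only ? SZ_64K : SZ_128K;
}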
3712 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
3720 for (i = 0; i < rmr->num_sids; i++) { in arm_smmu_rmr_install_bypass_ste()
3721 ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
3723 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n", in arm_smmu_rmr_install_bypass_ste()
3724 rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
3728 step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
3733 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
3742 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
3747 return -ENOMEM; in arm_smmu_device_probe()
3748 smmu->dev = dev; in arm_smmu_device_probe()
3750 if (dev->of_node) { in arm_smmu_device_probe()
3754 if (ret == -ENODEV) in arm_smmu_device_probe()
3764 return -EINVAL; in arm_smmu_device_probe()
3767 return -EINVAL; in arm_smmu_device_probe()
3769 ioaddr = res->start; in arm_smmu_device_probe()
3775 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ); in arm_smmu_device_probe()
3776 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
3777 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
3780 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K, in arm_smmu_device_probe()
3782 if (IS_ERR(smmu->page1)) in arm_smmu_device_probe()
3783 return PTR_ERR(smmu->page1); in arm_smmu_device_probe()
3785 smmu->page1 = smmu->base; in arm_smmu_device_probe()
3792 smmu->combined_irq = irq; in arm_smmu_device_probe()
3796 smmu->evtq.q.irq = irq; in arm_smmu_device_probe()
3800 smmu->priq.q.irq = irq; in arm_smmu_device_probe()
3804 smmu->gerr_irq = irq; in arm_smmu_device_probe()
3811 /* Initialise in-memory data structures */ in arm_smmu_device_probe()
3828 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, in arm_smmu_device_probe()
3833 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); in arm_smmu_device_probe()
3836 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
3847 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
3848 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
3850 iopf_queue_free(smmu->evtq.iopf); in arm_smmu_device_remove()
3861 { .compatible = "arm,smmu-v3", },
3874 .name = "arm-smmu-v3",
3887 MODULE_ALIAS("platform:arm-smmu-v3");