Lines Matching +full:smmu +full:- +full:v2

1 // SPDX-License-Identifier: GPL-2.0
17 #include <linux/dma-iommu.h>
20 #include <linux/io-pgtable.h>
29 #include <linux/pci-ats.h>
34 #include "arm-smmu-v3.h"
39 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
44 "Disable MSI-based polling for CMD_SYNC completion.");
80 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
81 { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
86 struct arm_smmu_device *smmu) in arm_smmu_page1_fixup() argument
89 return smmu->page1 + offset - SZ_64K; in arm_smmu_page1_fixup()
91 return smmu->base + offset; in arm_smmu_page1_fixup()
99 static void parse_driver_options(struct arm_smmu_device *smmu) in parse_driver_options() argument
104 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
106 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
107 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
113 /* Low-level queue manipulation functions */
118 prod = Q_IDX(q, q->prod); in queue_has_space()
119 cons = Q_IDX(q, q->cons); in queue_has_space()
121 if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) in queue_has_space()
122 space = (1 << q->max_n_shift) - (prod - cons); in queue_has_space()
124 space = cons - prod; in queue_has_space()
131 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_full()
132 Q_WRP(q, q->prod) != Q_WRP(q, q->cons); in queue_full()
137 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_empty()
138 Q_WRP(q, q->prod) == Q_WRP(q, q->cons); in queue_empty()
143 return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && in queue_consumed()
144 (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || in queue_consumed()
145 ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && in queue_consumed()
146 (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); in queue_consumed()
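The queue helpers above all hinge on one trick: the producer and consumer indices carry an extra wrap bit just above the index field, so "empty" (same index, same wrap bit) and "full" (same index, opposite wrap bits) can be told apart without wasting a slot. A minimal stand-alone sketch of that encoding, using a made-up 8-entry queue rather than the driver's Q_* macros:

#include <stdbool.h>
#include <stdio.h>

#define SHIFT	3				/* hypothetical 8-entry queue */
#define IDX(p)	((p) & ((1u << SHIFT) - 1))	/* index bits */
#define WRP(p)	((p) & (1u << SHIFT))		/* wrap bit just above them */

static bool q_empty(unsigned int prod, unsigned int cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) == WRP(cons);
}

static bool q_full(unsigned int prod, unsigned int cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
}

int main(void)
{
	unsigned int prod = 0, cons = 0;

	printf("empty: %d\n", q_empty(prod, cons));	/* 1 */
	prod = (WRP(prod) | IDX(prod)) + 8;		/* produce 8 entries */
	printf("full:  %d\n", q_full(prod, cons));	/* 1: same IDX, flipped WRP */
	return 0;
}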
156 writel_relaxed(q->llq.cons, q->cons_reg); in queue_sync_cons_out()
161 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; in queue_inc_cons()
162 q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); in queue_inc_cons()
175 prod = readl(q->prod_reg); in queue_sync_prod_in()
177 if (Q_OVF(prod) != Q_OVF(q->llq.prod)) in queue_sync_prod_in()
178 ret = -EOVERFLOW; in queue_sync_prod_in()
180 q->llq.prod = prod; in queue_sync_prod_in()
186 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; in queue_inc_prod_n()
187 return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); in queue_inc_prod_n()
190 static void queue_poll_init(struct arm_smmu_device *smmu, in queue_poll_init() argument
193 qp->delay = 1; in queue_poll_init()
194 qp->spin_cnt = 0; in queue_poll_init()
195 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); in queue_poll_init()
196 qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); in queue_poll_init()
201 if (ktime_compare(ktime_get(), qp->timeout) > 0) in queue_poll()
202 return -ETIMEDOUT; in queue_poll()
204 if (qp->wfe) { in queue_poll()
206 } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { in queue_poll()
209 udelay(qp->delay); in queue_poll()
210 qp->delay *= 2; in queue_poll()
211 qp->spin_cnt = 0; in queue_poll()
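queue_poll() above implements a bounded wait: spin for a handful of iterations (or WFE when the SMMU supports SEV), then fall back to exponentially growing delays until an overall timeout expires. A userspace sketch of the same pattern, with made-up constants and a hypothetical ready() condition standing in for the hardware check:

#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

#define POLL_SPIN_COUNT	10
#define POLL_TIMEOUT_US	1000000

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static int poll_until(bool (*cond)(void))
{
	long long timeout = now_us() + POLL_TIMEOUT_US;
	unsigned int spin_cnt = 0, delay = 1;

	while (!cond()) {
		if (now_us() > timeout)
			return -ETIMEDOUT;
		if (++spin_cnt < POLL_SPIN_COUNT)
			continue;		/* busy-wait a little first */
		usleep(delay);			/* then sleep, doubling each time */
		delay *= 2;
		spin_cnt = 0;
	}
	return 0;
}

static int counter;
static bool ready(void) { return ++counter > 100; }

int main(void)
{
	return poll_until(ready) ? 1 : 0;
}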
235 if (queue_empty(&q->llq)) in queue_remove_raw()
236 return -EAGAIN; in queue_remove_raw()
238 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); in queue_remove_raw()
239 queue_inc_cons(&q->llq); in queue_remove_raw()
244 /* High-level queue accessors */
248 cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); in arm_smmu_cmdq_build_cmd()
250 switch (ent->opcode) { in arm_smmu_cmdq_build_cmd()
255 cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid); in arm_smmu_cmdq_build_cmd()
256 cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size); in arm_smmu_cmdq_build_cmd()
257 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK; in arm_smmu_cmdq_build_cmd()
260 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); in arm_smmu_cmdq_build_cmd()
263 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
264 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); in arm_smmu_cmdq_build_cmd()
267 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
274 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
275 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
276 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
277 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
278 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
279 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
280 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
281 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; in arm_smmu_cmdq_build_cmd()
284 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
285 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
286 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
287 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
288 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
289 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
290 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; in arm_smmu_cmdq_build_cmd()
293 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
296 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
299 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
300 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global); in arm_smmu_cmdq_build_cmd()
301 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid); in arm_smmu_cmdq_build_cmd()
302 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid); in arm_smmu_cmdq_build_cmd()
303 cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size); in arm_smmu_cmdq_build_cmd()
304 cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK; in arm_smmu_cmdq_build_cmd()
307 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
308 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid); in arm_smmu_cmdq_build_cmd()
309 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid); in arm_smmu_cmdq_build_cmd()
310 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid); in arm_smmu_cmdq_build_cmd()
311 switch (ent->pri.resp) { in arm_smmu_cmdq_build_cmd()
317 return -EINVAL; in arm_smmu_cmdq_build_cmd()
319 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); in arm_smmu_cmdq_build_cmd()
322 if (ent->sync.msiaddr) { in arm_smmu_cmdq_build_cmd()
324 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; in arm_smmu_cmdq_build_cmd()
332 return -ENOENT; in arm_smmu_cmdq_build_cmd()
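arm_smmu_cmdq_build_cmd() packs a command descriptor into two 64-bit queue words, one FIELD_PREP() per field. A stand-alone sketch of that packing style, with a simplified FIELD_PREP stand-in and invented OP/SID masks rather than the real CMDQ_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel macro: shift the value to the mask. */
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define OP_MASK		0x00000000000000ffULL	/* bits [7:0], made up   */
#define SID_MASK	0xffffffff00000000ULL	/* bits [63:32], made up */

int main(void)
{
	uint64_t cmd[2] = { 0, 0 };

	cmd[0] |= FIELD_PREP(OP_MASK, 0x01);	/* hypothetical opcode   */
	cmd[0] |= FIELD_PREP(SID_MASK, 0x1234);	/* hypothetical StreamID */

	printf("cmd[0] = 0x%016llx\n", (unsigned long long)cmd[0]);
	return 0;
}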
338 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, in arm_smmu_cmdq_build_sync_cmd() argument
341 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_build_sync_cmd()
350 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) { in arm_smmu_cmdq_build_sync_cmd()
351 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * in arm_smmu_cmdq_build_sync_cmd()
352 q->ent_dwords * 8; in arm_smmu_cmdq_build_sync_cmd()
358 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) in arm_smmu_cmdq_skip_err() argument
369 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_skip_err()
370 u32 cons = readl_relaxed(q->cons_reg); in arm_smmu_cmdq_skip_err()
376 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in arm_smmu_cmdq_skip_err()
381 dev_err(smmu->dev, "retrying command fetch\n"); in arm_smmu_cmdq_skip_err()
401 queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); in arm_smmu_cmdq_skip_err()
402 dev_err(smmu->dev, "skipping command in error state:\n"); in arm_smmu_cmdq_skip_err()
404 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); in arm_smmu_cmdq_skip_err()
408 dev_err(smmu->dev, "failed to convert to CMD_SYNC\n"); in arm_smmu_cmdq_skip_err()
412 queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); in arm_smmu_cmdq_skip_err()
419 * - The only LOCK routines are exclusive_trylock() and shared_lock().
423 * - The UNLOCK routines are supplemented with shared_tryunlock(), which
437 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
441 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
442 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
447 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
452 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
463 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
471 atomic_set_release(&cmdq->lock, 0); \
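The cmdq lock above is a counter-based reader/writer construction: shared lockers increment the counter when it is non-negative, while the single exclusive owner parks it at INT_MIN so that a reader's speculative increment still leaves it negative, and the exclusive unlock simply resets the counter to zero. A minimal C11 sketch of the idea (plain stdatomic rather than the kernel's atomics API, and omitting shared_tryunlock):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock;

static void shared_lock(void)
{
	int val;

	/* Fast path: no exclusive holder, our increment takes the lock. */
	if (atomic_fetch_add_explicit(&lock, 1, memory_order_acquire) >= 0)
		return;

	/* Slow path: wait for the exclusive holder, then retake. */
	do {
		do {
			val = atomic_load_explicit(&lock, memory_order_relaxed);
		} while (val < 0);
	} while (!atomic_compare_exchange_weak_explicit(&lock, &val, val + 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static void shared_unlock(void)
{
	atomic_fetch_sub_explicit(&lock, 1, memory_order_release);
}

static bool exclusive_trylock(void)
{
	int expected = 0;

	/* Only succeeds when there are no shared holders at all. */
	return atomic_compare_exchange_strong_explicit(&lock, &expected,
						       INT_MIN,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void exclusive_unlock(void)
{
	/* Resetting to 0 also discards readers' speculative increments. */
	atomic_store_explicit(&lock, 0, memory_order_release);
}

int main(void)
{
	shared_lock();
	shared_unlock();
	if (exclusive_trylock())
		exclusive_unlock();
	return 0;
}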
480 * you like mixed-size concurrency, dependency ordering and relaxed atomics,
506 * a. If we have MSIs, the SMMU can write back into the CMD_SYNC
520 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
535 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
540 mask = GENMASK(limit - 1, sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
544 * that a zero-initialised queue is invalid and, after marking in __arm_smmu_cmdq_poll_set_valid_map()
557 llq.prod = queue_inc_prod_n(&llq, limit - sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
575 /* Wait for the command queue to become non-full */
576 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, in arm_smmu_cmdq_poll_until_not_full() argument
581 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_poll_until_not_full()
589 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
591 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
595 queue_poll_init(smmu, &qp); in arm_smmu_cmdq_poll_until_not_full()
597 llq->val = READ_ONCE(smmu->cmdq.q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
608 * Wait until the SMMU signals a CMD_SYNC completion MSI.
611 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_poll_until_msi() argument
616 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in __arm_smmu_cmdq_poll_until_msi()
617 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
619 queue_poll_init(smmu, &qp); in __arm_smmu_cmdq_poll_until_msi()
627 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); in __arm_smmu_cmdq_poll_until_msi()
632 * Wait until the SMMU cons index passes llq->prod.
635 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, in __arm_smmu_cmdq_poll_until_consumed() argument
639 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in __arm_smmu_cmdq_poll_until_consumed()
640 u32 prod = llq->prod; in __arm_smmu_cmdq_poll_until_consumed()
643 queue_poll_init(smmu, &qp); in __arm_smmu_cmdq_poll_until_consumed()
644 llq->val = READ_ONCE(smmu->cmdq.q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
659 * cmdq->q.llq.cons. Roughly speaking: in __arm_smmu_cmdq_poll_until_consumed()
679 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
685 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, in arm_smmu_cmdq_poll_until_sync() argument
688 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) in arm_smmu_cmdq_poll_until_sync()
689 return __arm_smmu_cmdq_poll_until_msi(smmu, llq); in arm_smmu_cmdq_poll_until_sync()
691 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); in arm_smmu_cmdq_poll_until_sync()
699 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
707 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
715 * - There is a dma_wmb() before publishing any commands to the queue.
719 * - On completion of a CMD_SYNC, there is a control dependency.
723 * - Command insertion is totally ordered, so if two CPUs each race to
727 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, in arm_smmu_cmdq_issue_cmdlist() argument
734 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_issue_cmdlist()
736 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_issue_cmdlist()
742 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
748 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) in arm_smmu_cmdq_issue_cmdlist()
749 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
757 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
774 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); in arm_smmu_cmdq_issue_cmdlist()
775 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
790 /* 4. If we are the owner, take control of the SMMU hardware */ in arm_smmu_cmdq_issue_cmdlist()
793 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
797 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
811 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
818 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
824 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); in arm_smmu_cmdq_issue_cmdlist()
826 dev_err_ratelimited(smmu->dev, in arm_smmu_cmdq_issue_cmdlist()
829 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
830 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
835 * reader, in which case we can safely update cmdq->q.llq.cons in arm_smmu_cmdq_issue_cmdlist()
838 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
847 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, in arm_smmu_cmdq_issue_cmd() argument
853 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_issue_cmd()
854 ent->opcode); in arm_smmu_cmdq_issue_cmd()
855 return -EINVAL; in arm_smmu_cmdq_issue_cmd()
858 return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); in arm_smmu_cmdq_issue_cmd()
861 static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) in arm_smmu_cmdq_issue_sync() argument
863 return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); in arm_smmu_cmdq_issue_sync()
866 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, in arm_smmu_cmdq_batch_add() argument
870 if (cmds->num == CMDQ_BATCH_ENTRIES) { in arm_smmu_cmdq_batch_add()
871 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); in arm_smmu_cmdq_batch_add()
872 cmds->num = 0; in arm_smmu_cmdq_batch_add()
874 arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd); in arm_smmu_cmdq_batch_add()
875 cmds->num++; in arm_smmu_cmdq_batch_add()
878 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu, in arm_smmu_cmdq_batch_submit() argument
881 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); in arm_smmu_cmdq_batch_submit()
885 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid) in arm_smmu_tlb_inv_asid() argument
892 arm_smmu_cmdq_issue_cmd(smmu, &cmd); in arm_smmu_tlb_inv_asid()
893 arm_smmu_cmdq_issue_sync(smmu); in arm_smmu_tlb_inv_asid()
903 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_sync_cd() local
912 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
913 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_sync_cd()
914 for (i = 0; i < master->num_sids; i++) { in arm_smmu_sync_cd()
915 cmd.cfgi.sid = master->sids[i]; in arm_smmu_sync_cd()
916 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); in arm_smmu_sync_cd()
919 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
921 arm_smmu_cmdq_batch_submit(smmu, &cmds); in arm_smmu_sync_cd()
924 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, in arm_smmu_alloc_cd_leaf_table() argument
929 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, in arm_smmu_alloc_cd_leaf_table()
930 &l1_desc->l2ptr_dma, GFP_KERNEL); in arm_smmu_alloc_cd_leaf_table()
931 if (!l1_desc->l2ptr) { in arm_smmu_alloc_cd_leaf_table()
932 dev_warn(smmu->dev, in arm_smmu_alloc_cd_leaf_table()
934 return -ENOMEM; in arm_smmu_alloc_cd_leaf_table()
942 u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | in arm_smmu_write_cd_l1_desc()
955 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_get_cd_ptr() local
956 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_get_cd_ptr()
958 if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR) in arm_smmu_get_cd_ptr()
959 return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS; in arm_smmu_get_cd_ptr()
962 l1_desc = &cdcfg->l1_desc[idx]; in arm_smmu_get_cd_ptr()
963 if (!l1_desc->l2ptr) { in arm_smmu_get_cd_ptr()
964 if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) in arm_smmu_get_cd_ptr()
967 l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS; in arm_smmu_get_cd_ptr()
972 idx = ssid & (CTXDESC_L2_ENTRIES - 1); in arm_smmu_get_cd_ptr()
973 return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS; in arm_smmu_get_cd_ptr()
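arm_smmu_get_cd_ptr() above walks the two-level context-descriptor table: the upper SSID bits pick an L1 descriptor (each owning a dynamically allocated leaf table), the lower bits pick the CD within that leaf. A tiny worked example of the split, assuming 1024 CDs per leaf table (the CTXDESC_L2_ENTRIES value here is an assumption for illustration):

#include <stdio.h>

#define L2_ENTRIES	1024	/* assumed CDs per leaf table */

int main(void)
{
	unsigned int ssid = 0x1234;
	unsigned int l1_idx = ssid / L2_ENTRIES;	/* which leaf table  */
	unsigned int l2_idx = ssid & (L2_ENTRIES - 1);	/* entry within leaf */

	printf("ssid 0x%x -> l1 %u, l2 %u\n", ssid, l1_idx, l2_idx);
	return 0;
}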
991 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_write_ctx_desc() local
993 if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax))) in arm_smmu_write_ctx_desc()
994 return -E2BIG; in arm_smmu_write_ctx_desc()
998 return -ENOMEM; in arm_smmu_write_ctx_desc()
1007 val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); in arm_smmu_write_ctx_desc()
1013 cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); in arm_smmu_write_ctx_desc()
1015 cdptr[3] = cpu_to_le64(cd->mair); in arm_smmu_write_ctx_desc()
1018 * STE is live, and the SMMU might read dwords of this CD in any in arm_smmu_write_ctx_desc()
1024 val = cd->tcr | in arm_smmu_write_ctx_desc()
1029 (cd->mm ? 0 : CTXDESC_CD_0_ASET) | in arm_smmu_write_ctx_desc()
1031 FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | in arm_smmu_write_ctx_desc()
1035 if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE) in arm_smmu_write_ctx_desc()
1040 * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 in arm_smmu_write_ctx_desc()
1043 * The size of single-copy atomic reads made by the SMMU is in arm_smmu_write_ctx_desc()
1045 * field within an aligned 64-bit span of a structure can be altered in arm_smmu_write_ctx_desc()
1058 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_alloc_cd_tables() local
1059 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_alloc_cd_tables()
1060 struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg; in arm_smmu_alloc_cd_tables()
1062 max_contexts = 1 << cfg->s1cdmax; in arm_smmu_alloc_cd_tables()
1064 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || in arm_smmu_alloc_cd_tables()
1066 cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; in arm_smmu_alloc_cd_tables()
1067 cdcfg->num_l1_ents = max_contexts; in arm_smmu_alloc_cd_tables()
1071 cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; in arm_smmu_alloc_cd_tables()
1072 cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts, in arm_smmu_alloc_cd_tables()
1075 cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents, in arm_smmu_alloc_cd_tables()
1076 sizeof(*cdcfg->l1_desc), in arm_smmu_alloc_cd_tables()
1078 if (!cdcfg->l1_desc) in arm_smmu_alloc_cd_tables()
1079 return -ENOMEM; in arm_smmu_alloc_cd_tables()
1081 l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); in arm_smmu_alloc_cd_tables()
1084 cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma, in arm_smmu_alloc_cd_tables()
1086 if (!cdcfg->cdtab) { in arm_smmu_alloc_cd_tables()
1087 dev_warn(smmu->dev, "failed to allocate context descriptor\n"); in arm_smmu_alloc_cd_tables()
1088 ret = -ENOMEM; in arm_smmu_alloc_cd_tables()
1095 if (cdcfg->l1_desc) { in arm_smmu_alloc_cd_tables()
1096 devm_kfree(smmu->dev, cdcfg->l1_desc); in arm_smmu_alloc_cd_tables()
1097 cdcfg->l1_desc = NULL; in arm_smmu_alloc_cd_tables()
1106 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_free_cd_tables() local
1107 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_free_cd_tables()
1109 if (cdcfg->l1_desc) { in arm_smmu_free_cd_tables()
1112 for (i = 0; i < cdcfg->num_l1_ents; i++) { in arm_smmu_free_cd_tables()
1113 if (!cdcfg->l1_desc[i].l2ptr) in arm_smmu_free_cd_tables()
1116 dmam_free_coherent(smmu->dev, size, in arm_smmu_free_cd_tables()
1117 cdcfg->l1_desc[i].l2ptr, in arm_smmu_free_cd_tables()
1118 cdcfg->l1_desc[i].l2ptr_dma); in arm_smmu_free_cd_tables()
1120 devm_kfree(smmu->dev, cdcfg->l1_desc); in arm_smmu_free_cd_tables()
1121 cdcfg->l1_desc = NULL; in arm_smmu_free_cd_tables()
1123 l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); in arm_smmu_free_cd_tables()
1125 l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3); in arm_smmu_free_cd_tables()
1128 dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma); in arm_smmu_free_cd_tables()
1129 cdcfg->cdtab_dma = 0; in arm_smmu_free_cd_tables()
1130 cdcfg->cdtab = NULL; in arm_smmu_free_cd_tables()
1138 if (!cd->asid) in arm_smmu_free_asid()
1141 free = refcount_dec_and_test(&cd->refs); in arm_smmu_free_asid()
1143 old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid); in arm_smmu_free_asid()
1155 val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); in arm_smmu_write_strtab_l1_desc()
1156 val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; in arm_smmu_write_strtab_l1_desc()
1162 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_sync_ste_for_sid() argument
1172 arm_smmu_cmdq_issue_cmd(smmu, &cmd); in arm_smmu_sync_ste_for_sid()
1173 arm_smmu_cmdq_issue_sync(smmu); in arm_smmu_sync_ste_for_sid()
1183 * 1. Invalid (all zero) -> bypass/fault (init) in arm_smmu_write_strtab_ent()
1184 * 2. Bypass/fault -> translation/bypass (attach) in arm_smmu_write_strtab_ent()
1185 * 3. Translation/bypass -> bypass/fault (detach) in arm_smmu_write_strtab_ent()
1187 * Given that we can't update the STE atomically and the SMMU in arm_smmu_write_strtab_ent()
1197 struct arm_smmu_device *smmu = NULL; in arm_smmu_write_strtab_ent() local
1209 smmu_domain = master->domain; in arm_smmu_write_strtab_ent()
1210 smmu = master->smmu; in arm_smmu_write_strtab_ent()
1214 switch (smmu_domain->stage) { in arm_smmu_write_strtab_ent()
1216 s1_cfg = &smmu_domain->s1_cfg; in arm_smmu_write_strtab_ent()
1220 s2_cfg = &smmu_domain->s2_cfg; in arm_smmu_write_strtab_ent()
1258 * The SMMU can perform negative caching, so we must sync in arm_smmu_write_strtab_ent()
1261 if (smmu) in arm_smmu_write_strtab_ent()
1262 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1275 if (smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_write_strtab_ent()
1276 !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) in arm_smmu_write_strtab_ent()
1279 val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | in arm_smmu_write_strtab_ent()
1281 FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) | in arm_smmu_write_strtab_ent()
1282 FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt); in arm_smmu_write_strtab_ent()
1288 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | in arm_smmu_write_strtab_ent()
1289 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | in arm_smmu_write_strtab_ent()
1296 dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); in arm_smmu_write_strtab_ent()
1301 if (master->ats_enabled) in arm_smmu_write_strtab_ent()
1305 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1308 arm_smmu_sync_ste_for_sid(smmu, sid); in arm_smmu_write_strtab_ent()
1311 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) in arm_smmu_write_strtab_ent()
1312 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); in arm_smmu_write_strtab_ent()
1320 arm_smmu_write_strtab_ent(NULL, -1, strtab); in arm_smmu_init_bypass_stes()
1325 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_init_l2_strtab() argument
1329 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l2_strtab()
1330 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; in arm_smmu_init_l2_strtab()
1332 if (desc->l2ptr) in arm_smmu_init_l2_strtab()
1336 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; in arm_smmu_init_l2_strtab()
1338 desc->span = STRTAB_SPLIT + 1; in arm_smmu_init_l2_strtab()
1339 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, in arm_smmu_init_l2_strtab()
1341 if (!desc->l2ptr) { in arm_smmu_init_l2_strtab()
1342 dev_err(smmu->dev, in arm_smmu_init_l2_strtab()
1345 return -ENOMEM; in arm_smmu_init_l2_strtab()
1348 arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT); in arm_smmu_init_l2_strtab()
1357 struct arm_smmu_device *smmu = dev; in arm_smmu_evtq_thread() local
1358 struct arm_smmu_queue *q = &smmu->evtq.q; in arm_smmu_evtq_thread()
1359 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_evtq_thread()
1366 dev_info(smmu->dev, "event 0x%02x received:\n", id); in arm_smmu_evtq_thread()
1368 dev_info(smmu->dev, "\t0x%016llx\n", in arm_smmu_evtq_thread()
1377 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_evtq_thread()
1378 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); in arm_smmu_evtq_thread()
1382 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in arm_smmu_evtq_thread()
1383 Q_IDX(llq, llq->cons); in arm_smmu_evtq_thread()
1387 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) in arm_smmu_handle_ppr() argument
1399 dev_info(smmu->dev, "unexpected PRI request received:\n"); in arm_smmu_handle_ppr()
1400 dev_info(smmu->dev, in arm_smmu_handle_ppr()
1421 arm_smmu_cmdq_issue_cmd(smmu, &cmd); in arm_smmu_handle_ppr()
1427 struct arm_smmu_device *smmu = dev; in arm_smmu_priq_thread() local
1428 struct arm_smmu_queue *q = &smmu->priq.q; in arm_smmu_priq_thread()
1429 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_priq_thread()
1434 arm_smmu_handle_ppr(smmu, evt); in arm_smmu_priq_thread()
1436 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_priq_thread()
1437 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); in arm_smmu_priq_thread()
1441 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in arm_smmu_priq_thread()
1442 Q_IDX(llq, llq->cons); in arm_smmu_priq_thread()
1447 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1452 struct arm_smmu_device *smmu = dev; in arm_smmu_gerror_handler() local
1454 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); in arm_smmu_gerror_handler()
1455 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1461 dev_warn(smmu->dev, in arm_smmu_gerror_handler()
1466 dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); in arm_smmu_gerror_handler()
1467 arm_smmu_device_disable(smmu); in arm_smmu_gerror_handler()
1471 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); in arm_smmu_gerror_handler()
1474 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1477 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1480 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
1483 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1486 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
1489 arm_smmu_cmdq_skip_err(smmu); in arm_smmu_gerror_handler()
1491 writel(gerror, smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
1497 struct arm_smmu_device *smmu = dev; in arm_smmu_combined_irq_thread() local
1500 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_combined_irq_thread()
1518 /* ATC invalidates are always on 4096-byte pages */ in arm_smmu_atc_inv_to_cmd()
1529 cmd->atc.size = ATC_INV_SIZE_ALL; in arm_smmu_atc_inv_to_cmd()
1534 page_end = (iova + size - 1) >> inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1539 * thus have to choose between grossly over-invalidating the region, or in arm_smmu_atc_inv_to_cmd()
1557 span_mask = (1ULL << log2_span) - 1; in arm_smmu_atc_inv_to_cmd()
1561 cmd->atc.addr = page_start << inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
1562 cmd->atc.size = log2_span; in arm_smmu_atc_inv_to_cmd()
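Because an ATC invalidation must describe a naturally aligned, power-of-two number of 4KB pages, the code above finds the smallest such span covering [iova, iova + size) by XOR-ing the first and last page numbers and aligning down. A stand-alone sketch of that computation with arbitrary example inputs:

#include <stdint.h>
#include <stdio.h>

#define GRAIN_SHIFT	12	/* ATC invalidations operate on 4KB pages */

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t iova = 0x11000, size = 0x3000;		/* example values */
	uint64_t page_start = iova >> GRAIN_SHIFT;
	uint64_t page_end = (iova + size - 1) >> GRAIN_SHIFT;
	int log2_span = fls64(page_start ^ page_end);
	uint64_t span_mask = (1ULL << log2_span) - 1;

	page_start &= ~span_mask;

	/* Prints addr=0x10000 size=2, i.e. 2^2 pages covering 0x11000-0x13fff. */
	printf("addr=0x%llx size=%d\n",
	       (unsigned long long)(page_start << GRAIN_SHIFT), log2_span);
	return 0;
}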
1572 for (i = 0; i < master->num_sids; i++) { in arm_smmu_atc_inv_master()
1573 cmd.atc.sid = master->sids[i]; in arm_smmu_atc_inv_master()
1574 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); in arm_smmu_atc_inv_master()
1577 return arm_smmu_cmdq_issue_sync(master->smmu); in arm_smmu_atc_inv_master()
1589 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
1606 if (!atomic_read(&smmu_domain->nr_ats_masters)) in arm_smmu_atc_inv_domain()
1611 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1612 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_atc_inv_domain()
1613 if (!master->ats_enabled) in arm_smmu_atc_inv_domain()
1616 for (i = 0; i < master->num_sids; i++) { in arm_smmu_atc_inv_domain()
1617 cmd.atc.sid = master->sids[i]; in arm_smmu_atc_inv_domain()
1618 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
1621 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1623 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
1630 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context() local
1634 * NOTE: when io-pgtable is in non-strict mode, we may get here with in arm_smmu_tlb_inv_context()
1636 * to the SMMU. We are relying on the dma_wmb() implicit during cmd in arm_smmu_tlb_inv_context()
1640 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_context()
1641 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid); in arm_smmu_tlb_inv_context()
1644 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_context()
1645 arm_smmu_cmdq_issue_cmd(smmu, &cmd); in arm_smmu_tlb_inv_context()
1646 arm_smmu_cmdq_issue_sync(smmu); in arm_smmu_tlb_inv_context()
1655 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range() local
1668 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_range()
1670 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; in arm_smmu_tlb_inv_range()
1673 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_range()
1676 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in arm_smmu_tlb_inv_range()
1678 tg = __ffs(smmu_domain->domain.pgsize_bitmap); in arm_smmu_tlb_inv_range()
1681 cmd.tlbi.tg = (tg - 10) / 2; in arm_smmu_tlb_inv_range()
1684 cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in arm_smmu_tlb_inv_range()
1690 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in arm_smmu_tlb_inv_range()
1706 cmd.tlbi.num = num - 1; in arm_smmu_tlb_inv_range()
1712 num_pages -= num << scale; in arm_smmu_tlb_inv_range()
1716 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); in arm_smmu_tlb_inv_range()
1719 arm_smmu_cmdq_batch_submit(smmu, &cmds); in arm_smmu_tlb_inv_range()
1722 * Unfortunately, this can't be leaf-only since we may have in arm_smmu_tlb_inv_range()
1733 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_tlb_inv_page_nosync()
1789 iommu_get_dma_cookie(&smmu_domain->domain)) { in arm_smmu_domain_alloc()
1794 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
1795 INIT_LIST_HEAD(&smmu_domain->devices); in arm_smmu_domain_alloc()
1796 spin_lock_init(&smmu_domain->devices_lock); in arm_smmu_domain_alloc()
1798 return &smmu_domain->domain; in arm_smmu_domain_alloc()
1808 return -ENOSPC; in arm_smmu_bitmap_alloc()
1822 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free() local
1825 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_domain_free()
1828 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_domain_free()
1829 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_free()
1833 if (cfg->cdcfg.cdtab) in arm_smmu_domain_free()
1835 arm_smmu_free_asid(&cfg->cd); in arm_smmu_domain_free()
1838 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_free()
1839 if (cfg->vmid) in arm_smmu_domain_free()
1840 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid); in arm_smmu_domain_free()
1852 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s1() local
1853 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_finalise_s1()
1854 typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; in arm_smmu_domain_finalise_s1()
1856 refcount_set(&cfg->cd.refs, 1); in arm_smmu_domain_finalise_s1()
1860 ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd, in arm_smmu_domain_finalise_s1()
1861 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_domain_finalise_s1()
1865 cfg->s1cdmax = master->ssid_bits; in arm_smmu_domain_finalise_s1()
1871 cfg->cd.asid = (u16)asid; in arm_smmu_domain_finalise_s1()
1872 cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_domain_finalise_s1()
1873 cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | in arm_smmu_domain_finalise_s1()
1874 FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | in arm_smmu_domain_finalise_s1()
1875 FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | in arm_smmu_domain_finalise_s1()
1876 FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | in arm_smmu_domain_finalise_s1()
1877 FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | in arm_smmu_domain_finalise_s1()
1878 FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | in arm_smmu_domain_finalise_s1()
1880 cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_domain_finalise_s1()
1887 ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd); in arm_smmu_domain_finalise_s1()
1897 arm_smmu_free_asid(&cfg->cd); in arm_smmu_domain_finalise_s1()
1908 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s2() local
1909 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_finalise_s2()
1910 typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; in arm_smmu_domain_finalise_s2()
1912 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); in arm_smmu_domain_finalise_s2()
1916 vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; in arm_smmu_domain_finalise_s2()
1917 cfg->vmid = (u16)vmid; in arm_smmu_domain_finalise_s2()
1918 cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_domain_finalise_s2()
1919 cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | in arm_smmu_domain_finalise_s2()
1920 FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | in arm_smmu_domain_finalise_s2()
1921 FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | in arm_smmu_domain_finalise_s2()
1922 FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | in arm_smmu_domain_finalise_s2()
1923 FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | in arm_smmu_domain_finalise_s2()
1924 FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | in arm_smmu_domain_finalise_s2()
1925 FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); in arm_smmu_domain_finalise_s2()
1941 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise() local
1943 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_domain_finalise()
1944 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_domain_finalise()
1949 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_domain_finalise()
1950 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_finalise()
1951 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_domain_finalise()
1952 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_finalise()
1954 switch (smmu_domain->stage) { in arm_smmu_domain_finalise()
1956 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; in arm_smmu_domain_finalise()
1958 oas = smmu->ias; in arm_smmu_domain_finalise()
1964 ias = smmu->ias; in arm_smmu_domain_finalise()
1965 oas = smmu->oas; in arm_smmu_domain_finalise()
1970 return -EINVAL; in arm_smmu_domain_finalise()
1974 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_domain_finalise()
1977 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, in arm_smmu_domain_finalise()
1979 .iommu_dev = smmu->dev, in arm_smmu_domain_finalise()
1982 if (smmu_domain->non_strict) in arm_smmu_domain_finalise()
1987 return -ENOMEM; in arm_smmu_domain_finalise()
1989 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_domain_finalise()
1990 domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; in arm_smmu_domain_finalise()
1991 domain->geometry.force_aperture = true; in arm_smmu_domain_finalise()
1999 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_domain_finalise()
2003 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_get_step_for_sid() argument
2006 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_get_step_for_sid()
2008 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_get_step_for_sid()
2012 /* Two-level walk */ in arm_smmu_get_step_for_sid()
2014 l1_desc = &cfg->l1_desc[idx]; in arm_smmu_get_step_for_sid()
2015 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS; in arm_smmu_get_step_for_sid()
2016 step = &l1_desc->l2ptr[idx]; in arm_smmu_get_step_for_sid()
2019 step = &cfg->strtab[sid * STRTAB_STE_DWORDS]; in arm_smmu_get_step_for_sid()
2028 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_install_ste_for_dev() local
2030 for (i = 0; i < master->num_sids; ++i) { in arm_smmu_install_ste_for_dev()
2031 u32 sid = master->sids[i]; in arm_smmu_install_ste_for_dev()
2032 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); in arm_smmu_install_ste_for_dev()
2036 if (master->sids[j] == sid) in arm_smmu_install_ste_for_dev()
2047 struct device *dev = master->dev; in arm_smmu_ats_supported()
2048 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_ats_supported() local
2051 if (!(smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_ats_supported()
2054 if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS)) in arm_smmu_ats_supported()
2064 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_enable_ats() local
2065 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_enable_ats()
2068 if (!master->ats_enabled) in arm_smmu_enable_ats()
2072 stu = __ffs(smmu->pgsize_bitmap); in arm_smmu_enable_ats()
2073 pdev = to_pci_dev(master->dev); in arm_smmu_enable_ats()
2075 atomic_inc(&smmu_domain->nr_ats_masters); in arm_smmu_enable_ats()
2078 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); in arm_smmu_enable_ats()
2083 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_disable_ats()
2085 if (!master->ats_enabled) in arm_smmu_disable_ats()
2088 pci_disable_ats(to_pci_dev(master->dev)); in arm_smmu_disable_ats()
2091 * ATC invalidation via the SMMU. in arm_smmu_disable_ats()
2095 atomic_dec(&smmu_domain->nr_ats_masters); in arm_smmu_disable_ats()
2105 if (!dev_is_pci(master->dev)) in arm_smmu_enable_pasid()
2106 return -ENODEV; in arm_smmu_enable_pasid()
2108 pdev = to_pci_dev(master->dev); in arm_smmu_enable_pasid()
2120 dev_err(&pdev->dev, "Failed to enable PASID\n"); in arm_smmu_enable_pasid()
2124 master->ssid_bits = min_t(u8, ilog2(num_pasids), in arm_smmu_enable_pasid()
2125 master->smmu->ssid_bits); in arm_smmu_enable_pasid()
2133 if (!dev_is_pci(master->dev)) in arm_smmu_disable_pasid()
2136 pdev = to_pci_dev(master->dev); in arm_smmu_disable_pasid()
2138 if (!pdev->pasid_enabled) in arm_smmu_disable_pasid()
2141 master->ssid_bits = 0; in arm_smmu_disable_pasid()
2148 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_detach_dev()
2155 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2156 list_del(&master->domain_head); in arm_smmu_detach_dev()
2157 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2159 master->domain = NULL; in arm_smmu_detach_dev()
2160 master->ats_enabled = false; in arm_smmu_detach_dev()
2169 struct arm_smmu_device *smmu; in arm_smmu_attach_dev() local
2174 return -ENOENT; in arm_smmu_attach_dev()
2177 smmu = master->smmu; in arm_smmu_attach_dev()
2185 dev_err(dev, "cannot attach - SVA enabled\n"); in arm_smmu_attach_dev()
2186 return -EBUSY; in arm_smmu_attach_dev()
2191 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2193 if (!smmu_domain->smmu) { in arm_smmu_attach_dev()
2194 smmu_domain->smmu = smmu; in arm_smmu_attach_dev()
2197 smmu_domain->smmu = NULL; in arm_smmu_attach_dev()
2200 } else if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
2202 "cannot attach to SMMU %s (upstream of %s)\n", in arm_smmu_attach_dev()
2203 dev_name(smmu_domain->smmu->dev), in arm_smmu_attach_dev()
2204 dev_name(smmu->dev)); in arm_smmu_attach_dev()
2205 ret = -ENXIO; in arm_smmu_attach_dev()
2207 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && in arm_smmu_attach_dev()
2208 master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { in arm_smmu_attach_dev()
2211 smmu_domain->s1_cfg.s1cdmax, master->ssid_bits); in arm_smmu_attach_dev()
2212 ret = -EINVAL; in arm_smmu_attach_dev()
2216 master->domain = smmu_domain; in arm_smmu_attach_dev()
2218 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_attach_dev()
2219 master->ats_enabled = arm_smmu_ats_supported(master); in arm_smmu_attach_dev()
2223 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2224 list_add(&master->domain_head, &smmu_domain->devices); in arm_smmu_attach_dev()
2225 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2230 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2237 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map()
2240 return -ENODEV; in arm_smmu_map()
2242 return ops->map(ops, iova, paddr, size, prot, gfp); in arm_smmu_map()
2249 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_unmap()
2254 return ops->unmap(ops, iova, size, gather); in arm_smmu_unmap()
2261 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
2270 arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start, in arm_smmu_iotlb_sync()
2271 gather->pgsize, true, smmu_domain); in arm_smmu_iotlb_sync()
2277 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_iova_to_phys()
2279 if (domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_iova_to_phys()
2285 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
2299 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) in arm_smmu_sid_in_range() argument
2301 unsigned long limit = smmu->strtab_cfg.num_l1_ents; in arm_smmu_sid_in_range()
2303 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_sid_in_range()
2314 struct arm_smmu_device *smmu; in arm_smmu_probe_device() local
2318 if (!fwspec || fwspec->ops != &arm_smmu_ops) in arm_smmu_probe_device()
2319 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
2322 return ERR_PTR(-EBUSY); in arm_smmu_probe_device()
2324 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
2325 if (!smmu) in arm_smmu_probe_device()
2326 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
2330 return ERR_PTR(-ENOMEM); in arm_smmu_probe_device()
2332 master->dev = dev; in arm_smmu_probe_device()
2333 master->smmu = smmu; in arm_smmu_probe_device()
2334 master->sids = fwspec->ids; in arm_smmu_probe_device()
2335 master->num_sids = fwspec->num_ids; in arm_smmu_probe_device()
2336 INIT_LIST_HEAD(&master->bonds); in arm_smmu_probe_device()
2339 /* Check the SIDs are in range of the SMMU and our stream table */ in arm_smmu_probe_device()
2340 for (i = 0; i < master->num_sids; i++) { in arm_smmu_probe_device()
2341 u32 sid = master->sids[i]; in arm_smmu_probe_device()
2343 if (!arm_smmu_sid_in_range(smmu, sid)) { in arm_smmu_probe_device()
2344 ret = -ERANGE; in arm_smmu_probe_device()
2349 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_probe_device()
2350 ret = arm_smmu_init_l2_strtab(smmu, sid); in arm_smmu_probe_device()
2356 master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits); in arm_smmu_probe_device()
2360 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register in arm_smmu_probe_device()
2368 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) in arm_smmu_probe_device()
2369 master->ssid_bits = min_t(u8, master->ssid_bits, in arm_smmu_probe_device()
2372 return &smmu->iommu; in arm_smmu_probe_device()
2385 if (!fwspec || fwspec->ops != &arm_smmu_ops) in arm_smmu_release_device()
2402 * aliases, since the necessary ID-to-device lookup becomes rather in arm_smmu_device_group()
2403 * impractical given a potential sparse 32-bit stream ID space. in arm_smmu_device_group()
2418 switch (domain->type) { in arm_smmu_domain_get_attr()
2422 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); in arm_smmu_domain_get_attr()
2425 return -ENODEV; in arm_smmu_domain_get_attr()
2431 *(int *)data = smmu_domain->non_strict; in arm_smmu_domain_get_attr()
2434 return -ENODEV; in arm_smmu_domain_get_attr()
2438 return -EINVAL; in arm_smmu_domain_get_attr()
2448 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
2450 switch (domain->type) { in arm_smmu_domain_set_attr()
2454 if (smmu_domain->smmu) { in arm_smmu_domain_set_attr()
2455 ret = -EPERM; in arm_smmu_domain_set_attr()
2460 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_domain_set_attr()
2462 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_set_attr()
2465 ret = -ENODEV; in arm_smmu_domain_set_attr()
2471 smmu_domain->non_strict = *(int *)data; in arm_smmu_domain_set_attr()
2474 ret = -ENODEV; in arm_smmu_domain_set_attr()
2478 ret = -EINVAL; in arm_smmu_domain_set_attr()
2482 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
2488 return iommu_fwspec_add_ids(dev, args->args, 1); in arm_smmu_of_xlate()
2502 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
2543 return -ENODEV; in arm_smmu_dev_enable_feature()
2546 return -EBUSY; in arm_smmu_dev_enable_feature()
2552 return -EINVAL; in arm_smmu_dev_enable_feature()
2560 return -EINVAL; in arm_smmu_dev_disable_feature()
2566 return -EINVAL; in arm_smmu_dev_disable_feature()
2592 .pgsize_bitmap = -1UL, /* Restricted during device attach */
2596 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, in arm_smmu_init_one_queue() argument
2605 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; in arm_smmu_init_one_queue()
2606 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, in arm_smmu_init_one_queue()
2608 if (q->base || qsz < PAGE_SIZE) in arm_smmu_init_one_queue()
2611 q->llq.max_n_shift--; in arm_smmu_init_one_queue()
2614 if (!q->base) { in arm_smmu_init_one_queue()
2615 dev_err(smmu->dev, in arm_smmu_init_one_queue()
2618 return -ENOMEM; in arm_smmu_init_one_queue()
2621 if (!WARN_ON(q->base_dma & (qsz - 1))) { in arm_smmu_init_one_queue()
2622 dev_info(smmu->dev, "allocated %u entries for %s\n", in arm_smmu_init_one_queue()
2623 1 << q->llq.max_n_shift, name); in arm_smmu_init_one_queue()
2626 q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); in arm_smmu_init_one_queue()
2627 q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); in arm_smmu_init_one_queue()
2628 q->ent_dwords = dwords; in arm_smmu_init_one_queue()
2630 q->q_base = Q_BASE_RWA; in arm_smmu_init_one_queue()
2631 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; in arm_smmu_init_one_queue()
2632 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); in arm_smmu_init_one_queue()
2634 q->llq.prod = q->llq.cons = 0; in arm_smmu_init_one_queue()
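For reference, the queue sizing above works out as follows: with an assumed max_n_shift of 8 and 2 dwords per entry, the DMA allocation is (1 << 8) * 2 * 8 = 4096 bytes, i.e. one page holding 256 entries (the numbers are illustrative, not taken from any particular SMMU):

#include <stdio.h>

int main(void)
{
	unsigned int max_n_shift = 8, dwords = 2;
	unsigned long qsz = ((unsigned long)(1 << max_n_shift) * dwords) << 3;

	printf("%lu bytes\n", qsz);	/* 4096 */
	return 0;
}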
2644 static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) in arm_smmu_cmdq_init() argument
2647 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_init()
2648 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
2651 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
2652 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
2656 dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); in arm_smmu_cmdq_init()
2657 ret = -ENOMEM; in arm_smmu_cmdq_init()
2659 cmdq->valid_map = bitmap; in arm_smmu_cmdq_init()
2660 devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); in arm_smmu_cmdq_init()
2666 static int arm_smmu_init_queues(struct arm_smmu_device *smmu) in arm_smmu_init_queues() argument
2671 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, in arm_smmu_init_queues()
2677 ret = arm_smmu_cmdq_init(smmu); in arm_smmu_init_queues()
2682 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, in arm_smmu_init_queues()
2689 if (!(smmu->features & ARM_SMMU_FEAT_PRI)) in arm_smmu_init_queues()
2692 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, in arm_smmu_init_queues()
2697 static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) in arm_smmu_init_l1_strtab() argument
2700 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l1_strtab()
2701 size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents; in arm_smmu_init_l1_strtab()
2702 void *strtab = smmu->strtab_cfg.strtab; in arm_smmu_init_l1_strtab()
2704 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL); in arm_smmu_init_l1_strtab()
2705 if (!cfg->l1_desc) { in arm_smmu_init_l1_strtab()
2706 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n"); in arm_smmu_init_l1_strtab()
2707 return -ENOMEM; in arm_smmu_init_l1_strtab()
2710 for (i = 0; i < cfg->num_l1_ents; ++i) { in arm_smmu_init_l1_strtab()
2711 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]); in arm_smmu_init_l1_strtab()
2718 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) in arm_smmu_init_strtab_2lvl() argument
2723 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_2lvl()
2726 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); in arm_smmu_init_strtab_2lvl()
2727 size = min(size, smmu->sid_bits - STRTAB_SPLIT); in arm_smmu_init_strtab_2lvl()
2728 cfg->num_l1_ents = 1 << size; in arm_smmu_init_strtab_2lvl()
2731 if (size < smmu->sid_bits) in arm_smmu_init_strtab_2lvl()
2732 dev_warn(smmu->dev, in arm_smmu_init_strtab_2lvl()
2733 "2-level strtab only covers %u/%u bits of SID\n", in arm_smmu_init_strtab_2lvl()
2734 size, smmu->sid_bits); in arm_smmu_init_strtab_2lvl()
2736 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); in arm_smmu_init_strtab_2lvl()
2737 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, in arm_smmu_init_strtab_2lvl()
2740 dev_err(smmu->dev, in arm_smmu_init_strtab_2lvl()
2743 return -ENOMEM; in arm_smmu_init_strtab_2lvl()
2745 cfg->strtab = strtab; in arm_smmu_init_strtab_2lvl()
2751 cfg->strtab_base_cfg = reg; in arm_smmu_init_strtab_2lvl()
2753 return arm_smmu_init_l1_strtab(smmu); in arm_smmu_init_strtab_2lvl()
2756 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) in arm_smmu_init_strtab_linear() argument
2761 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_linear()
2763 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); in arm_smmu_init_strtab_linear()
2764 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, in arm_smmu_init_strtab_linear()
2767 dev_err(smmu->dev, in arm_smmu_init_strtab_linear()
2770 return -ENOMEM; in arm_smmu_init_strtab_linear()
2772 cfg->strtab = strtab; in arm_smmu_init_strtab_linear()
2773 cfg->num_l1_ents = 1 << smmu->sid_bits; in arm_smmu_init_strtab_linear()
2777 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); in arm_smmu_init_strtab_linear()
2778 cfg->strtab_base_cfg = reg; in arm_smmu_init_strtab_linear()
2780 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents); in arm_smmu_init_strtab_linear()
2784 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) in arm_smmu_init_strtab() argument
2789 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_strtab()
2790 ret = arm_smmu_init_strtab_2lvl(smmu); in arm_smmu_init_strtab()
2792 ret = arm_smmu_init_strtab_linear(smmu); in arm_smmu_init_strtab()
2798 reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK; in arm_smmu_init_strtab()
2800 smmu->strtab_cfg.strtab_base = reg; in arm_smmu_init_strtab()
2802 /* Allocate the first VMID for stage-2 bypass STEs */ in arm_smmu_init_strtab()
2803 set_bit(0, smmu->vmid_map); in arm_smmu_init_strtab()
2807 static int arm_smmu_init_structures(struct arm_smmu_device *smmu) in arm_smmu_init_structures() argument
2811 ret = arm_smmu_init_queues(smmu); in arm_smmu_init_structures()
2815 return arm_smmu_init_strtab(smmu); in arm_smmu_init_structures()
2818 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, in arm_smmu_write_reg_sync() argument
2823 writel_relaxed(val, smmu->base + reg_off); in arm_smmu_write_reg_sync()
2824 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val, in arm_smmu_write_reg_sync()
2829 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) in arm_smmu_update_gbpa() argument
2832 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; in arm_smmu_update_gbpa()
2846 dev_err(smmu->dev, "GBPA not responding to update\n"); in arm_smmu_update_gbpa()
2860 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_write_msi_msg() local
2861 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; in arm_smmu_write_msi_msg()
2863 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; in arm_smmu_write_msi_msg()
2866 writeq_relaxed(doorbell, smmu->base + cfg[0]); in arm_smmu_write_msi_msg()
2867 writel_relaxed(msg->data, smmu->base + cfg[1]); in arm_smmu_write_msi_msg()
2868 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); in arm_smmu_write_msi_msg()
2871 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) in arm_smmu_setup_msis() argument
2875 struct device *dev = smmu->dev; in arm_smmu_setup_msis()
2878 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); in arm_smmu_setup_msis()
2879 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); in arm_smmu_setup_msis()
2881 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_msis()
2882 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); in arm_smmu_setup_msis()
2884 nvec--; in arm_smmu_setup_msis()
2886 if (!(smmu->features & ARM_SMMU_FEAT_MSI)) in arm_smmu_setup_msis()
2889 if (!dev->msi_domain) { in arm_smmu_setup_msis()
2890 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); in arm_smmu_setup_msis()
2897 dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); in arm_smmu_setup_msis()
2902 switch (desc->platform.msi_index) { in arm_smmu_setup_msis()
2904 smmu->evtq.q.irq = desc->irq; in arm_smmu_setup_msis()
2907 smmu->gerr_irq = desc->irq; in arm_smmu_setup_msis()
2910 smmu->priq.q.irq = desc->irq; in arm_smmu_setup_msis()
2921 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) in arm_smmu_setup_unique_irqs() argument
2925 arm_smmu_setup_msis(smmu); in arm_smmu_setup_unique_irqs()
2928 irq = smmu->evtq.q.irq; in arm_smmu_setup_unique_irqs()
2930 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
2933 "arm-smmu-v3-evtq", smmu); in arm_smmu_setup_unique_irqs()
2935 dev_warn(smmu->dev, "failed to enable evtq irq\n"); in arm_smmu_setup_unique_irqs()
2937 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n"); in arm_smmu_setup_unique_irqs()
2940 irq = smmu->gerr_irq; in arm_smmu_setup_unique_irqs()
2942 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, in arm_smmu_setup_unique_irqs()
2943 0, "arm-smmu-v3-gerror", smmu); in arm_smmu_setup_unique_irqs()
2945 dev_warn(smmu->dev, "failed to enable gerror irq\n"); in arm_smmu_setup_unique_irqs()
2947 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n"); in arm_smmu_setup_unique_irqs()
2950 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_setup_unique_irqs()
2951 irq = smmu->priq.q.irq; in arm_smmu_setup_unique_irqs()
2953 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
2956 "arm-smmu-v3-priq", in arm_smmu_setup_unique_irqs()
2957 smmu); in arm_smmu_setup_unique_irqs()
2959 dev_warn(smmu->dev, in arm_smmu_setup_unique_irqs()
2962 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); in arm_smmu_setup_unique_irqs()
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
		dev_err(smmu->dev, "failed to disable irqs\n");

	irq = smmu->combined_irq;

		ret = devm_request_threaded_irq(smmu->dev, irq,
						"arm-smmu-v3-combined-irq", smmu);
			dev_warn(smmu->dev, "failed to enable combined irq\n");

		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
		dev_warn(smmu->dev, "failed to enable irqs\n");
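/*
 * arm_smmu_setup_irqs() first masks everything via ARM_SMMU_IRQ_CTRL, then
 * either requests the single combined interrupt (when the firmware provides
 * one) or falls back to arm_smmu_setup_unique_irqs(), and finally re-enables
 * generation of global, event and (when supported) PRI interrupts.
 */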
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
		dev_err(smmu->dev, "failed to clear cr0\n");
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);

	ret = arm_smmu_device_disable(smmu);

	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
		dev_err(smmu->dev, "failed to enable command queue\n");

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.llq.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.llq.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
		dev_err(smmu->dev, "failed to enable event queue\n");

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.llq.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.llq.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
			dev_err(smmu->dev, "failed to enable PRI queue\n");

	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
			dev_err(smmu->dev, "failed to enable ATS check\n");

	ret = arm_smmu_setup_irqs(smmu);
		dev_err(smmu->dev, "failed to setup irqs\n");

	/* Enable the SMMU interface, or ensure bypass */

		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
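/*
 * Reset sequence in outline: disable the SMMU (forcing GBPA to abort first
 * if firmware left it enabled), program CR1/CR2 and the stream table base,
 * bring up the command queue, invalidate cached configuration and TLB state
 * via CFGI/TLBI commands, enable the event and (optionally) PRI queues and
 * ATS checking, set up the interrupts, and only then either enable
 * translation or leave the SMMU in the requested bypass/abort mode.
 */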
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)

	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;

		smmu->features |= ARM_SMMU_FEAT_PRI;

		smmu->features |= ARM_SMMU_FEAT_ATS;

		smmu->features |= ARM_SMMU_FEAT_SEV;

		smmu->features |= ARM_SMMU_FEAT_MSI;
			smmu->options |= ARM_SMMU_OPT_MSIPOLL;

		smmu->features |= ARM_SMMU_FEAT_HYP;

		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",

		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		smmu->features |= ARM_SMMU_FEAT_STALLS;

		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;

		smmu->ias = 40;

		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;

	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;

	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
		 * restrictions on the base pointer for a unit-length queue.
		dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
		return -ENXIO;

	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,

	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);

	 * If the SMMU supports fewer bits than would fill a single L2 stream
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;

	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

		smmu->features |= ARM_SMMU_FEAT_VAX;

		smmu->oas = 32;
		smmu->oas = 36;
		smmu->oas = 40;
		smmu->oas = 42;
		smmu->oas = 44;
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		smmu->oas = 48;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,

	smmu->ias = max(smmu->ias, smmu->oas);

	if (arm_smmu_sva_supported(smmu))
		smmu->features |= ARM_SMMU_FEAT_SVA;

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
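/*
 * Everything above is derived from the IDR0/IDR1/IDR3/IDR5 registers:
 * optional features (two-level tables, ATS, PRI, MSI, stalls, stage-1/2
 * translation), queue size limits, StreamID/SubstreamID widths, supported
 * page sizes and the input/output address sizes. The result is cached in
 * smmu->features so the rest of the driver never re-reads the ID registers.
 */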
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)

		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;

		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
/* arm_smmu_device_acpi_probe() */
				      struct arm_smmu_device *smmu)

	struct device *dev = smmu->dev;

	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

/* arm_smmu_device_acpi_probe() stub (!CONFIG_ACPI) */
				      struct arm_smmu_device *smmu)

	return -ENODEV;
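/*
 * With ACPI the probe data comes from the IORT SMMUv3 node (the model
 * quirks and COHACC override handled above); when the kernel is built
 * without ACPI the stub simply returns -ENODEV so that only the
 * device-tree path below is used.
 */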
/* arm_smmu_device_dt_probe() */
				    struct arm_smmu_device *smmu)

	struct device *dev = &pdev->dev;

	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
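/*
 * Example binding fragment (illustrative only; the unit address and
 * StreamID range are made up) showing the "#iommu-cells" property
 * validated above and a PCI master mapped onto the SMMU by StreamID:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		#iommu-cells = <1>;
 *		dma-coherent;
 *	};
 *
 *	pcie@40000000 {
 *		iommu-map = <0 &smmu 0 0x10000>;
 *	};
 *
 * "dma-coherent" is what of_dma_is_coherent() checks when setting
 * ARM_SMMU_FEAT_COHERENCY.
 */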
static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)

	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)

/* arm_smmu_ioremap() */
		.end = start + size - 1,
/* arm_smmu_device_probe() */
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		return -ENOMEM;

	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);

		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)

	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
		return -EINVAL;

	ioaddr = res->start;

	smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	if (arm_smmu_resource_size(smmu) > SZ_64K) {
		smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
		if (IS_ERR(smmu->page1))
			return PTR_ERR(smmu->page1);

		smmu->page1 = smmu->base;

		smmu->combined_irq = irq;

		smmu->evtq.q.irq = irq;

		smmu->priq.q.irq = irq;

		smmu->gerr_irq = irq;

	ret = arm_smmu_device_hw_probe(smmu);

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);

	platform_set_drvdata(pdev, smmu);

	ret = arm_smmu_device_reset(smmu, bypass);

	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
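/*
 * Probe order matters here: firmware description (DT or IORT) first, then
 * the register windows and wired IRQ lookup, hardware feature probing,
 * allocation of the in-memory queues and stream table, a full device reset,
 * and only then registration with the IOMMU core, so that no master can be
 * attached before the SMMU is in a known state.
 */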
/* arm_smmu_device_remove() */
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);
	arm_smmu_device_disable(smmu);
	{ .compatible = "arm,smmu-v3", },

		.name			= "arm-smmu-v3",

MODULE_ALIAS("platform:arm-smmu-v3");
MODULE_LICENSE("GPL v2");