/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <zephyr/drivers/pcie/vc.h>
#include <zephyr/drivers/pcie/cap.h>

#include "vc.h"

uint32_t pcie_vc_cap_lookup(pcie_bdf_t bdf, struct pcie_vc_regs *regs)
{
	uint32_t base;

	base = pcie_get_ext_cap(bdf, PCIE_EXT_CAP_ID_VC);
	if (base == 0) {
		base = pcie_get_ext_cap(bdf, PCIE_EXT_CAP_ID_MFVC_VC);
		if (base == 0) {
			return 0;
		}
	}

	regs->cap_reg_1.raw = pcie_conf_read(bdf, base +
					     PCIE_VC_CAP_REG_1_OFFSET);
	regs->cap_reg_2.raw = pcie_conf_read(bdf, base +
					     PCIE_VC_CAP_REG_2_OFFSET);
	regs->ctrl_reg.raw = pcie_conf_read(bdf, base +
					    PCIE_VC_CTRL_STATUS_REG_OFFSET);

	return base;
}
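
/*
 * Illustrative sketch (not part of the driver): how a caller might probe a
 * device for the VC/MFVC extended capability. The PCIE_BDF() coordinates
 * below are hypothetical; a real caller would obtain the BDF from bus
 * enumeration.
 *
 *	struct pcie_vc_regs regs;
 *	uint32_t base = pcie_vc_cap_lookup(PCIE_BDF(0, 1, 0), &regs);
 *
 *	if (base != 0) {
 *		// regs.cap_reg_1.vc_count = number of VCs beyond VC0
 *	}
 */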

void pcie_vc_load_resources_regs(pcie_bdf_t bdf,
				 uint32_t base,
				 struct pcie_vc_resource_regs *regs,
				 int nb_regs)
{
	int idx;

	for (idx = 0; idx < nb_regs; idx++) {
		regs->cap_reg.raw =
			pcie_conf_read(bdf, base +
				       PCIE_VC_RES_CAP_REG_OFFSET(idx));
		regs->ctrl_reg.raw =
			pcie_conf_read(bdf, base +
				       PCIE_VC_RES_CTRL_REG_OFFSET(idx));
		regs->status_reg.raw =
			pcie_conf_read(bdf, base +
				       PCIE_VC_RES_STATUS_REG_OFFSET(idx));
		regs++;
	}
}

static int get_vc_registers(pcie_bdf_t bdf,
			    struct pcie_vc_regs *regs,
			    struct pcie_vc_resource_regs *res_regs)
{
	uint32_t base;

	base = pcie_vc_cap_lookup(bdf, regs);
	if (base == 0) {
		return -ENOTSUP;
	}

	if (regs->cap_reg_1.vc_count == 0) {
		/* Having only VC0 is like having no real VC */
		return -ENOTSUP;
	}

	pcie_vc_load_resources_regs(bdf, base, res_regs,
				    regs->cap_reg_1.vc_count + 1);

	return 0;
}


int pcie_vc_enable(pcie_bdf_t bdf)
{
	struct pcie_vc_regs regs;
	struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT];
	int idx;

	if (get_vc_registers(bdf, &regs, res_regs) != 0) {
		return -ENOTSUP;
	}

	/* We do not touch VC0: it is always on */
	for (idx = 1; idx < regs.cap_reg_1.vc_count + 1; idx++) {
		if (idx > 0 && res_regs[idx].ctrl_reg.vc_enable == 1) {
			/*
			 * The VC was not disabled properly beforehand, if at all
			 */
			return -EALREADY;
		}

		res_regs[idx].ctrl_reg.vc_enable = 1;
	}

	return 0;
}
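
/*
 * Illustrative sketch (not part of the driver): enabling the extended VCs of
 * a device. The bdf variable is assumed to come from enumeration.
 *
 *	int ret = pcie_vc_enable(bdf);
 *
 *	if (ret == -EALREADY) {
 *		// A VC was already enabled: disable everything first, then retry
 *		pcie_vc_disable(bdf);
 *		ret = pcie_vc_enable(bdf);
 *	}
 */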

int pcie_vc_disable(pcie_bdf_t bdf)
{
	struct pcie_vc_regs regs;
	struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT];
	int idx;

	if (get_vc_registers(bdf, &regs, res_regs) != 0) {
		return -ENOTSUP;
	}

	/* We do not touch VC0: it is always on */
	for (idx = 1; idx < regs.cap_reg_1.vc_count + 1; idx++) {
		/* Let's wait for the pending negotiation to end */
		while (res_regs[idx].status_reg.vc_negocation_pending == 1) {
			k_msleep(10);
		}

		res_regs[idx].ctrl_reg.vc_enable = 0;
	}

	return 0;
}
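
/*
 * Note (assumption, not enforced by this driver): the PCIe specification
 * expects the TC/VC map of a non-default VC to be modified only while that
 * VC is disabled, so a typical reconfiguration sequence would be
 * pcie_vc_disable(), then pcie_vc_map_tc(), then pcie_vc_enable().
 */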

int pcie_vc_map_tc(pcie_bdf_t bdf, struct pcie_vctc_map *map)
{
	struct pcie_vc_regs regs;
	struct pcie_vc_resource_regs res_regs[PCIE_VC_MAX_COUNT];
	int idx;
	uint8_t tc_mapped = 0;

	if (get_vc_registers(bdf, &regs, res_regs) != 0) {
		return -ENOTSUP;
	}

	/* The map must match the device's extended VC count */
	if (regs.cap_reg_1.vc_count != map->vc_count) {
		return -EINVAL;
	}

	/* Verify that the map is sane */
	for (idx = 0; idx < map->vc_count; idx++) {
		if (idx == 0 && !(map->vc_tc[idx] & PCIE_VC_SET_TC0)) {
			/* TC0 is on VC0 and cannot be unset */
			return -EINVAL;
		}

		/* Each TC must appear only once in the map */
		if (tc_mapped & map->vc_tc[idx]) {
			return -EINVAL;
		}

		tc_mapped |= map->vc_tc[idx];
	}

	for (idx = 0; idx < regs.cap_reg_1.vc_count + 1; idx++) {
		/* Let's just set the VC ID to the related index for now */
		if (idx > 0) {
			res_regs[idx].ctrl_reg.vc_id = idx;
		}

		/* Currently, only HW round robin is used */
		res_regs[idx].ctrl_reg.pa_select = PCIE_VC_PA_RR;

		res_regs[idx].ctrl_reg.tc_vc_map = map->vc_tc[idx];
	}

	return 0;
}
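
/*
 * Illustrative sketch (not part of the driver): building a TC/VC map for a
 * device reporting one extended VC. The struct pcie_vctc_map layout follows
 * its usage in this file; PCIE_VC_SET_TC1 is assumed to be defined alongside
 * PCIE_VC_SET_TC0, and the assignment below is only an example.
 *
 *	struct pcie_vc_regs regs;
 *	struct pcie_vctc_map map = { 0 };
 *
 *	if (pcie_vc_cap_lookup(bdf, &regs) == 0) {
 *		return -ENOTSUP;
 *	}
 *
 *	map.vc_count = regs.cap_reg_1.vc_count;
 *	map.vc_tc[0] = PCIE_VC_SET_TC0;	// TC0 stays on VC0
 *	map.vc_tc[1] = PCIE_VC_SET_TC1;	// TC1 goes to VC1, for example
 *
 *	if (pcie_vc_map_tc(bdf, &map) != 0) {
 *		// map rejected: wrong VC count or a TC mapped more than once
 *	}
 */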