/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4
/* Number of Tx queues per traffic class */
#define DPAA_TC_TXQ_NUM		NR_CPUS
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
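
/* Illustrative sketch, not part of the original header: assuming the
 * DPAA_ETH_TXQ_NUM-sized FQ tables declared below are laid out as
 * DPAA_TC_NUM consecutive groups of DPAA_TC_TXQ_NUM queues, the flat queue
 * index for a given traffic class and per-class queue offset would be
 * computed as follows. The helper name is hypothetical and only documents
 * the relationship between the macros above.
 */
static inline unsigned int dpaa_example_txq_index(unsigned int tc,
						  unsigned int txq)
{
	/* tc in [0, DPAA_TC_NUM), txq in [0, DPAA_TC_TXQ_NUM) */
	return tc * DPAA_TC_TXQ_NUM + txq;
}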

/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};

struct dpaa_fq {
	struct qman_fq fq_base;
	struct list_head list;
	struct net_device *net_dev;
	bool init;
	u32 fqid;
	u32 flags;
	u16 channel;
	u8 wq;
	enum dpaa_fq_type fq_type;
	struct xdp_rxq_info xdp_rxq;
};

struct dpaa_fq_cbs {
	struct qman_fq rx_defq;
	struct qman_fq tx_defq;
	struct qman_fq rx_errq;
	struct qman_fq tx_errq;
	struct qman_fq egress_ern;
};

struct dpaa_priv;

struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool allotted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	u8 bpid;
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	refcount_t refs;
};

struct dpaa_rx_errors {
	u64 dme; /* DMA Error */
	u64 fpe; /* Frame Physical Error */
	u64 fse; /* Frame Size Error */
	u64 phe; /* Header Error */
};

/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};

struct dpaa_napi_portal {
	struct napi_struct napi;
	struct qman_portal *p;
	bool down;
	int xdp_act;
};

struct dpaa_percpu_priv {
	struct net_device *net_dev;
	struct dpaa_napi_portal np;
	u64 in_interrupt;
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};

struct dpaa_buffer_layout {
	u16 priv_data_size;
};

/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	struct sk_buff *skb;
	struct xdp_frame *xdpf;
};

struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];

	u16 channel;
	struct list_head dpaa_fq_list;

	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;

	bool tx_tstamp; /* Tx timestamping enabled */
	bool rx_tstamp; /* Rx timestamping enabled */

	struct bpf_prog *xdp_prog;
};

/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);

#endif /* __DPAA_H */
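
/* Illustrative sketch, not part of the original header: struct dpaa_eth_swbp
 * (declared above) is the per-frame metadata the driver stashes in the Tx
 * headroom, within the first DPAA_TX_PRIV_DATA_SIZE bytes right before the
 * frame data, so the Tx confirmation path can recover the originating skb or
 * xdp_frame. A hedged outline of that pattern, with hypothetical variable
 * names (buf_start marks the beginning of the Tx headroom):
 *
 *	// transmit side: record the skb just before the frame data
 *	struct dpaa_eth_swbp *swbp = (struct dpaa_eth_swbp *)buf_start;
 *	swbp->skb = skb;
 *	swbp->xdpf = NULL;
 *
 *	// confirmation side: read back the metadata from the same location
 *	swbp = (struct dpaa_eth_swbp *)buf_start;
 *	if (swbp->skb)
 *		consume_skb(swbp->skb);
 *
 * The exact headroom math and cleanup logic live in dpaa_eth.c and may
 * differ from this outline.
 */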