/Linux-v5.4/include/linux/ |
D | edac.h |
      357   * Maximum number of layers used by the memory controller to uniquely
      361   * some code there that are optimized for 3 layers.
      370   * @layers: a struct edac_mc_layer array, describing how many elements
      372   * @nlayers: Number of layers at the @layers array
      379   * For 2 layers, this macro is similar to allocate a bi-dimensional array
      382   * For 3 layers, this macro is similar to allocate a tri-dimensional array
      386   * 3 layers, this is a little faster.
      388   * By design, layers can never be 0 or more than 3. If that ever happens,
      392   #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \  argument
      397   __i = (layer1) + ((layers[1]).size * (layer0)); \
      [all …]
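The edac.h hits above document EDAC_DIMM_OFF(), which flattens a per-layer position into a single linear DIMM index. Below is a minimal, standalone sketch of the 2-layer arithmetic shown at line 397 (layer1 + layers[1].size * layer0); the reduced struct edac_mc_layer and the helper name dimm_off_2layer() are illustrative stand-ins, not the kernel definitions.

/*
 * Standalone illustration of the 2-layer case of EDAC_DIMM_OFF():
 * the DIMM index is computed row-major, with layer0 as the outer
 * dimension. struct edac_mc_layer is reduced here to the one field
 * the arithmetic needs; the kernel's definition has more members
 * (type, is_virt_csrow).
 */
#include <stdio.h>

struct edac_mc_layer {
        unsigned int size;      /* number of elements in this layer */
};

/* Hypothetical helper mirroring the nlayers == 2 branch of the macro */
static int dimm_off_2layer(const struct edac_mc_layer *layers,
                           int layer0, int layer1)
{
        return layer1 + layers[1].size * layer0;
}

int main(void)
{
        /* e.g. 4 chip-selects x 2 channels -> 8 DIMM slots */
        struct edac_mc_layer layers[2] = { { .size = 4 }, { .size = 2 } };

        /* csrow 2, channel 1 lands at linear index 5 */
        printf("offset = %d\n", dimm_off_2layer(layers, 2, 1));
        return 0;
}

With 4 chip-selects and 2 channels, csrow 2 / channel 1 maps to linear index 5, matching the row-major layout the macro documentation describes.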
|
/Linux-v5.4/drivers/edac/ |
D | pasemi_edac.c |
      183  struct edac_mc_layer layers[2];  in pasemi_edac_probe() local
      200  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in pasemi_edac_probe()
      201  layers[0].size = PASEMI_EDAC_NR_CSROWS;  in pasemi_edac_probe()
      202  layers[0].is_virt_csrow = true;  in pasemi_edac_probe()
      203  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in pasemi_edac_probe()
      204  layers[1].size = PASEMI_EDAC_NR_CHANS;  in pasemi_edac_probe()
      205  layers[1].is_virt_csrow = false;  in pasemi_edac_probe()
      206  mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,  in pasemi_edac_probe()
|
D | highbank_mc_edac.c |
      148  struct edac_mc_layer layers[2];  in highbank_mc_probe() local
      162  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in highbank_mc_probe()
      163  layers[0].size = 1;  in highbank_mc_probe()
      164  layers[0].is_virt_csrow = true;  in highbank_mc_probe()
      165  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in highbank_mc_probe()
      166  layers[1].size = 1;  in highbank_mc_probe()
      167  layers[1].is_virt_csrow = false;  in highbank_mc_probe()
      168  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,  in highbank_mc_probe()
|
D | cell_edac.c |
      172  struct edac_mc_layer layers[2];  in cell_edac_probe() local
      202  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in cell_edac_probe()
      203  layers[0].size = 1;  in cell_edac_probe()
      204  layers[0].is_virt_csrow = true;  in cell_edac_probe()
      205  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in cell_edac_probe()
      206  layers[1].size = num_chans;  in cell_edac_probe()
      207  layers[1].is_virt_csrow = false;  in cell_edac_probe()
      208  mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,  in cell_edac_probe()
|
D | i82860_edac.c |
      188  struct edac_mc_layer layers[2];  in i82860_probe1() local
      201  layers[0].type = EDAC_MC_LAYER_CHANNEL;  in i82860_probe1()
      202  layers[0].size = 2;  in i82860_probe1()
      203  layers[0].is_virt_csrow = true;  in i82860_probe1()
      204  layers[1].type = EDAC_MC_LAYER_SLOT;  in i82860_probe1()
      205  layers[1].size = 8;  in i82860_probe1()
      206  layers[1].is_virt_csrow = true;  in i82860_probe1()
      207  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in i82860_probe1()
|
D | amd76x_edac.c |
      238  struct edac_mc_layer layers[2];  in amd76x_probe1() local
      247  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in amd76x_probe1()
      248  layers[0].size = AMD76X_NR_CSROWS;  in amd76x_probe1()
      249  layers[0].is_virt_csrow = true;  in amd76x_probe1()
      250  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in amd76x_probe1()
      251  layers[1].size = 1;  in amd76x_probe1()
      252  layers[1].is_virt_csrow = false;  in amd76x_probe1()
      253  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in amd76x_probe1()
|
D | aspeed_edac.c |
      282  struct edac_mc_layer layers[2];  in aspeed_probe() local
      312  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in aspeed_probe()
      313  layers[0].size = 1;  in aspeed_probe()
      314  layers[0].is_virt_csrow = true;  in aspeed_probe()
      315  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in aspeed_probe()
      316  layers[1].size = 1;  in aspeed_probe()
      317  layers[1].is_virt_csrow = false;  in aspeed_probe()
      319  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in aspeed_probe()
|
D | octeon_edac-lmc.c |
      228  struct edac_mc_layer layers[1];  in octeon_lmc_edac_probe() local
      233  layers[0].type = EDAC_MC_LAYER_CHANNEL;  in octeon_lmc_edac_probe()
      234  layers[0].size = 1;  in octeon_lmc_edac_probe()
      235  layers[0].is_virt_csrow = false;  in octeon_lmc_edac_probe()
      246  mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));  in octeon_lmc_edac_probe()
      278  mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));  in octeon_lmc_edac_probe()
|
D | i3200_edac.c |
      341  struct edac_mc_layer layers[2];  in i3200_probe1() local
      356  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in i3200_probe1()
      357  layers[0].size = I3200_DIMMS;  in i3200_probe1()
      358  layers[0].is_virt_csrow = true;  in i3200_probe1()
      359  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in i3200_probe1()
      360  layers[1].size = nr_channels;  in i3200_probe1()
      361  layers[1].is_virt_csrow = false;  in i3200_probe1()
      362  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,  in i3200_probe1()
      395  struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,  in i3200_probe1()
|
D | r82600_edac.c |
      272  struct edac_mc_layer layers[2];  in r82600_probe1() local
      286  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in r82600_probe1()
      287  layers[0].size = R82600_NR_CSROWS;  in r82600_probe1()
      288  layers[0].is_virt_csrow = true;  in r82600_probe1()
      289  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in r82600_probe1()
      290  layers[1].size = R82600_NR_CHANS;  in r82600_probe1()
      291  layers[1].is_virt_csrow = false;  in r82600_probe1()
      292  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in r82600_probe1()
|
D | i82443bxgx_edac.c |
      235  struct edac_mc_layer layers[2];  in i82443bxgx_edacmc_probe1() local
      249  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in i82443bxgx_edacmc_probe1()
      250  layers[0].size = I82443BXGX_NR_CSROWS;  in i82443bxgx_edacmc_probe1()
      251  layers[0].is_virt_csrow = true;  in i82443bxgx_edacmc_probe1()
      252  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in i82443bxgx_edacmc_probe1()
      253  layers[1].size = I82443BXGX_NR_CHANS;  in i82443bxgx_edacmc_probe1()
      254  layers[1].is_virt_csrow = false;  in i82443bxgx_edacmc_probe1()
      255  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in i82443bxgx_edacmc_probe1()
|
D | x38_edac.c |
      323  struct edac_mc_layer layers[2];  in x38_probe1() local
      339  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in x38_probe1()
      340  layers[0].size = X38_RANKS;  in x38_probe1()
      341  layers[0].is_virt_csrow = true;  in x38_probe1()
      342  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in x38_probe1()
      343  layers[1].size = x38_channel_num;  in x38_probe1()
      344  layers[1].is_virt_csrow = false;  in x38_probe1()
      345  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in x38_probe1()
|
D | i3000_edac.c |
      314  struct edac_mc_layer layers[2];  in i3000_probe1() local
      357  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in i3000_probe1()
      358  layers[0].size = I3000_RANKS / nr_channels;  in i3000_probe1()
      359  layers[0].is_virt_csrow = true;  in i3000_probe1()
      360  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in i3000_probe1()
      361  layers[1].size = nr_channels;  in i3000_probe1()
      362  layers[1].is_virt_csrow = false;  in i3000_probe1()
      363  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);  in i3000_probe1()
|
D | bluefield_edac.c |
      246  struct edac_mc_layer layers[1];  in bluefield_edac_mc_probe() local
      273  layers[0].type = EDAC_MC_LAYER_SLOT;  in bluefield_edac_mc_probe()
      274  layers[0].size = dimm_count;  in bluefield_edac_mc_probe()
      275  layers[0].is_virt_csrow = true;  in bluefield_edac_mc_probe()
      277  mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));  in bluefield_edac_mc_probe()
|
D | ie31200_edac.c |
      399  struct edac_mc_layer layers[2];  in ie31200_probe1() local
      419  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in ie31200_probe1()
      420  layers[0].size = IE31200_DIMMS;  in ie31200_probe1()
      421  layers[0].is_virt_csrow = true;  in ie31200_probe1()
      422  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in ie31200_probe1()
      423  layers[1].size = nr_channels;  in ie31200_probe1()
      424  layers[1].is_virt_csrow = false;  in ie31200_probe1()
      425  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,  in ie31200_probe1()
      493  dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,  in ie31200_probe1()
      506  dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,  in ie31200_probe1()
|
D | edac_mc.c |
      126  edac_layer_name[mci->layers[i].type],  in edac_dimm_info_location()
      307  struct edac_mc_layer *layers,  in edac_mc_alloc() argument
      329  tot_dimms *= layers[i].size;  in edac_mc_alloc()
      330  if (layers[i].is_virt_csrow)  in edac_mc_alloc()
      331  tot_csrows *= layers[i].size;  in edac_mc_alloc()
      333  tot_channels *= layers[i].size;  in edac_mc_alloc()
      335  if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)  in edac_mc_alloc()
      347  count *= layers[i].size;  in edac_mc_alloc()
      383  mci->layers = layer;  in edac_mc_alloc()
      384  memcpy(mci->layers, layers, sizeof(*layer) * n_layers);  in edac_mc_alloc()
      [all …]
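The edac_mc_alloc() hits at lines 329-335 show how the caller's layer array drives the accounting: every layer size multiplies into the DIMM total, and each layer counts toward either csrows or channels depending on is_virt_csrow. The sketch below reproduces just that loop with simplified, locally defined types; it illustrates the bookkeeping only, not the kernel function, which also performs the allocation and sysfs wiring.

/*
 * Sketch of the layer accounting done by edac_mc_alloc():
 *   tot_dimms    = product of all layer sizes
 *   tot_csrows   = product of the sizes of "virtual csrow" layers
 *   tot_channels = product of the sizes of the remaining layers
 * Types are reduced to what the loop needs.
 */
#include <stdbool.h>
#include <stdio.h>

enum edac_mc_layer_type {
        EDAC_MC_LAYER_CHIP_SELECT,
        EDAC_MC_LAYER_CHANNEL,
        EDAC_MC_LAYER_SLOT,
};

struct edac_mc_layer {
        enum edac_mc_layer_type type;
        unsigned int size;
        bool is_virt_csrow;
};

static void count_totals(const struct edac_mc_layer *layers, int n_layers)
{
        unsigned int tot_dimms = 1, tot_csrows = 1, tot_channels = 1;

        for (int i = 0; i < n_layers; i++) {
                tot_dimms *= layers[i].size;
                if (layers[i].is_virt_csrow)
                        tot_csrows *= layers[i].size;
                else
                        tot_channels *= layers[i].size;
        }
        printf("dimms=%u csrows=%u channels=%u\n",
               tot_dimms, tot_csrows, tot_channels);
}

int main(void)
{
        /* The 2-layer shape used by most of the drivers in this listing */
        struct edac_mc_layer layers[2] = {
                { .type = EDAC_MC_LAYER_CHIP_SELECT, .size = 4, .is_virt_csrow = true  },
                { .type = EDAC_MC_LAYER_CHANNEL,     .size = 2, .is_virt_csrow = false },
        };

        count_totals(layers, 2);        /* dimms=8 csrows=4 channels=2 */
        return 0;
}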
|
D | i82875p_edac.c |
      392  struct edac_mc_layer layers[2];  in i82875p_probe1() local
      407  layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;  in i82875p_probe1()
      408  layers[0].size = I82875P_NR_CSROWS(nr_chans);  in i82875p_probe1()
      409  layers[0].is_virt_csrow = true;  in i82875p_probe1()
      410  layers[1].type = EDAC_MC_LAYER_CHANNEL;  in i82875p_probe1()
      411  layers[1].size = nr_chans;  in i82875p_probe1()
      412  layers[1].is_virt_csrow = false;  in i82875p_probe1()
      413  mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));  in i82875p_probe1()
|
/Linux-v5.4/drivers/media/dvb-frontends/ |
D | tc90522.c |
      201  int layers;  in tc90522s_get_frontend() local
      209  layers = 0;  in tc90522s_get_frontend()
      236  layers = (v > 0) ? 2 : 1;  in tc90522s_get_frontend()
      284  stats->len = layers;  in tc90522s_get_frontend()
      287  for (i = 0; i < layers; i++)  in tc90522s_get_frontend()
      290  for (i = 0; i < layers; i++) {  in tc90522s_get_frontend()
      298  stats->len = layers;  in tc90522s_get_frontend()
      300  for (i = 0; i < layers; i++)  in tc90522s_get_frontend()
      303  for (i = 0; i < layers; i++) {  in tc90522s_get_frontend()
      336  int layers;  in tc90522t_get_frontend() local
      [all …]
|
/Linux-v5.4/Documentation/scsi/ |
D | scsi_eh.txt |
      137  Note that this does not mean lower layers are quiescent. If a LLDD
      138  completed a scmd with error status, the LLDD and lower layers are
      140  has timed out, unless hostt->eh_timed_out() made lower layers forget
      142  active as long as lower layers are concerned and completion could
      188  lower layers and lower layers are ready to process or fail the scmd
      355  that lower layers have forgotten about the scmd and we can
      364  and STU doesn't make lower layers forget about those
      366  if STU succeeds leaving lower layers in an inconsistent
      418  On completion, the handler should have made lower layers forget about
      458  - Know that timed out scmds are still active on lower layers. Make
      [all …]
|
D | ufs.txt |
      48  UFS communication architecture consists of following layers,
      63  layers. Device level configurations involve handling of query
      70  the higher layers through Service Access Points. UTP defines 3
      71  service access points for higher layers.
      88  * UIO_SAP: To issue commands to Unipro layers.
|
/Linux-v5.4/Documentation/driver-api/fpga/ |
D | intro.rst |
      9   * The FPGA subsystem separates upper layers (userspace interfaces and
      10  enumeration) from lower layers that know how to program a specific
      13  * Code should not be shared between upper and lower layers. This
|
/Linux-v5.4/drivers/staging/most/Documentation/ |
D | driver_usage.txt |
      8   MOST defines the protocol, hardware and software layers necessary to allow
      19  consumer devices via optical or electrical physical layers directly to one
      27  three layers. From bottom up these layers are: the adapter layer, the core
      31  routing through all three layers, the configuration of the driver, the
      35  For each of the other two layers a set of modules is provided. Those can be
|
/Linux-v5.4/include/net/caif/ |
D | caif_layer.h |
      129  * It defines CAIF layering structure, used by all CAIF Layers and the
      130  * layers interfacing CAIF.
      136  * Principles for layering of protocol layers:
      137  * - All layers must use this structure. If embedding it, then place this
      169  * - If parsing succeeds (and above layers return OK) then
      253  * logical CAIF connection. Used by service layers to
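The caif_layer.h comments quoted above state that every protocol layer must use the common layering structure, typically by embedding it. The following standalone sketch illustrates that embedding pattern with container_of(); struct cflayer is reduced here to its up/dn links and my_service_layer is a hypothetical layer, so this mirrors only the principle, not the kernel's full definition.

/*
 * Standalone sketch of the embedding principle from lines 136-137 above:
 * a protocol layer embeds the generic layer structure and recovers its
 * private state with container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cflayer {
        struct cflayer *up;     /* layer above */
        struct cflayer *dn;     /* layer below */
};

/* A hypothetical service layer embedding the generic layer */
struct my_service_layer {
        struct cflayer layer;
        int connection_id;
};

static void handle(struct cflayer *layr)
{
        struct my_service_layer *svc =
                container_of(layr, struct my_service_layer, layer);

        printf("servicing connection %d\n", svc->connection_id);
}

int main(void)
{
        struct my_service_layer svc = { .connection_id = 42 };

        handle(&svc.layer);
        return 0;
}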
|
/Linux-v5.4/Documentation/filesystems/ |
D | overlayfs.txt |
      30   In the special case of all overlay layers on the same underlying
      37   On 64bit systems, even if all overlay layers are not on the same
      249  Multiple lower layers
      252  Multiple lower layers can now be given using the the colon (":") as a
      287  for untrusted layers like from a pen drive.
      295  Sharing and copying layers
      298  Lower layers may be shared among several overlay mounts and that is indeed
      330  It is quite a common practice to copy overlay layers to a different
      333  the copied layers will fail the verification of the lower root file handle.
      376  underlying filesystem for all layers making up the overlay.
      [all …]
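The overlayfs excerpt at line 252 notes that multiple lower layers are passed as a colon-separated lowerdir list. Below is a minimal sketch of that option string, expressed as a mount(2) call; the directory paths are hypothetical, and the leftmost lower directory ends up topmost in the stack.

/*
 * Minimal sketch of mounting an overlay with three lower layers.
 * Equivalent to:
 *   mount -t overlay overlay \
 *         -olowerdir=/lower1:/lower2:/lower3,upperdir=/upper,workdir=/work /merged
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        const char *opts = "lowerdir=/lower1:/lower2:/lower3,"
                           "upperdir=/upper,workdir=/work";

        if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
                perror("mount");
                return 1;
        }
        return 0;
}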
|
/Linux-v5.4/drivers/gpu/drm/atmel-hlcdc/ |
D | atmel_hlcdc_dc.h |
      135  * can be placed differently on 2 different layers depending on its
      307  * @layers: a layer description table describing available layers
      320  const struct atmel_hlcdc_layer_desc *layers;  member
      333  * @layers: active HLCDC layers
      343  struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];  member
|