1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * sata_sx4.c - Promise SATA
4 *
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc.
10 *
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
13 *
14 * Hardware documentation available under NDA.
15 */
16
17 /*
18 Theory of operation
19 -------------------
20
21 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22 engine, DIMM memory, and four ATA engines (one per SATA port).
23 Data is copied to/from DIMM memory by the HDMA engine, before
24 handing off to one (or more) of the ATA engines. The ATA
25 engines operate solely on DIMM memory.
26
27 The SX4 behaves like a PATA chip, with no SATA controls or
28 knowledge whatsoever, leading to the presumption that
29 PATA<->SATA bridges exist on SX4 boards, external to the
30 PDC20621 chip itself.
31
32 The chip is quite capable, supporting an XOR engine and linked
hardware commands (permits a string of transactions to be
34 submitted and waited-on as a single unit), and an optional
35 microprocessor.
36
37 The limiting factor is largely software. This Linux driver was
38 written to multiplex the single HDMA engine to copy disk
39 transactions into a fixed DIMM memory space, from where an ATA
40 engine takes over. As a result, each WRITE looks like this:
41
42 submit HDMA packet to hardware
43 hardware copies data from system memory to DIMM
44 hardware raises interrupt
45
46 submit ATA packet to hardware
47 hardware executes ATA WRITE command, w/ data in DIMM
48 hardware raises interrupt
49
50 and each READ looks like this:
51
52 submit ATA packet to hardware
53 hardware executes ATA READ command, w/ data in DIMM
54 hardware raises interrupt
55
56 submit HDMA packet to hardware
57 hardware copies data from DIMM to system memory
58 hardware raises interrupt
59
60 This is a very slow, lock-step way of doing things that can
61 certainly be improved by motivated kernel hackers.
62
63 */
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/slab.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/device.h>
73 #include <scsi/scsi_host.h>
74 #include <scsi/scsi_cmnd.h>
75 #include <linux/libata.h>
76 #include "sata_promise.h"
77
78 #define DRV_NAME "sata_sx4"
79 #define DRV_VERSION "0.12"
80
81 static int dimm_test;
82 module_param(dimm_test, int, 0644);
83 MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
84
/*
 * Register offsets, bit definitions and chosen DIMM memory-map constants
 * for the PDC20621.  The PDC_DIMM_* / PDC_PAGE_* values below the marked
 * comment are NOT hardware constants: this driver designs its own layout
 * inside the on-board DIMM.
 */
enum {
	PDC_MMIO_BAR = 3,		/* PCI BAR holding chip registers */
	PDC_DIMM_BAR = 4,		/* PCI BAR exposing the DIMM window */

	PDC_PRD_TBL = 0x44,		/* Direct command DMA table addr */

	PDC_PKT_SUBMIT = 0x40,		/* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT = 0x100,	/* Host DMA packet pointer addr */
	PDC_INT_SEQMASK = 0x40,		/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT = 0x12C,	/* Host DMA control / status */

	PDC_CTLSTAT = 0x60,		/* IDEn control / status */

	PDC_20621_SEQCTL = 0x400,	/* per-SEQ control regs (4 bytes each) */
	PDC_20621_SEQMASK = 0x480,	/* asserted SEQ interrupt bits */
	PDC_20621_GENERAL_CTL = 0x484,
	PDC_20621_PAGE_SIZE = (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW = 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE = 0x00200000,
	PDC_20621_DIMM_DATA = (64 * 1024),
	PDC_DIMM_DATA_STEP = (256 * 1024),	/* per-port data area stride */
	PDC_DIMM_WINDOW_STEP = (8 * 1024),	/* per-port packet area stride */
	PDC_DIMM_HOST_PRD = (6 * 1024),		/* host-side S/G table offset */
	PDC_DIMM_HOST_PKT = (128 * 0),		/* HDMA packet offset */
	PDC_DIMM_HPKT_PRD = (128 * 1),		/* HDMA PRD table offset */
	PDC_DIMM_ATA_PKT = (128 * 2),		/* ATA packet offset */
	PDC_DIMM_APKT_PRD = (128 * 3),		/* ATA PRD table offset */
	PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW = 0x40,
	PDC_PAGE_DATA = PDC_PAGE_WINDOW +
			(PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS = 0xC0000,	/* offset of chip #0 */

	PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
			     (1<<23),

	board_20621 = 0,		/* FastTrak S150 SX4 */

	PDC_MASK_INT = (1 << 10),	/* HDMA/ATA mask int */
	PDC_RESET = (1 << 11),		/* HDMA/ATA reset */
	PDC_DMA_ENABLE = (1 << 7),	/* DMA start/stop */

	PDC_MAX_HDMA = 32,		/* depth of deferred-HDMA queue */
	PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),

	/* I2C / SPD access for probing the on-board DIMM */
	PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
	PDC_I2C_CONTROL = 0x48,
	PDC_I2C_ADDR_DATA = 0x4C,
	PDC_DIMM0_CONTROL = 0x80,
	PDC_DIMM1_CONTROL = 0x84,
	PDC_SDRAM_CONTROL = 0x88,
	PDC_I2C_WRITE = 0,		/* master -> slave */
	PDC_I2C_READ = (1 << 6),	/* master <- slave */
	PDC_I2C_START = (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT = (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE = (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK = (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
	/* SPD EEPROM byte indices */
	PDC_DIMM_SPD_ROW_NUM = 3,
	PDC_DIMM_SPD_COLUMN_NUM = 4,
	PDC_DIMM_SPD_MODULE_ROW = 5,
	PDC_DIMM_SPD_TYPE = 11,
	PDC_DIMM_SPD_FRESH_RATE = 12,
	PDC_DIMM_SPD_BANK_NUM = 17,
	PDC_DIMM_SPD_CAS_LATENCY = 18,
	PDC_DIMM_SPD_ATTRIBUTE = 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
	PDC_DIMM_SPD_SYSTEM_FREQ = 126,
	PDC_CTL_STATUS = 0x08,
	PDC_DIMM_WINDOW_CTLR = 0x0C,
	PDC_TIME_CONTROL = 0x3C,
	PDC_TIME_PERIOD = 0x40,
	PDC_TIME_COUNTER = 0x44,
	PDC_GENERAL_CTLR = 0x484,
	PCI_PLL_INIT = 0x8A531824,
	PCI_X_TCOUNT = 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER = (1 << 10),
	PDC_TIMER_MODE_PERIODIC = 0,	/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE = (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE = (1 << 7),
	PDC_TIMER_MASK_INT = (1 << 5),
	PDC_TIMER_SEQ_MASK = 0x1f,	/* SEQ ID for timer */
	PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
			    PDC_TIMER_ENABLE |
			    PDC_TIMER_MASK_INT,
};
182
183 #define ECC_ERASE_BUF_SZ (128 * 1024)
184
/* Per-port private data. */
struct pdc_port_priv {
	/* staging buffer: packets + PRD header area, then the host S/G table */
	u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8 *pkt;		/* 128-byte DMA-coherent packet buffer */
	dma_addr_t pkt_dma;	/* bus address of @pkt */
};
190
/*
 * Per-host private data: a ring of HDMA requests waiting for the single
 * host-DMA copy engine.  Producer/consumer indices are masked with
 * PDC_HDMA_Q_MASK; the queue depth matches PDC_MAX_HDMA (32).
 */
struct pdc_host_priv {
	unsigned int doing_hdma;	/* nonzero while HDMA engine is busy */
	unsigned int hdma_prod;		/* ring producer index */
	unsigned int hdma_cons;		/* ring consumer index */
	struct {
		struct ata_queued_cmd *qc;	/* deferred command */
		unsigned int seq;		/* its sequence id */
		unsigned long pkt_ofs;		/* DIMM offset of HDMA packet */
	} hdma[32];
};
201
202
203 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
204 static void pdc_error_handler(struct ata_port *ap);
205 static void pdc_freeze(struct ata_port *ap);
206 static void pdc_thaw(struct ata_port *ap);
207 static int pdc_port_start(struct ata_port *ap);
208 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
209 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
210 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
211 static unsigned int pdc20621_dimm_init(struct ata_host *host);
212 static int pdc20621_detect_dimm(struct ata_host *host);
213 static unsigned int pdc20621_i2c_read(struct ata_host *host,
214 u32 device, u32 subaddr, u32 *pdata);
215 static int pdc20621_prog_dimm0(struct ata_host *host);
216 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
217 static void pdc20621_get_from_dimm(struct ata_host *host,
218 void *psource, u32 offset, u32 size);
219 static void pdc20621_put_to_dimm(struct ata_host *host,
220 void *psource, u32 offset, u32 size);
221 static void pdc20621_irq_clear(struct ata_port *ap);
222 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
223 static int pdc_softreset(struct ata_link *link, unsigned int *class,
224 unsigned long deadline);
225 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
226 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
227
228
/* SCSI host template: stock libata base SHT plus Promise PRD limits. */
static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};
234
235 /* TODO: inherit from base port_ops after converting to new EH */
/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,	/* polled/packet driven; no SFF lost-irq handling */
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};
256
/* Per-board capability table, indexed by board_* enum. */
static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};
269
/* PCI IDs this driver binds to (Promise PDC20621). */
static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};
275
/* PCI driver glue; removal is handled by generic libata helper. */
static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};
282
283
pdc_port_start(struct ata_port * ap)284 static int pdc_port_start(struct ata_port *ap)
285 {
286 struct device *dev = ap->host->dev;
287 struct pdc_port_priv *pp;
288
289 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
290 if (!pp)
291 return -ENOMEM;
292
293 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
294 if (!pp->pkt)
295 return -ENOMEM;
296
297 ap->private_data = pp;
298
299 return 0;
300 }
301
/*
 * pdc20621_ata_sg - emit the ATA engine's single-entry S/G table
 * @buf: per-port staging buffer
 * @portno: port number, selects the per-port DIMM data area
 * @total_len: transfer length in bytes
 *
 * The ATA engine always transfers one contiguous region of DIMM memory,
 * so its PRD table is a single address/length pair with EOT set.
 */
static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
				   unsigned int total_len)
{
	__le32 *sgt = (__le32 *)buf;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	u32 dimm_addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
			portno * PDC_DIMM_DATA_STEP;

	sgt[dw + 0] = cpu_to_le32(dimm_addr);
	sgt[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
}
316
/*
 * pdc20621_host_sg - emit the HDMA engine's DIMM-side S/G table
 * @buf: per-port staging buffer
 * @portno: port number, selects the per-port DIMM data area
 * @total_len: transfer length in bytes
 *
 * Mirror of pdc20621_ata_sg() for the host DMA packet: one contiguous
 * DIMM region, so a single PRD entry terminated with ATA_PRD_EOT.
 */
static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
				    unsigned int total_len)
{
	__le32 *sgt = (__le32 *)buf;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	u32 dimm_addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
			portno * PDC_DIMM_DATA_STEP;

	sgt[dw + 0] = cpu_to_le32(dimm_addr);
	sgt[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
}
331
/*
 * pdc20621_ata_pkt - build the ATA command packet in the staging buffer
 * @tf: taskfile describing the command
 * @devno: device number on the port (0/1); selects the DEV1 bit
 * @buf: per-port staging buffer (pp->dimm_buf)
 * @portno: port number; selects this port's DIMM window and seq id
 *
 * Writes the packet at offset PDC_DIMM_ATA_PKT and returns the byte
 * offset just past what was written, so the LBA/footer helpers can
 * continue appending taskfile register writes.
 */
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	/* DIMM address of this port's ATA-packet PRD table */
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;		/* DMA write */
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;		/* non-data command: no S/G table */
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;		/* no linked next packet */
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}
383
/*
 * pdc20621_host_pkt - build the Host DMA (HDMA) packet in the staging buffer
 * @tf: taskfile; only direction/protocol bits are consulted
 * @buf: per-port staging buffer (pp->dimm_buf)
 * @portno: port number; selects this port's DIMM window and seq id
 *
 * The HDMA packet is four little-endian dwords at PDC_DIMM_HOST_PKT:
 * control word, host-side S/G table address, DIMM-side S/G table
 * address, and a zero "next packet" link.  HDMA sequence ids are the
 * ATA sequence id plus 4 (see also pdc20621_packet_start()).
 */
static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	/* DIMM addresses of the host-side and DIMM-side S/G tables */
	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;			/* no linked next packet */
}
414
/*
 * pdc20621_dma_prep - build all packets/S-G tables for a DMA command
 * @qc: queued command (protocol ATA_PROT_DMA, already DMA-mapped)
 *
 * Builds, in the per-port staging buffer: the host-side S/G table from
 * the scatterlist, the HDMA packet + its DIMM-side S/G table, and the
 * ATA packet + its S/G table.  The whole header plus the host S/G
 * table is then copied into this port's window in DIMM memory via the
 * DIMM MMIO BAR.
 */
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table (address/length pairs; last entry gets EOT)
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;	/* table size in bytes */

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
		     i, sgt_len);
}
474
/*
 * pdc20621_nodata_prep - build the ATA packet for a non-data command
 * @qc: queued command (protocol ATA_PROT_NODATA)
 *
 * Like pdc20621_dma_prep() but with no scatterlist and no HDMA packet:
 * only the ATA packet is built and copied into the port's DIMM window.
 */
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
}
507
pdc20621_qc_prep(struct ata_queued_cmd * qc)508 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
509 {
510 switch (qc->tf.protocol) {
511 case ATA_PROT_DMA:
512 pdc20621_dma_prep(qc);
513 break;
514 case ATA_PROT_NODATA:
515 pdc20621_nodata_prep(qc);
516 break;
517 default:
518 break;
519 }
520
521 return AC_ERR_OK;
522 }
523
/*
 * __pdc20621_push_hdma - hand one HDMA packet to the copy engine
 * @qc: command the packet belongs to
 * @seq: sequence id to enable
 * @pkt_ofs: DIMM offset of the HDMA packet
 *
 * Caller must ensure the HDMA engine is free; use pdc20621_push_hdma()
 * for the queued variant.
 */
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	void __iomem *mmio = qc->ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *seqctl;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;
	seqctl = mmio + PDC_20621_SEQCTL + (seq * 4);

	/* arm the sequence interrupt, then submit the packet pointer */
	writel(0x00000001, seqctl);
	readl(seqctl);				/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}
541
/*
 * pdc20621_push_hdma - submit or queue an HDMA request
 * @qc: command the packet belongs to
 * @seq: sequence id
 * @pkt_ofs: DIMM offset of the HDMA packet
 *
 * There is a single HDMA copy engine shared by all ports.  If it is
 * idle, submit immediately; otherwise append to the deferred ring,
 * which pdc20621_pop_hdma() drains on completion interrupts.
 */
static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct pdc_host_priv *hpriv = qc->ap->host->private_data;

	if (!hpriv->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		hpriv->doing_hdma = 1;
	} else {
		unsigned int slot = hpriv->hdma_prod & PDC_HDMA_Q_MASK;

		hpriv->hdma[slot].qc = qc;
		hpriv->hdma[slot].seq = seq;
		hpriv->hdma[slot].pkt_ofs = pkt_ofs;
		hpriv->hdma_prod++;
	}
}
561
/*
 * pdc20621_pop_hdma - on HDMA completion, submit the next queued request
 * @qc: command whose HDMA transfer just completed (unused for indexing;
 *      the next request comes from the deferred ring)
 *
 * If the ring is empty, mark the engine idle so the next push submits
 * directly.
 */
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}
578
pdc20621_dump_hdma(struct ata_queued_cmd * qc)579 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
580 {
581 struct ata_port *ap = qc->ap;
582 unsigned int port_no = ap->port_no;
583 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
584
585 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
586 dimm_mmio += PDC_DIMM_HOST_PKT;
587
588 ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
589 readl(dimm_mmio), readl(dimm_mmio + 4),
590 readl(dimm_mmio + 8), readl(dimm_mmio + 12));
591 }
592
/*
 * pdc20621_packet_start - kick off execution of a prepared command
 * @qc: command whose packets are already staged in DIMM memory
 *
 * For DMA writes, step one is an HDMA copy from host memory into the
 * DIMM (sequence id + 4); the ATA packet is submitted later from the
 * HDMA completion interrupt.  For everything else (reads, non-data),
 * the ATA packet is submitted directly.
 */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;	/* HDMA sequence ids are ATA seq + 4 */

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
			     port_ofs + PDC_DIMM_HOST_PKT,
			     port_ofs + PDC_DIMM_HOST_PKT,
			     seq);
	} else {
		/* arm the sequence interrupt, then submit the ATA packet */
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
			     port_ofs + PDC_DIMM_ATA_PKT,
			     port_ofs + PDC_DIMM_ATA_PKT,
			     seq);
	}
}
633
/*
 * pdc20621_qc_issue - libata qc_issue hook
 * @qc: command to issue
 *
 * DMA and interrupt-driven non-data commands go through the packet
 * engine; polled non-data and PIO commands fall back to the generic
 * SFF issue path.  ATAPI DMA is impossible here (ATA_FLAG_NO_ATAPI).
 */
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;	/* polled: use SFF path below */
		fallthrough;
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();	/* ATAPI is rejected at scan time */
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}
655
/*
 * pdc20621_host_intr - per-port interrupt handling
 * @ap: port the interrupt is for
 * @qc: active command on that port
 * @doing_hdma: true if this interrupt is an HDMA (copy-engine)
 *              completion, false if it is an ATA-engine completion
 * @mmio: chip #0 register base (already offset by PDC_CHIP0_OFS)
 *
 * Drives the two-phase protocol described in the file header:
 * reads are ATA step then HDMA step; writes are HDMA step then ATA
 * step.  The final step of either direction completes the qc and pops
 * the next deferred HDMA request.  Returns 1 if the interrupt was
 * handled.
 */
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
				     readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);	/* HDMA seq id */
			ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
				     readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);	/* ATA seq id */
			ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
				     readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
				     readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}
735
/* Ack the port interrupt: reading the status register clears it. */
static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}
740
/*
 * pdc20621_interrupt - chip interrupt handler
 * @irq: irq number (unused)
 * @dev_instance: the ata_host
 *
 * Reads the SEQ interrupt mask (which also clears the interrupts) and
 * dispatches each asserted sequence id to its port.  Sequence ids 1-4
 * are ATA-engine completions for ports 0-3; ids 5-8 are HDMA
 * completions for the same ports (hence the i > 4 flag below).
 */
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	if (!host || !host->iomap[PDC_MMIO_BAR])
		return IRQ_NONE;

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);

	/* all-ones usually means the device is gone / not responding */
	if (mask == 0xffffffff)
		return IRQ_NONE;

	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;	/* ids 5-8 map back to ports 0-3 */
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		if (ap)
			ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
793
pdc_freeze(struct ata_port * ap)794 static void pdc_freeze(struct ata_port *ap)
795 {
796 void __iomem *mmio = ap->ioaddr.cmd_addr;
797 u32 tmp;
798
799 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
800
801 tmp = readl(mmio + PDC_CTLSTAT);
802 tmp |= PDC_MASK_INT;
803 tmp &= ~PDC_DMA_ENABLE;
804 writel(tmp, mmio + PDC_CTLSTAT);
805 readl(mmio + PDC_CTLSTAT); /* flush */
806 }
807
pdc_thaw(struct ata_port * ap)808 static void pdc_thaw(struct ata_port *ap)
809 {
810 void __iomem *mmio = ap->ioaddr.cmd_addr;
811 u32 tmp;
812
813 /* FIXME: start HDMA engine, if zero ATA engines running */
814
815 /* clear IRQ */
816 ioread8(ap->ioaddr.status_addr);
817
818 /* turn IRQ back on */
819 tmp = readl(mmio + PDC_CTLSTAT);
820 tmp &= ~PDC_MASK_INT;
821 writel(tmp, mmio + PDC_CTLSTAT);
822 readl(mmio + PDC_CTLSTAT); /* flush */
823 }
824
/*
 * pdc_reset_port - pulse the ATA engine reset bit for one port
 * @ap: port to reset
 *
 * Repeatedly sets PDC_RESET (up to 11 attempts, 100us apart) until a
 * readback shows it latched, then clears it again.  Note the loop
 * breaks *before* writing on the iteration where the bit is already
 * seen set, so the final clear always uses the last value read.
 */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	/* release reset */
	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}
848
pdc_softreset(struct ata_link * link,unsigned int * class,unsigned long deadline)849 static int pdc_softreset(struct ata_link *link, unsigned int *class,
850 unsigned long deadline)
851 {
852 pdc_reset_port(link->ap);
853 return ata_sff_softreset(link, class, deadline);
854 }
855
pdc_error_handler(struct ata_port * ap)856 static void pdc_error_handler(struct ata_port *ap)
857 {
858 if (!(ap->pflags & ATA_PFLAG_FROZEN))
859 pdc_reset_port(ap);
860
861 ata_sff_error_handler(ap);
862 }
863
pdc_post_internal_cmd(struct ata_queued_cmd * qc)864 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
865 {
866 struct ata_port *ap = qc->ap;
867
868 /* make DMA engine forget about the failed command */
869 if (qc->flags & ATA_QCFLAG_FAILED)
870 pdc_reset_port(ap);
871 }
872
pdc_check_atapi_dma(struct ata_queued_cmd * qc)873 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
874 {
875 u8 *scsicmd = qc->scsicmd->cmnd;
876 int pio = 1; /* atapi dma off by default */
877
878 /* Whitelist commands that may use DMA. */
879 switch (scsicmd[0]) {
880 case WRITE_12:
881 case WRITE_10:
882 case WRITE_6:
883 case READ_12:
884 case READ_10:
885 case READ_6:
886 case 0xad: /* READ_DVD_STRUCTURE */
887 case 0xbe: /* READ_CD */
888 pio = 0;
889 }
890 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
891 if (scsicmd[0] == WRITE_10) {
892 unsigned int lba =
893 (scsicmd[2] << 24) |
894 (scsicmd[3] << 16) |
895 (scsicmd[4] << 8) |
896 scsicmd[5];
897 if (lba >= 0xFFFF4FA2)
898 pio = 1;
899 }
900 return pio;
901 }
902
pdc_tf_load_mmio(struct ata_port * ap,const struct ata_taskfile * tf)903 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
904 {
905 WARN_ON(tf->protocol == ATA_PROT_DMA ||
906 tf->protocol == ATAPI_PROT_DMA);
907 ata_sff_tf_load(ap, tf);
908 }
909
910
pdc_exec_command_mmio(struct ata_port * ap,const struct ata_taskfile * tf)911 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
912 {
913 WARN_ON(tf->protocol == ATA_PROT_DMA ||
914 tf->protocol == ATAPI_PROT_DMA);
915 ata_sff_exec_command(ap, tf);
916 }
917
918
/*
 * pdc_sata_setup_port - fill in taskfile register addresses
 * @port: ioports structure to populate
 * @base: MMIO base of this port's register block
 *
 * Registers are spaced 4 bytes apart on this chip; the alternate
 * status / device control register sits at +0x38.
 */
static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	= base + 0x4;
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	= base + 0x38;
	port->ctl_addr		= base + 0x38;
}
935
936
/*
 * pdc20621_get_from_dimm - copy data out of DIMM memory via the 32K window
 * @host: the ata_host
 * @psource: destination buffer in host memory
 * @offset: byte offset within DIMM memory
 * @size: byte count to copy
 *
 * The DIMM is visible only through a 32K sliding window selected by
 * PDC_DIMM_WINDOW_CTLR; this walks the window across the requested
 * range: a possibly-partial first chunk, whole 32K chunks, then the
 * tail.
 *
 * NOTE(review): the `+ offset / 4` and `dist`/`size / 4` byte counts
 * passed to memcpy_fromio() look inconsistent (dividing a byte offset
 * by 4 against a byte-addressed __iomem pointer) — presumably a
 * long-standing quirk of how this window is wired; confirm against
 * hardware docs before touching.
 */
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	/* flush, then select the window page containing @offset */
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	/* first (possibly partial) chunk up to the window boundary */
	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);

	psource += dist;
	size -= dist;
	/* whole-window chunks */
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	/* trailing partial chunk */
	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, size / 4);
	}
}
986
987
/*
 * pdc20621_put_to_dimm - copy data into DIMM memory via the 32K window
 * @host: the ata_host
 * @psource: source buffer in host memory
 * @offset: byte offset within DIMM memory
 * @size: byte count to copy
 *
 * Write-direction counterpart of pdc20621_get_from_dimm(); each chunk
 * is followed by a PDC_GENERAL_CTLR write (FIFO flush).  The same
 * `/ 4` byte-count quirk applies here — see the NOTE on the read side.
 */
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	/* select the window page containing @offset */
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	/* first (possibly partial) chunk up to the window boundary */
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	/* whole-window chunks */
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	/* trailing partial chunk */
	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}
1036
1037
/*
 * pdc20621_i2c_read - read one byte from an I2C device (DIMM SPD EEPROM)
 * @host: the ata_host
 * @device: 7-bit I2C device address
 * @subaddr: register/byte index within the device
 * @pdata: out: the byte read (in bits 7:0)
 *
 * Busy-polls the completion bit up to 1000 times.  Returns 1 on
 * success, 0 on timeout.
 */
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			/* transfer done: data byte is in the ADDR_DATA reg */
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;	/* timed out */
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}
1072
1073
pdc20621_detect_dimm(struct ata_host * host)1074 static int pdc20621_detect_dimm(struct ata_host *host)
1075 {
1076 u32 data = 0;
1077 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1078 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1079 if (data == 100)
1080 return 100;
1081 } else
1082 return 0;
1083
1084 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1085 if (data <= 0x75)
1086 return 133;
1087 } else
1088 return 0;
1089
1090 return 0;
1091 }
1092
1093
/*
 * pdc20621_prog_dimm0 - program the DIMM0 module control register from SPD
 * @host: ATA host whose chip carries the local DIMM
 *
 * Reads a set of SPD EEPROM bytes over i2c and packs timing/geometry
 * fields into PDC_DIMM0_CONTROL.  Returns the computed DIMM size in MB.
 *
 * NOTE(review): spd0[] is not zero-initialized and the return value of
 * pdc20621_i2c_read() is ignored, so a failed SPD read leaves stack
 * garbage in the corresponding entry — confirm whether reads can fail
 * here in practice.
 */
static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];	/* sparse table indexed by SPD byte offset */
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	/* register-name / SPD-byte-offset pairs to fetch over i2c */
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	/*
	 * Pack geometry/timing into the control word: columns (bits 2:0),
	 * registered-module flag (bit 3), rows (bits 5:4), banks (bit 6),
	 * module rows (bit 7), row precharge (bits 9:8), max(RAS/CAS delay)
	 * (bits 11:10), active-to-precharge (bits 13:12).  Delay fields are
	 * rounded up from ns to 10 ns units.
	 */
	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8) ;
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	/* CAS latency (bits 15:14): highest supported of CL3/CL2/CL1 */
	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	 */

	/* log2 of module bytes: cols + module rows + rows + banks + 3 */
	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);	/* size field, 16 MB units */
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);	/* flush posted write */
	return size;
}
1157
1158
/*
 * pdc20621_prog_dimm_global - program the DIMM module global control register
 * @host: ATA host whose chip carries the local DIMM
 *
 * Writes the default global control value, enables ECC when the SPD type
 * byte says the module has it, then kicks off DIMM initialization and
 * polls (up to ~5.5 s) for completion.  Returns 0 on success, 1 on i2c
 * failure or initialization timeout.
 */
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	   Set To Default : DIMM Module Global Control Register (0x022259F1)
	   DIMM Arbitration Disable (bit 20)
	   DIMM Data/Control Output Driving Selection (bit12 - bit15)
	   Refresh Enable (bit 17)
	*/

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);	/* flush posted write */

	/* Turn on for ECC */
	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		dev_err(host->dev,
			"Failed in i2c read: device=%#x, subaddr=%#x\n",
			PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	/* SPD type 0x02 indicates an ECC-capable module */
	if (spd0 == 0x02) {
		data |= (0x01 << 16);	/* ECC enable bit */
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		/* NOTE(review): informational message logged at err level */
		dev_err(host->dev, "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	/* hardware clears bit 19 when initialization completes */
	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);	/* back off progressively: 100..1000 ms */
	}
	return error;
}
1210
1211
/*
 * pdc20621_dimm_init - bring up the on-board DIMM (PLL, SPD, ECC)
 * @host: ATA host to initialize
 *
 * Measures the PCI bus frequency with the chip's timer to pick PLL
 * parameters, detects and programs the DIMM from its SPD data,
 * optionally runs a read/write pattern test, and zero-fills the DIMM
 * when ECC is present so every location has valid check bits.
 *
 * Returns 0 on success, 1 on any failure (no DIMM, programming error,
 * or allocation failure during ECC erase).
 */
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	   When timer is enabled, counter is decreased every internal
	   clock cycle.
	*/

	tcount = readl(mmio + PDC_TIME_COUNTER);
	dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
	   register should be >= (0xffffffff - 3x10^8).
	*/
	if (tcount >= PCI_X_TCOUNT) {
		/* PCI-X: derive the PLL feedback parameter from the
		 * measured internal clock */
		ticks = (time_period - tcount);
		dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);

		clock = (ticks / 300000);	/* ticks per 10 us => 10 * MHz */
		dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
			clock, clock);

		clock = (clock * 33);
		dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
			clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;	/* plain PCI: fixed PLL value */

	/* Initialize PLL. */
	dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	   Read SPD of DIMM by I2C interface,
	   and program the DIMM Module Controller.
	*/
	if (!(speed = pdc20621_detect_dimm(host))) {
		dev_err(host->dev, "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		dev_err(host->dev,
			"Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}

	/* optional write-then-read-back smoke test of the DIMM window */
	if (dimm_test) {
		u8 test_parttern1[40] =
			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
			'N','o','t',' ','Y','e','t',' ',
			'D','e','f','i','n','e','d',' ',
			'1','.','1','0',
			'9','8','0','3','1','6','1','2',0,0};
		u8 test_parttern2[40] = {0};

		/* clear both test locations first */
		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);

		/* pattern 1/2: write at 0x10040, read back both locations */
		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
			 test_parttern2[1], &(test_parttern2[2]));
		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
				       40);
		dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
			 test_parttern2[0],
			 test_parttern2[1], &(test_parttern2[2]));

		/* pattern 3: write and read back at 0x40 */
		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
			 test_parttern2[0],
			 test_parttern2[1], &(test_parttern2[2]));
	}

	/* ECC initialization. */

	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		dev_err(host->dev,
			"Failed in i2c read: device=%#x, subaddr=%#x\n",
			PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	if (spd0 == 0x02) {
		void *buf;
		dev_dbg(host->dev, "Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		/* zero-fill the whole DIMM so ECC check bits are valid */
		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;
		while (addr < length) {
			pdc20621_put_to_dimm(host, buf, addr,
					     ECC_ERASE_BUF_SZ);
			addr += ECC_ERASE_BUF_SZ;
		}
		kfree(buf);
		dev_dbg(host->dev, "Finish ECC initialization\n");
	}
	return 0;
}
1355
1356
/*
 * pdc_20621_init - final chip setup: DIMM window page and HDMA reset
 * @host: ATA host to initialize
 *
 * Selects the fixed page for the 32 KB DIMM access window, then pulses
 * the Host DMA engine's reset bit.  The readl after each writel flushes
 * the posted PCI write before the delay / next access.
 */
static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	udelay(10);	/* hold reset asserted briefly */

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}
1387
/*
 * pdc_sata_init_one - PCI probe entry point
 * @pdev: PCI device being probed
 * @ent: matching entry from pdc_sata_pci_tbl (driver_data indexes
 *       pdc_port_info)
 *
 * Allocates a 4-port ATA host, maps the MMIO and DIMM BARs, initializes
 * the on-board DIMM and the chip, and activates the host.  All resources
 * are devm-/pcim-managed, so error paths simply return.
 *
 * Returns 0 on success or a negative errno.
 */
static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep device enabled for the owner */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* each of the 4 ports has an 0x80-byte register block at 0x200+ */
	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* NOTE(review): -ENOMEM for a DIMM init failure is a loose fit;
	 * callers only see probe failure, so it is harmless in practice */
	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}
1445
/* module registration and metadata */
module_pci_driver(pdc_sata_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
1453