// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 */

#include <linux/dma-mapping.h>

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */

#define HSS1_PKT_RXFREE0_QUEUE	1

#define NPE_PKT_MODE_RAW	1

#define PKT_HDLC_CRC_32		0x2 /* default = CRC-16 */
/* 56k data endianness - which bit unused: high (default) or low */
/* Number of packetized clients, default = 1 */
/*
 * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
 * The clock sequence consists of (C - B) states of 0s and 1s, each state is
 * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
 * (A + 1) bits wide.
 *
 * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
 * minimum freq = 66.666 MHz / (A + 1)
 *
 * Example: A = 2, B = 2, C = 7:
 * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
 *
 * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3
 * = 19 bits, each 7.5 ns long, i.e. 142.5 ns before the sequence repeats.
 */
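/*
 * Editor's sketch of the formula above, rearranged into the pure integer
 * form that check_clock() also uses further down; the helper name and the
 * hardcoded 66.666 MHz base clock are illustrative assumptions, not part
 * of the driver:
 *
 *	freq = 66.666 MHz / (A + (B + 1) / (C + 1))
 *	     = 66666000 * (C + 1) / (A * (C + 1) + B + 1)	[Hz]
 */
static u32 hss_example_clock_hz(u32 a, u32 b, u32 c)
{
	u64 rate = 66666000ULL * (c + 1);

	/* do_div() divides in place, same idiom as check_clock() below */
	do_div(rate, a * (c + 1) + b + 1);
	return rate;
}
/* hss_example_clock_hz(2, 2, 7) = 533328000 / 19 = ~28.07 MHz, as above */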
#define TDMMAP_HDLC		1 /* HDLC - packetized */
#define TDMMAP_VOICE56K		2 /* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3 /* Voice64K - 8-bit channelized */

#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving ... */
struct port {
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +	\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +	\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
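/*
 * Editor's note: desc_tab is a single DMA-coherent block holding RX_DESCS
 * receive descriptors followed by TX_DESCS transmit descriptors, so the
 * *_phys and *_ptr macros above are two views of the same table. A queue
 * entry carries only a descriptor's physical address; queue_get_desc()
 * recovers the index with plain pointer arithmetic:
 *
 *	n_desc = (phys - tab_phys) / sizeof(struct desc);
 */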
static inline struct port *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
	u32 *val = (u32 *)msg;
	if (npe_send_message(port->npe, msg, what)) {
		pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
			port->id, val[0], val[1], npe_name(port->npe));
		BUG();
	}
}
static void hss_config_set_lut(struct port *port)
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

	msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
	hss_npe_send(port, &msg, "HSS_SET_RX_LUT");

static void hss_config(struct port *port)
	msg.hss_port = port->id;
	if (port->clock_type == CLOCK_INT)
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	msg.hss_port = port->id;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		     (port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	msg.hss_port = port->id;
	msg.data32 = port->clock_reg;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	msg.hss_port = port->id;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	msg.hss_port = port->id;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
		pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");

static void hss_set_hdlc_cfg(struct port *port)
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");

static u32 hss_get_status(struct port *port)
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		pr_crit("HSS-%i: unable to read HSS status\n", port->id);

static void hss_start_hdlc(struct port *port)
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");

static void hss_stop_hdlc(struct port *port)
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */

static int hss_load_firmware(struct port *port)
	if (port->initialized)
	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))

	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
	port->initialized = 1;

/* in debug_pkt(): */
	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);

/* in debug_desc(): */
		phys, desc->next, desc->buf_len, desc->pkt_len,
		desc->data, desc->status, desc->error_count);

static inline int queue_get_desc(unsigned int queue, struct port *port,
		return -1;

	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
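/*
 * Worked example (editor's note): on the byte-swapped path the buffer
 * handed to the NPE was rounded down to a 32-bit boundary, so the unmap
 * must cover the same rounded region. With desc->data = 0x1002 and
 * buf_len = 5:
 *
 *	address = 0x1002 & ~3                = 0x1000
 *	length  = ALIGN((0x1002 & 3) + 5, 4) = ALIGN(7, 4) = 8
 */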
/* in hss_hdlc_set_carrier(): */
	struct port *port = dev_to_port(netdev);

	port->carrier = carrier;
	if (!port->loopback) {

/* in hss_hdlc_rx_irq(): */
	struct port *port = dev_to_port(dev);

	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);

/* in hss_hdlc_poll(): */
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;

	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			       " napi_complete\n", dev->name);
			       dev->name);
			       dev->name);

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif

		switch (desc->status) {
			phys = dma_map_single(&dev->dev, skb->data,
			if (dma_mapping_error(&dev->dev, phys)) {
			skb = netdev_alloc_skb(dev, desc->pkt_len);
				dev->stats.rx_dropped++;
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		default: /* FIXME - remove printk */
			       desc->status, desc->error_count);
			dev->stats.rx_errors++;
			/* put the desc back on the RX-free queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);

		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
		dma_sync_single_for_cpu(&dev->dev, desc->data,
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* put the new buffer on RX-free queue */
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);

/* in hss_hdlc_txdone_irq(): */
	struct port *port = dev_to_port(dev);

	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
			dev->name, port->tx_buff_tab[n_desc]);
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
			       " ready\n", dev->name);

/* in hss_hdlc_xmit(): */
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;

	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev->stats.tx_errors++;

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
	mem = skb->data;
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
		dev->stats.tx_dropped++;
	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
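/*
 * Editor's note on the arithmetic above: memcpy_swab32() copies whole
 * 32-bit words while reversing the byte order inside each word, so the
 * copy starts at skb->data rounded down to a word boundary and the byte
 * count must be word-aligned; a plausible reading of the elided line is
 * bytes = ALIGN(offset + len, 4). E.g. skb->data = 0x2003, len = 10 gives
 * offset = 3 and bytes = ALIGN(13, 4) = 16, i.e. four swabbed words.
 */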
	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev->stats.tx_dropped++;

	n = queue_get_desc(txreadyq, port, 1);
	desc = tx_desc_ptr(port, n);

		port->tx_buff_tab[n] = skb;
		port->tx_buff_tab[n] = mem;

	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);

		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
			       dev->name);

	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);

static int request_hdlc_queues(struct port *port)
	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);

	qmgr_release_queue(port->plat->txreadyq);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].rxfree);
		port->netdev->name);

static void release_hdlc_queues(struct port *port)
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);

static int init_hdlc_queues(struct port *port)
	dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

		struct desc *desc = rx_desc_ptr(port, i);

		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
			return -ENOMEM;

		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			return -EIO;
		port->rx_buff_tab[i] = buff;

static void destroy_hdlc_queues(struct port *port)
	if (port->desc_tab) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff = port->rx_buff_tab[i];

			dma_unmap_single(&port->netdev->dev,
					 desc->data, RX_SIZE,

		struct desc *desc = tx_desc_ptr(port, i);
		buffer_t *buff = port->tx_buff_tab[i];

			dma_unmap_tx(port, desc);
	dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
	port->desc_tab = NULL;

/* in hss_hdlc_open(): */
	struct port *port = dev_to_port(dev);

	if ((err = hss_load_firmware(port)))
	if ((err = request_hdlc_queues(port)))
	if ((err = init_hdlc_queues(port)))

	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,

		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
	qmgr_enable_irq(queue_ids[port->id].txdone);

	hss_set_hdlc_cfg(port);
	hss_config(port);
	hss_start_hdlc(port);
	napi_schedule(&port->napi);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);

/* in hss_hdlc_close(): */
	struct port *port = dev_to_port(dev);

	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_disable(&port->napi);
	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */
	while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
		buffs--;

	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);

/* in hss_hdlc_attach(): */
	struct port *port = dev_to_port(dev);

		return -EINVAL;
		port->hdlc_cfg = 0;
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return -EINVAL;

/* in check_clock(): a is 10-bit, b is 10-bit, c is 12-bit */
	new_rate = timer_freq * (u64)(c + 1);
	do_div(new_rate, a * (c + 1) + b + 1);
	new_diff = abs((u32)new_rate - rate);
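/*
 * Editor's note: check_clock() evaluates one (a, b, c) candidate with the
 * integer math above and records it through its best/reg outputs whenever
 * |new_rate - rate| beats the closest match so far. find_best_clock()
 * below inverts the same formula to derive c directly for each (a, b)
 * pair:
 *
 *	rate = timer_freq * (c + 1) / (a * (c + 1) + b + 1)
 *   =>	c = (b + 1) * rate / (timer_freq - rate * a) - 1
 */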
/* in find_best_clock(): */
	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
		check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);

	a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */

		check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);

		u64 c = (b + 1) * (u64)rate;

		do_div(c, timer_freq - rate * a);
		c--;
		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
			    !check_clock(timer_freq, rate, a - 1, 1, 1, best,
		if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,

/* in hss_hdlc_ioctl(): */
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);

	switch (ifr->ifr_settings.type) {
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = port->clock_rate;
		new_line.loopback = port->loopback;
			return -EFAULT;

			return -EPERM;
			return -EFAULT;

		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);
			return -EINVAL; /* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
			find_best_clock(port->plat->timer_freq,
					&port->clock_rate, &port->clock_reg);
			port->clock_rate = 0;
			port->clock_reg = CLK42X_SPEED_2048KHZ;
		port->loopback = new_line.loopback;

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);

/* in hss_init_one(): */
	struct port *port;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 0;
	port->clock_reg = CLK42X_SPEED_2048KHZ;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	platform_set_drvdata(pdev, port);

	npe_release(port->npe);
	kfree(port);

/* in hss_remove_one(): */
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	kfree(port);

/* in hss_init_module(): */
		return -ENODEV;