USB fixes for 3.15-rc3
Here are a number of USB fixes for 3.15-rc3. The majority are gadget fixes, as we didn't get any of those in for 3.15-rc2. The others are all over the place, and there are a number of new device id additions as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iEYEABECAAYFAlNcUKIACgkQMUfUDdst+ynSsQCeKmkO1od6fPs5uHPSEwns+pqB
+B4Anip6dHYATN5GCv+p3JWLZkQgT9lD
=s4E0
-----END PGP SIGNATURE-----

Merge tag 'usb-3.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB fixes from Greg KH:
 "Here are a number of USB fixes for 3.15-rc3. The majority are gadget
  fixes, as we didn't get any of those in for 3.15-rc2. The others are
  all over the place, and there are a number of new device id additions
  as well."

* tag 'usb-3.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (35 commits)
  usb: option: add and update a number of CMOTech devices
  usb: option: add Alcatel L800MA
  usb: option: add Olivetti Olicard 500
  usb: qcserial: add Sierra Wireless MC7305/MC7355
  usb: qcserial: add Sierra Wireless MC73xx
  usb: qcserial: add Sierra Wireless EM7355
  USB: io_ti: fix firmware download on big-endian machines
  usb/xhci: fix compilation warning when !CONFIG_PCI && !CONFIG_PM
  xhci: extend quirk for Renesas cards
  xhci: Switch Intel Lynx Point ports to EHCI on shutdown.
  usb: xhci: Prefer endpoint context dequeue pointer over stopped_trb
  phy: core: make NULL a valid phy reference if !CONFIG_GENERIC_PHY
  phy: fix kernel oops in phy_lookup()
  phy: restore OMAP_CONTROL_PHY dependencies
  phy: exynos: fix building as a module
  USB: serial: fix sysfs-attribute removal deadlock
  usb: wusbcore: fix panic in wusbhc_chid_set
  usb: wusbcore: convert nested lock to use spin_lock instead of spin_lock_irq
  uwb: don't call spin_unlock_irq in a USB completion handler
  usb: chipidea: coordinate usb phy initialization for different phy type
  ...
commit fefb82756e
29 changed files with 304 additions and 162 deletions
@@ -33,6 +33,7 @@ config PHY_MVEBU_SATA
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
Enable this to add support for the PHY part present in the control
module. This driver has API to power on the USB2 PHY and to write to
@@ -13,8 +13,9 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-samsung-usb2.o
obj-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
obj-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
obj-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
phy-exynos-usb2-y += phy-samsung-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
@@ -64,6 +64,9 @@ static struct phy *phy_lookup(struct device *device, const char *port)
class_dev_iter_init(&iter, phy_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
phy = to_phy(dev);
if (!phy->init_data)
continue;
count = phy->init_data->num_consumers;
consumers = phy->init_data->consumers;
while (count--) {
@@ -276,6 +276,39 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
}
}

/**
* ci_usb_phy_init: initialize phy according to different phy type
* @ci: the controller
*
* This function returns an error code if usb_phy_init has failed
*/
static int ci_usb_phy_init(struct ci_hdrc *ci)
{
int ret;

switch (ci->platdata->phy_mode) {
case USBPHY_INTERFACE_MODE_UTMI:
case USBPHY_INTERFACE_MODE_UTMIW:
case USBPHY_INTERFACE_MODE_HSIC:
ret = usb_phy_init(ci->transceiver);
if (ret)
return ret;
hw_phymode_configure(ci);
break;
case USBPHY_INTERFACE_MODE_ULPI:
case USBPHY_INTERFACE_MODE_SERIAL:
hw_phymode_configure(ci);
ret = usb_phy_init(ci->transceiver);
if (ret)
return ret;
break;
default:
ret = usb_phy_init(ci->transceiver);
}

return ret;
}

/**
* hw_device_reset: resets chip (execute without interruption)
* @ci: the controller

@@ -543,8 +576,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
hw_phymode_configure(ci);
if (ci->platdata->phy)
ci->transceiver = ci->platdata->phy;
else

@@ -564,7 +595,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
ret = usb_phy_init(ci->transceiver);
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
return ret;
@@ -821,6 +821,7 @@ static void dwc3_complete(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
dwc3_event_buffers_setup(dwc);
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:

@@ -828,7 +829,6 @@ static void dwc3_complete(struct device *dev)
/* FALLTHROUGH */
case USB_DR_MODE_HOST:
default:
dwc3_event_buffers_setup(dwc);
break;
}
@@ -187,15 +187,12 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
* improve this algorithm so that we better use the internal
* FIFO space
*/
for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
struct dwc3_ep *dep = dwc->eps[num];
int fifo_number = dep->number >> 1;
for (num = 0; num < dwc->num_in_eps; num++) {
/* bit0 indicates direction; 1 means IN ep */
struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
int mult = 1;
int tmp;
if (!(dep->number & 1))
continue;
if (!(dep->flags & DWC3_EP_ENABLED))
continue;

@@ -224,8 +221,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
dep->name, last_fifo_depth, fifo_size & 0xffff);
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
fifo_size);
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
last_fifo_depth += (fifo_size & 0xffff);
}
@@ -745,6 +745,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
*/
struct usb_gadget *gadget = epfile->ffs->gadget;

spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
if (epfile->ep != ep) {
spin_unlock_irq(&epfile->ffs->eps_lock);
return -ESHUTDOWN;
}
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.

@@ -752,6 +758,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
data_len = io_data->read ?
usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
io_data->len;
spin_unlock_irq(&epfile->ffs->eps_lock);

data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
@@ -377,7 +377,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
if (skb2)
rndis_add_hdr(skb2);

dev_kfree_skb_any(skb);
dev_kfree_skb(skb);
return skb2;
}
@@ -1219,6 +1219,10 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
struct fsl_udc *udc;

udc = container_of(gadget, struct fsl_udc, gadget);

if (!udc->vbus_active)
return -EOPNOTSUPP;

udc->softconnect = (is_on != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),

@@ -2532,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
if (!udc_controller)
return -ENODEV;

usb_del_gadget_udc(&udc_controller->gadget);
udc_controller->done = &done;
usb_del_gadget_udc(&udc_controller->gadget);

fsl_udc_clk_release();
@@ -2043,6 +2043,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
return -ESRCH;

/* fake probe to determine $CHIP */
CHIP = NULL;
usb_gadget_probe_driver(&probe_driver);
if (!CHIP)
return -ENODEV;
@@ -35,6 +35,7 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>

#include "u_rndis.h"

#undef VERBOSE_DEBUG
@@ -48,8 +48,6 @@
#define UETH__VERSION "29-May-2008"

#define GETHER_NAPI_WEIGHT 32

struct eth_dev {
/* lock is held while accessing port_usb
*/

@@ -74,7 +72,6 @@ struct eth_dev {
struct sk_buff_head *list);

struct work_struct work;
struct napi_struct rx_napi;

unsigned long todo;
#define WORK_RX_MEMORY 0

@@ -256,16 +253,18 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
spin_lock_irqsave(&dev->req_lock, flags);
list_add(&req->list, &dev->rx_reqs);
spin_unlock_irqrestore(&dev->req_lock, flags);
}
return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct sk_buff *skb = req->context;
struct sk_buff *skb = req->context, *skb2;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
bool rx_queue = 0;

switch (status) {

@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
if (!status)
rx_queue = 1;
skb = NULL;

skb2 = skb_dequeue(&dev->rx_frames);
while (skb2) {
if (status < 0
|| ETH_HLEN > skb2->len
|| skb2->len > VLAN_ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb2->len);
dev_kfree_skb_any(skb2);
goto next_frame;
}
skb2->protocol = eth_type_trans(skb2, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb2->len;

/* no buffer copies needed, unless hardware can't
* use skb buffers.
*/
status = netif_rx(skb2);
next_frame:
skb2 = skb_dequeue(&dev->rx_frames);
}
break;

/* software-driven interface shutdown */

@@ -313,20 +334,22 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
/* FALLTHROUGH */

default:
rx_queue = 1;
dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}

if (skb)
dev_kfree_skb_any(skb);
if (!netif_running(dev->net)) {
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);

if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
__napi_schedule(&dev->rx_napi);
req = NULL;
}
if (req)
rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)

@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
int rx_counts = 0;

/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty(&dev->rx_reqs)) {

if (++rx_counts > qlen(dev->gadget, dev->qmult))
break;

req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);

if (rx_submit(dev, req, gfp_flags) < 0) {
spin_lock_irqsave(&dev->req_lock, flags);
list_add(&req->list, &dev->rx_reqs);
spin_unlock_irqrestore(&dev->req_lock, flags);
defer_kevent(dev, WORK_RX_MEMORY);
return;
}

@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
spin_unlock_irqrestore(&dev->req_lock, flags);
}

static int gether_poll(struct napi_struct *napi, int budget)
{
struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
struct sk_buff *skb;
unsigned int work_done = 0;
int status = 0;

while ((skb = skb_dequeue(&dev->rx_frames))) {
if (status < 0
|| ETH_HLEN > skb->len
|| skb->len > VLAN_ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb->len);
dev_kfree_skb_any(skb);
continue;
}
skb->protocol = eth_type_trans(skb, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;

status = netif_rx_ni(skb);
}

if (netif_running(dev->net)) {
rx_fill(dev, GFP_KERNEL);
work_done++;
}

if (work_done < budget)
napi_complete(&dev->rx_napi);

return work_done;
}

static void eth_work(struct work_struct *work)
{
struct eth_dev *dev = container_of(work, struct eth_dev, work);

@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
/* and open the tx floodgates */
atomic_set(&dev->tx_qlen, 0);
netif_wake_queue(dev->net);
napi_enable(&dev->rx_napi);
}

static int eth_open(struct net_device *net)

@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
unsigned long flags;

VDBG(dev, "%s\n", __func__);
napi_disable(&dev->rx_napi);
netif_stop_queue(net);

DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",

@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
return ERR_PTR(-ENOMEM);

dev = netdev_priv(net);
netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);

@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
return ERR_PTR(-ENOMEM);

dev = netdev_priv(net);
netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);

@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
struct sk_buff *skb;

WARN_ON(!dev);
if (!dev)

@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
spin_lock(&dev->req_lock);
}
spin_unlock(&dev->req_lock);

spin_lock(&dev->rx_frames.lock);
while ((skb = __skb_dequeue(&dev->rx_frames)))
dev_kfree_skb_any(skb);
spin_unlock(&dev->rx_frames.lock);

link->in_ep->driver_data = NULL;
link->in_ep->desc = NULL;
@@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
ss_opts->isoc_interval = gzero_options.isoc_interval;
ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
ss_opts->isoc_mult = gzero_options.isoc_mult;
ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
ss_opts->bulk_buflen = gzero_options.bulk_buflen;

func_ss = usb_get_function(func_inst_ss);
@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;

xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {

@@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
pdev->subsystem_device == 0xc0cd)
pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
dma_addr_t addr;
u64 hw_dequeue;

ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);

@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
state->new_cycle_state = 0;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding segment containing stopped TRB.");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}

/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,

@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
hw_dequeue = le64_to_cpu(ctx->stream_ring);
} else {
struct xhci_ep_ctx *ep_ctx
= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
hw_dequeue = le64_to_cpu(ep_ctx->deq);
}

/* Find virtual address and segment of hardware dequeue pointer */
state->new_deq_seg = ep_ring->deq_seg;
state->new_deq_ptr = ep_ring->dequeue;
while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
!= (dma_addr_t)(hw_dequeue & ~0xf)) {
next_trb(xhci, ep_ring, &state->new_deq_seg,
&state->new_deq_ptr);
if (state->new_deq_ptr == ep_ring->dequeue) {
WARN_ON(1);
return;
}
}
/*
* Find cycle state for last_trb, starting at old cycle state of
* hw_dequeue. If there is only one segment ring, find_trb_seg() will
* return immediately and cannot toggle the cycle state if this search
* wraps around, so add one more toggle manually in that case.
*/
state->new_cycle_state = hw_dequeue & 0x1;
if (ep_ring->first_seg == ep_ring->first_seg->next &&
cur_td->last_trb < state->new_deq_ptr)
state->new_cycle_state ^= 0x1;

state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding segment containing last TRB in TD.");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
state->new_deq_ptr, &state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}

/* Increment to find next TRB after last_trb. Cycle if appropriate. */
trb = &state->new_deq_ptr->generic;
if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

/*
* If there is only one segment in a ring, find_trb_seg()'s while loop
* will not run, and it will return before it has a chance to see if it
* needs to toggle the cycle bit. It can't tell if the stalled transfer
* ended just before the link TRB on a one-segment ring, or if the TD
* wrapped around the top of the ring, because it doesn't have the TD in
* question. Look for the one-segment case where stalled TRB's address
* is greater than the new dequeue pointer address.
*/
if (ep_ring->first_seg == ep_ring->first_seg->next &&
state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
state->new_cycle_state ^= 0x1;
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cycle state = 0x%x", state->new_cycle_state);

/* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"New dequeue segment = %p (virtual)",
state->new_deq_seg);

@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}

@@ -867,11 +868,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

/* Clear stopped_td and stopped_trb if endpoint is not halted */
if (!(ep->ep_state & EP_HALTED)) {
/* Clear stopped_td if endpoint is not halted */
if (!(ep->ep_state & EP_HALTED))
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
}

/*
* Drop the lock and complete the URBs in the cancelled TD list.

@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;

xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

ep->stopped_td = NULL;
ep->stopped_trb = NULL;
ep->stopped_stream = 0;

xhci_ring_cmd_db(xhci);

@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {

@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
@@ -408,16 +408,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

@@ -2954,7 +2954,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_ring_cmd_db(xhci);
}
virt_ep->stopped_td = NULL;
virt_ep->stopped_trb = NULL;
virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
@@ -470,8 +470,9 @@ static int dsps_musb_exit(struct musb *musb)
struct dsps_glue *glue = dev_get_drvdata(dev->parent);

del_timer_sync(&glue->timer);

usb_phy_shutdown(musb->xceiv);
debugfs_remove_recursive(glue->dbgfs_root);

return 0;
}

@@ -708,8 +709,6 @@ static int dsps_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);

debugfs_remove_recursive(glue->dbgfs_root);

return 0;
}
@@ -316,7 +316,13 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
struct musb *musb = glue_to_musb(glue);
struct device *dev = musb->controller;

pm_runtime_get_sync(dev);
omap_musb_set_mailbox(glue);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}

static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)

@@ -416,6 +422,7 @@ static int omap2430_musb_init(struct musb *musb)
omap_musb_set_mailbox(glue);

phy_init(musb->phy);
phy_power_on(musb->phy);

pm_runtime_put_noidle(musb->controller);
return 0;

@@ -478,6 +485,7 @@ static int omap2430_musb_exit(struct musb *musb)
del_timer_sync(&musb_idle_timer);

omap2430_low_level_exit(musb);
phy_power_off(musb->phy);
phy_exit(musb->phy);

return 0;
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/delay.h>
#include "am35x-phy-control.h"

struct am335x_control_usb {

@@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
}

writel(val, usb_ctrl->phy_reg + reg);

/*
* Give the PHY ~1ms to complete the power up operation.
* Tests have shown unstable behaviour if other USB PHY related
* registers are written too shortly after such a transition.
*/
if (on)
mdelay(1);
}

static const struct phy_control ctrl_am335x = {
@@ -132,6 +132,9 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
if (!IS_ERR(phy))
phy = ERR_PTR(-ENODEV);

goto err0;
}
@@ -28,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/swab.h>
#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>

@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
{
int status = 0;
__u8 read_length;
__be16 be_start_address;
u16 be_start_address;

dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);

@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
if (read_length > 1) {
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
}
be_start_address = cpu_to_be16(start_address);
/*
* NOTE: Must use swab as wIndex is sent in little-endian
* byte order regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
(__force __u16)be_start_address,
be_start_address,
buffer, read_length);

if (status) {

@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int write_length;
__be16 be_start_address;
u16 be_start_address;

/* We can only send a maximum of 1 aligned byte page at a time */

@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);

/* Write first page */
be_start_address = cpu_to_be16(start_address);
/*
* Write first page.
*
* NOTE: Must use swab as wIndex is sent in little-endian byte order
* regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
(__force __u16)be_start_address,
be_start_address,
buffer, write_length);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);

@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);

/* Write next page */
be_start_address = cpu_to_be16(start_address);
/*
* Write next page.
*
* NOTE: Must use swab as wIndex is sent in little-endian byte
* order regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
(__force __u16)be_start_address,
be_start_address,
buffer, write_length);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);

@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
if (rom_desc->Type == desc_type)
return start_address;

start_address = start_address + sizeof(struct ti_i2c_desc)
+ rom_desc->Size;
start_address = start_address + sizeof(struct ti_i2c_desc) +
le16_to_cpu(rom_desc->Size);

} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);

@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
__u16 i;
__u8 cs = 0;

for (i = 0; i < rom_desc->Size; i++)
for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
cs = (__u8)(cs + buffer[i]);

if (cs != rom_desc->CheckSum) {

@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
break;

if ((start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size) > TI_MAX_I2C_SIZE) {
le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
break;

@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
le16_to_cpu(rom_desc->Size),
buffer);
if (status)
break;

@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size;
le16_to_cpu(rom_desc->Size);

} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));

@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)

/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
le16_to_cpu(rom_desc->Size), buffer);
if (status)
goto exit;
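The swab16() conversions above follow the driver's own note: wIndex travels on the wire in little-endian order regardless of host byte order, so the start address has to be byte-swapped unconditionally rather than converted with cpu_to_be16(), which is a no-op on big-endian machines (the case the io_ti firmware-download fix addresses). A minimal user-space sketch of that distinction; this is illustration only, not kernel code, and my_swab16() merely mirrors the kernel's swab16():

#include <stdint.h>
#include <stdio.h>

/* Unconditional byte swap: same result on every host. */
static uint16_t my_swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t start_address = 0x1234;

	/* Always prints 0x3412, whatever the host endianness. */
	printf("swapped: 0x%04x\n", my_swab16(start_address));

	/* cpu_to_be16() would swap only on a little-endian host and
	 * leave the value untouched on a big-endian one, which is why
	 * the old firmware-download path only worked on little-endian
	 * machines. */
	return 0;
}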
@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
#define QUALCOMM_VENDOR_ID 0x05C6

#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6008 0x6008
#define CMOTECH_PRODUCT_6280 0x6280
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
#define CMOTECH_PRODUCT_6003 0x6003
#define CMOTECH_PRODUCT_6004 0x6004
#define CMOTECH_PRODUCT_6005 0x6005
#define CMOTECH_PRODUCT_CGU_628A 0x6006
#define CMOTECH_PRODUCT_CHE_628S 0x6007
#define CMOTECH_PRODUCT_CMU_301 0x6008
#define CMOTECH_PRODUCT_CHU_628 0x6280
#define CMOTECH_PRODUCT_CHU_628S 0x6281
#define CMOTECH_PRODUCT_CDU_680 0x6803
#define CMOTECH_PRODUCT_CDU_685A 0x6804
#define CMOTECH_PRODUCT_CHU_720S 0x7001
#define CMOTECH_PRODUCT_7002 0x7002
#define CMOTECH_PRODUCT_CHU_629K 0x7003
#define CMOTECH_PRODUCT_7004 0x7004
#define CMOTECH_PRODUCT_7005 0x7005
#define CMOTECH_PRODUCT_CGU_629 0x7006
#define CMOTECH_PRODUCT_CHU_629S 0x700a
#define CMOTECH_PRODUCT_CHU_720I 0x7211
#define CMOTECH_PRODUCT_7212 0x7212
#define CMOTECH_PRODUCT_7213 0x7213
#define CMOTECH_PRODUCT_7251 0x7251
#define CMOTECH_PRODUCT_7252 0x7252
#define CMOTECH_PRODUCT_7253 0x7253

#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003

@@ -287,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define ALCATEL_PRODUCT_L100V 0x011e
#define ALCATEL_PRODUCT_L800MA 0x0203

#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002

@@ -349,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
#define OLIVETTI_PRODUCT_OLICARD500 0xc00b

/* Celot products */
#define CELOT_VENDOR_ID 0x211f

@@ -502,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};

static const struct option_blacklist_info net_intf0_blacklist = {
.reserved = BIT(0),
};

static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};

@@ -1035,8 +1064,47 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
.driver_info = (kernel_ulong_t)&net_intf0_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },

@@ -1500,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),

@@ -1545,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
.driver_info = (kernel_ulong_t)&net_intf6_blacklist
},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist
},
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -136,9 +136,18 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
@@ -1347,10 +1347,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
static void usb_serial_deregister(struct usb_serial_driver *device)
{
pr_info("USB Serial deregistering driver %s\n", device->description);

mutex_lock(&table_lock);
list_del(&device->driver_list);
usb_serial_bus_deregister(device);
mutex_unlock(&table_lock);

usb_serial_bus_deregister(device);
}

/**
@@ -301,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)

if (chid)
result = uwb_radio_start(&wusbhc->pal);
else
else if (wusbhc->uwb_rc)
uwb_radio_stop(&wusbhc->pal);

return result;
@@ -2390,10 +2390,10 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
done) {

dev_info(dev, "Control EP stall. Queue delayed work.\n");
spin_lock_irq(&wa->xfer_list_lock);
spin_lock(&wa->xfer_list_lock);
/* move xfer from xfer_list to xfer_errored_list. */
list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
spin_unlock_irq(&wa->xfer_list_lock);
spin_unlock(&wa->xfer_list_lock);
spin_unlock_irqrestore(&xfer->lock, flags);
queue_work(wusbd, &wa->xfer_error_work);
} else {
@@ -59,6 +59,7 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
struct uwb_rceb *reply, ssize_t reply_size)
{
struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
unsigned long flags;

if (r != NULL) {
if (r->bResultCode != UWB_RC_RES_SUCCESS)

@@ -67,14 +68,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
} else
dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

spin_lock_irq(&rc->rsvs_lock);
spin_lock_irqsave(&rc->rsvs_lock, flags);
if (rc->set_drp_ie_pending > 1) {
rc->set_drp_ie_pending = 0;
uwb_rsv_queue_update(rc);
uwb_rsv_queue_update(rc);
} else {
rc->set_drp_ie_pending = 0;
rc->set_drp_ie_pending = 0;
}
spin_unlock_irq(&rc->rsvs_lock);
spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}

/**
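The wusbcore and uwb changes above follow one rule: code reachable from a USB completion handler must not use the spin_lock_irq()/spin_unlock_irq() pair, because the handler may already run with interrupts disabled and the unconditional unlock would re-enable them. A hedged sketch of the pattern the fixes converge on; the function and lock names here are made up for illustration:

#include <linux/spinlock.h>
#include <linux/usb.h>

/* Hypothetical lock shared between the completion path and process context. */
static DEFINE_SPINLOCK(example_lock);

static void example_urb_complete(struct urb *urb)
{
	unsigned long flags;

	/* Save and restore the interrupt state instead of assuming it;
	 * spin_unlock_irq() would unconditionally re-enable interrupts
	 * even when this completion ran in hard-IRQ context. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... update driver state examined from process context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}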
@@ -174,21 +174,29 @@ void devm_of_phy_provider_unregister(struct device *dev,
#else
static inline int phy_pm_runtime_get(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_pm_runtime_get_sync(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_pm_runtime_put(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_pm_runtime_put_sync(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

@@ -204,21 +212,29 @@ static inline void phy_pm_runtime_forbid(struct phy *phy)

static inline int phy_init(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_exit(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_power_on(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}

static inline int phy_power_off(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}
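With the !CONFIG_GENERIC_PHY stubs above now accepting a NULL phy (per "phy: core: make NULL a valid phy reference if !CONFIG_GENERIC_PHY"), a consumer can treat an optional PHY uniformly. A hedged consumer-side sketch; struct example_ctrl and example_controller_start() are hypothetical, only the phy_* calls are the real generic PHY API:

#include <linux/phy/phy.h>

struct example_ctrl {
	struct phy *phy;	/* may legitimately be NULL when no generic PHY is described */
};

static int example_controller_start(struct example_ctrl *ctrl)
{
	int ret;

	/* With NULL accepted, the stubs return 0 here instead of -ENOSYS,
	 * so the optional-PHY case needs no special-casing in the caller. */
	ret = phy_init(ctrl->phy);
	if (ret)
		return ret;

	ret = phy_power_on(ctrl->phy);
	if (ret)
		phy_exit(ctrl->phy);

	return ret;
}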