various dwc (OTG) driver fixups

Signed-off-by: Tim Harvey <tharvey@gateworks.com>
Signed-off-by: Imre Kaloz <kaloz@openwrt.org>

SVN-Revision: 39892
commit 68022cac64 (parent 47dfbb5bc6)
Imre Kaloz committed 2014-03-12 13:37:40 +00:00
8 changed files with 442 additions and 95 deletions

View File

@@ -721,9 +721,10 @@ static ssize_t hcddump_show( struct device *_dev,
 				char *buf)
 {
 #ifndef DWC_DEVICE_ONLY
-	struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \
-	dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \
-	dwc_otg_hcd_dump_state(otg_dev->hcd);
+	struct platform_device *pdev = container_of(_dev, struct platform_device, dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	dwc_otg_hcd_t *otg_dev = hcd_to_dwc_otg_hcd(hcd);
+	dwc_otg_hcd_dump_state(otg_dev);
 #endif
 	return sprintf( buf, "HCD Dump\n" );
 }
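
The sysfs handler above changes because the platform drvdata now holds the struct usb_hcd rather than a dwc_otg_device_t, so the dump function must first recover the HCD and then map it to the driver state. hcd_to_dwc_otg_hcd() is not shown in this patch; in this driver family it is usually just an accessor over the private area that usb_create_hcd() allocates behind struct usb_hcd, roughly (a sketch, assuming the state lives in hcd_priv):

	static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd)
	{
		/* dwc_otg keeps its per-controller state in the hcd_priv
		 * area allocated together with struct usb_hcd */
		return (dwc_otg_hcd_t *)hcd->hcd_priv;
	}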

View File

@@ -66,7 +66,7 @@
 #include "otg_regs.h"
 #include "otg_cil.h"
 #include "otg_pcd.h"
+#include "otg_hcd.h"
 
 /**
  * This function is called to initialize the DWC_otg CSR data
@@ -1156,12 +1156,13 @@ void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if)
 			DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
 			do {
 				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
-				if (++count > 1000)
+				if (++count > 200)
 				{
 					DWC_ERROR("%s: Unable to clear halt on channel %d\n",
 						  __func__, i);
 					break;
 				}
+				udelay(100);
 			}
 			while (hcchar.b.chen);
 		}
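
The halt-wait above changes from a tight spin (up to 1000 back-to-back register reads) to at most 200 polls spaced 100 us apart, i.e. a bounded wait of roughly 20 ms before the driver gives up and logs an error. The same bounded-poll shape is used for the FIFO-flush and core-reset loops later in this file (with udelay(1) there). As a generic sketch of the pattern (an illustrative helper, not part of the driver):

	/* Poll until all bits in 'mask' clear, at most max_polls times,
	 * sleeping poll_us between reads; total bound ~ max_polls * poll_us. */
	static int dwc_poll_clear(volatile uint32_t *reg, uint32_t mask,
				  int max_polls, int poll_us)
	{
		int count = 0;
		while (dwc_read_reg32(reg) & mask) {
			if (++count > max_polls)
				return -1;	/* timed out; caller logs and bails */
			udelay(poll_us);
		}
		return 0;
	}

With max_polls = 200 and poll_us = 100 this matches the new channel-halt loop.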
@@ -1211,6 +1212,8 @@ void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 	hc_intr_mask.b.chhltd = 1;
 	if (core_if->dma_enable) {
 		hc_intr_mask.b.ahberr = 1;
+		/* Always record the first nak interrupt for bulk
+		 * packets. */
 		if (hc->error_state && !hc->do_split &&
 		    hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
 			hc_intr_mask.b.ack = 1;
@@ -1375,7 +1378,7 @@ void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
  * @param hc Host channel to halt.
  * @param halt_status Reason for halting the channel.
  */
-void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if,
+void dwc_otg_hc_halt(dwc_otg_hcd_t *hcd,
 		     dwc_hc_t *hc,
 		     dwc_otg_halt_status_e halt_status)
 {
@@ -1385,6 +1388,7 @@ void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if,
 	dwc_otg_hc_regs_t *hc_regs;
 	dwc_otg_core_global_regs_t *global_regs;
 	dwc_otg_host_global_regs_t *host_global_regs;
+	dwc_otg_core_if_t *core_if = hcd->core_if;
 
 	hc_regs = core_if->host_if->hc_regs[hc->hc_num];
 	global_regs = core_if->core_global_regs;
@@ -1477,6 +1481,9 @@ void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if,
 	hc->halt_status = halt_status;
 
+	if (!hc->halt_on_queue && !hc->halt_pending && hc->qh->nak_frame != 0xffff)
+		hcd->nakking_channels--;
+
 	if (hcchar.b.chen) {
 		hc->halt_pending = 1;
 		hc->halt_on_queue = 0;
@@ -1744,9 +1751,9 @@ void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
 
 	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
 	DWC_DEBUGPL(DBG_HCDV, "  Xfer Size: %d\n", hctsiz.b.xfersize);
 	DWC_DEBUGPL(DBG_HCDV, "  Num Pkts: %d\n", hctsiz.b.pktcnt);
 	DWC_DEBUGPL(DBG_HCDV, "  Start PID: %d\n", hctsiz.b.pid);
 
 	if (core_if->dma_enable) {
 		dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff);
@@ -1774,6 +1781,10 @@ void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 	/* Set host channel enable after all other setup is complete. */
 	hcchar.b.chen = 1;
 	hcchar.b.chdis = 0;
+
+	/* Memory barrier before enabling the channel, to ensure the
+	 * channel is set up correctly. */
+	mb();
 	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
 
 	hc->xfer_started = 1;
@@ -1786,7 +1797,7 @@ void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 	}
 
 #ifdef DEBUG
-	/* Start a timer for this transfer. */
+	/* Start a timer for this transfer */
 	core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout;
 	core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
 	core_if->hc_xfer_info[hc->hc_num].hc = hc;
@@ -1844,6 +1855,10 @@ int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 			hcchar.b.chen = 1;
 			hcchar.b.chdis = 0;
 			DWC_DEBUGPL(DBG_HCDV, "  IN xfer: hcchar = 0x%08x\n", hcchar.d32);
+
+			/* Memory barrier before enabling the channel, to
+			 * ensure the channel is set up correctly. */
+			mb();
 			dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
 			hc->requests++;
 			return 1;
@@ -1891,6 +1906,10 @@ void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
 	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
 	hcchar.b.chen = 1;
 	hcchar.b.chdis = 0;
+
+	/* Memory barrier before enabling the channel, to ensure the
+	 * channel is set up correctly. */
+	mb();
 	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
 }
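
The three mb() insertions above (start_transfer, continue_transfer, do_ping) all enforce the same ordering contract: every HCTSIZ/HCDMA/HCCHAR setup write must be visible to the core before the write that sets hcchar.b.chen starts the transfer. Condensed to its essentials (names taken from the hunks above, shown only as a sketch):

	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);             /* size, pktcnt, PID */
	dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); /* DMA address */

	hcchar.b.chen = 1;
	hcchar.b.chdis = 0;
	mb();	/* commit the setup writes before enabling the channel */
	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);

Without the barrier, a write buffer between the CPU and the AHB could let the channel-enable reach the core ahead of the transfer-size or DMA-address writes.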
@@ -2089,9 +2108,10 @@ void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
 		if(core_if->dma_desc_enable) {
 			diepmsk.b.bna = 1;
 		}
 		/*
 		if(core_if->dma_enable) {
-			doepmsk.b.nak = 1;
+			diepmsk.b.nak = 1;
 		}
 		*/
 		dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32);
@@ -3567,6 +3587,7 @@ void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if,
 				  dwc_read_reg32(&global_regs->gnptxsts));
 			break;
 		}
+		udelay(1);
 	}
 	while (greset.b.txfflsh == 1);
@@ -3599,6 +3620,7 @@ void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if)
 				  greset.d32);
 			break;
 		}
+		udelay(1);
 	}
 	while (greset.b.rxfflsh == 1);
@@ -3640,6 +3662,7 @@ void dwc_otg_core_reset(dwc_otg_core_if_t *core_if)
 			       greset.d32);
 			break;
 		}
+		udelay(1);
 	}
 	while (greset.b.csftrst == 1);

View File

@@ -45,6 +45,8 @@
 #include "linux/timer.h"
 #endif
 
+struct dwc_otg_hcd;
+
 /**
  * @file
  * This file contains the interface to the Core Interface Layer.
@@ -402,7 +404,7 @@ typedef struct dwc_otg_core_params
 	 */
 	int32_t dma_burst_size;	/* Translate this to GAHBCFG values */
 //#define dwc_param_dma_burst_size_default 32
-#define dwc_param_dma_burst_size_default 1
+#define dwc_param_dma_burst_size_default 32
 
 	/**
 	 * Specifies the maximum speed of operation in host and device mode.
@@ -876,7 +878,7 @@ extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep
  */
 /**@{*/
 extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
-extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if,
+extern void dwc_otg_hc_halt(struct dwc_otg_hcd *_hcd,
 			    dwc_hc_t *_hc,
 			    dwc_otg_halt_status_e _halt_status);
 extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);

View File

@@ -176,8 +176,10 @@ static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
 		     qtd_item = qh->qtd_list.next) {
 			qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
 			if (qtd->urb != NULL) {
+				SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
 				dwc_otg_hcd_complete_urb(hcd, qtd->urb,
 							 -ETIMEDOUT);
+				SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
 			}
 			dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
 		}
@@ -589,6 +591,7 @@ static void hcd_reinit(dwc_otg_hcd_t *hcd)
 	hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
 	hcd->non_periodic_channels = 0;
 	hcd->periodic_channels = 0;
+	hcd->nakking_channels = 0;
 
 	/*
 	 * Put all channels in the free channel list and clean up channel
@@ -853,10 +856,10 @@ static void dump_channel_info(dwc_otg_hcd_t *hcd,
 //OTG host require the DMA addr is DWORD-aligned,
 //patch it if the buffer is not DWORD-aligned
 inline
-void hcd_check_and_patch_dma_addr(struct urb *urb){
+int hcd_check_and_patch_dma_addr(struct urb *urb){
 	if((!urb->transfer_buffer)||!urb->transfer_dma||urb->transfer_dma==0xffffffff)
-		return;
+		return 0;
 
 	if(((u32)urb->transfer_buffer)& 0x3){
 		/*
@@ -881,11 +884,12 @@ void hcd_check_and_patch_dma_addr(struct urb *urb){
 			kfree(urb->aligned_transfer_buffer);
 		}
 		urb->aligned_transfer_buffer=kmalloc(urb->aligned_transfer_buffer_length,GFP_KERNEL|GFP_DMA|GFP_ATOMIC);
-		urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE);
 		if(!urb->aligned_transfer_buffer){
 			DWC_ERROR("Cannot alloc required buffer!!\n");
-			BUG();
+			//BUG();
+			return -1;
 		}
+		urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE);
 		//printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length);
 	}
 	urb->transfer_dma=urb->aligned_transfer_dma;
@@ -894,6 +898,7 @@ void hcd_check_and_patch_dma_addr(struct urb *urb){
 			dma_sync_single_for_device(NULL,urb->transfer_dma,urb->transfer_buffer_length,DMA_TO_DEVICE);
 		}
 	}
+	return 0;
 }
@@ -910,7 +915,15 @@ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
 	int retval = 0;
 	dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
 	dwc_otg_qtd_t *qtd;
+	unsigned long flags;
+
+	SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
+
+	if (urb->hcpriv != NULL) {
+		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
+		return -ENOMEM;
+	}
 
 #ifdef DEBUG
 	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
 		dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
@@ -918,13 +931,19 @@ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
 #endif
 	if (!dwc_otg_hcd->flags.b.port_connect_status) {
 		/* No longer connected. */
+		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
 		return -ENODEV;
 	}
 
-	hcd_check_and_patch_dma_addr(urb);
+	if (hcd_check_and_patch_dma_addr(urb)) {
+		DWC_ERROR("Unable to check and patch dma addr\n");
+		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
+		return -ENOMEM;
+	}
+
 	qtd = dwc_otg_hcd_qtd_create(urb);
 	if (qtd == NULL) {
 		DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
+		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
 		return -ENOMEM;
 	}
@@ -934,7 +953,7 @@ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
 			  "Error status %d\n", retval);
 		dwc_otg_hcd_qtd_free(qtd);
 	}
-
+	SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
 	return retval;
 }
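
dwc_otg_hcd_urb_enqueue() now holds the HCD lock across the whole submission and releases it on every early-return path. The SPIN_LOCK_IRQSAVE/SPIN_UNLOCK_IRQRESTORE macros are the driver's OS-abstraction wrappers; on Linux they can be assumed to expand to the plain irqsave spinlock calls (an assumption — their definition is outside this patch):

	/* presumed mapping, for readers unfamiliar with the wrappers */
	#define SPIN_LOCK_IRQSAVE(lock, flags)       spin_lock_irqsave(lock, flags)
	#define SPIN_UNLOCK_IRQRESTORE(lock, flags)  spin_unlock_irqrestore(lock, flags)

The irqsave variants matter here because the same lock is also taken from the interrupt handlers in the HCD interrupt code later in this commit.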
@@ -948,6 +967,7 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
 	dwc_otg_qtd_t *urb_qtd;
 	dwc_otg_qh_t *qh;
 	struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
+	int rc;
 
 	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
@@ -958,10 +978,6 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
 	urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv;
 	qh = (dwc_otg_qh_t *)ep->hcpriv;
 
-	if (urb_qtd == NULL) {
-		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
-		return 0;
-	}
 #ifdef DEBUG
 	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
 		dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue");
@@ -971,7 +987,7 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
 	}
 #endif
 
-	if (urb_qtd == qh->qtd_in_process) {
+	if (qh && urb_qtd == qh->qtd_in_process) {
 		/* The QTD is in process (it has been assigned to a channel). */
 
 		if (dwc_otg_hcd->flags.b.port_connect_status) {
@@ -982,7 +998,7 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
 			 * written to halt the channel since the core is in
 			 * device mode.
 			 */
-			dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel,
+			dwc_otg_hc_halt(dwc_otg_hcd, qh->channel,
 					DWC_OTG_HC_XFER_URB_DEQUEUE);
 		}
 	}
@@ -992,22 +1008,28 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
 	 * schedule if it has any remaining QTDs.
 	 */
 	dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd);
-	if (urb_qtd == qh->qtd_in_process) {
+	if (qh && urb_qtd == qh->qtd_in_process) {
+		/* Note that dwc_otg_hcd_qh_deactivate() locks the spin_lock again */
+		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
 		dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
 		qh->channel = NULL;
 		qh->qtd_in_process = NULL;
 	} else {
-		if (list_empty(&qh->qtd_list))
+		if (qh && list_empty(&qh->qtd_list)) {
 			dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
-		SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
+		}
 	}
 
-	urb->hcpriv = NULL;
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
 
-	/* Higher layer software sets URB status. */
-	usb_hcd_giveback_urb(hcd, urb, status);
+	if (!rc) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+	}
+
+	urb->hcpriv = NULL;
+	SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
+
+	if (!rc) {
+		usb_hcd_giveback_urb(hcd, urb, status);
+	}
+
 	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
 		DWC_PRINT("Called usb_hcd_giveback_urb()\n");
 		DWC_PRINT("  urb->status = %d\n", urb->status);
@@ -2035,15 +2057,19 @@ static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
 	dwc_otg_qtd_t *qtd;
 	struct urb *urb;
 
-	DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh);
+	DWC_DEBUGPL(DBG_HCD_FLOOD, "%s(%p,%p)\n", __func__, hcd, qh);
 
 	hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
+	qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+	urb = qtd->urb;
+	if (!urb){
+		return;
+	}
 
 	/* Remove the host channel from the free list. */
 	list_del_init(&hc->hc_list_entry);
 
-	qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
-	urb = qtd->urb;
 	qh->channel = hc;
 	qh->qtd_in_process = qtd;
@@ -2202,16 +2228,24 @@ static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
 dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd)
 {
 	struct list_head *qh_ptr;
-	dwc_otg_qh_t *qh;
+	dwc_otg_qh_t *qh = NULL;
 	int num_channels;
 	dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
+	uint16_t cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+	unsigned long flags;
+	int include_nakd, channels_full;
+
+	/* This condition has once been observed, but the cause was
+	 * never determined. Check for it here, to collect debug data if
+	 * it occurs again. */
+	WARN_ON_ONCE(hcd->non_periodic_channels < 0);
+	check_nakking(hcd, __FUNCTION__, "start");
 
 #ifdef DEBUG_SOF
 	DWC_DEBUGPL(DBG_HCD, "  Select Transactions\n");
 #endif
 
-	spin_lock(&hcd->lock);
+	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
 
 	/* Process entries in the periodic ready list. */
 	qh_ptr = hcd->periodic_sched_ready.next;
 	while (qh_ptr != &hcd->periodic_sched_ready &&
 	       !list_empty(&hcd->free_hc_list)) {
@@ -2234,36 +2268,139 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd)
 	 * schedule. Some free host channels may not be used if they are
 	 * reserved for periodic transfers.
 	 */
-	qh_ptr = hcd->non_periodic_sched_inactive.next;
 	num_channels = hcd->core_if->core_params->host_channels;
-	while (qh_ptr != &hcd->non_periodic_sched_inactive &&
-	       (hcd->non_periodic_channels <
-		num_channels - hcd->periodic_channels) &&
-	       !list_empty(&hcd->free_hc_list)) {
 
-		qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
-		assign_and_init_hc(hcd, qh);
+	/* Go over the queue twice: once while not including nak'd
+	 * entries, once while including them. This is so a retransmit of
+	 * an entry that has received a nak is scheduled only after all
+	 * new entries.
+	 */
+	channels_full = 0;
+	for (include_nakd = 0; include_nakd < 2 && !channels_full; ++include_nakd) {
+		qh_ptr = hcd->non_periodic_sched_inactive.next;
+		while (qh_ptr != &hcd->non_periodic_sched_inactive) {
+			qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+			qh_ptr = qh_ptr->next;
 
-		/*
-		 * Move the QH from the non-periodic inactive schedule to the
-		 * non-periodic active schedule.
-		 */
-		qh_ptr = qh_ptr->next;
-		list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
+			/* If a nak'd frame is in the queue for 100ms, forget
+			 * about its nak status, to prevent the situation where
+			 * a nak'd frame never gets resubmitted because there
+			 * are continuously non-nakking transfers available.
+			 */
+			if (qh->nak_frame != 0xffff &&
+			    dwc_frame_num_gt(cur_frame, qh->nak_frame + 800))
+				qh->nak_frame = 0xffff;
 
-		if (ret_val == DWC_OTG_TRANSACTION_NONE) {
-			ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
-		} else {
-			ret_val = DWC_OTG_TRANSACTION_ALL;
+			/* In the first pass, ignore NAK'd retransmits
+			 * altogether, to give them lower priority. */
+			if (!include_nakd && qh->nak_frame != 0xffff)
+				continue;
+
+			/*
+			 * Check to see if this is a NAK'd retransmit, in which case
+			 * we hold off on bulk retransmissions to reduce NAK interrupt
+			 * overhead for cheeky devices that just hold off using NAKs.
+			 */
+			if (dwc_full_frame_num(qh->nak_frame) == dwc_full_frame_num(dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd))))
+				continue;
+
+			/* Ok, we found a candidate for scheduling. Is there a
+			 * free channel? */
+			if (hcd->non_periodic_channels >=
+			    num_channels - hcd->periodic_channels ||
+			    list_empty(&hcd->free_hc_list)) {
+				channels_full = 1;
+				break;
+			}
+
+			/* When retrying a NAK'd transfer, we give it a fair
+			 * chance of completing again. */
+			qh->nak_frame = 0xffff;
+			assign_and_init_hc(hcd, qh);
+
+			/*
+			 * Move the QH from the non-periodic inactive schedule to the
+			 * non-periodic active schedule.
+			 */
+			list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
+
+			if (ret_val == DWC_OTG_TRANSACTION_NONE) {
+				ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
+			} else {
+				ret_val = DWC_OTG_TRANSACTION_ALL;
+			}
+
+			hcd->non_periodic_channels++;
 		}
+	}
 
-		hcd->non_periodic_channels++;
+	if (hcd->core_if->dma_enable && channels_full &&
+	    hcd->periodic_channels + hcd->nakking_channels >= num_channels) {
+		/* There are items queued, but all channels are either
+		 * reserved for periodic or have received NAKs. This
+		 * means that it could take an indefinite amount of time
+		 * before a channel is actually freed (since in DMA
+		 * mode, the hardware takes care of retries), so we take
+		 * action here by forcing a nakking channel to halt to
+		 * give other transfers a chance to run. */
+		dwc_otg_qtd_t *qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+		struct urb *urb = qtd->urb;
+		dwc_hc_t *hc = dwc_otg_halt_nakking_channel(hcd);
+
+		if (hc)
+			DWC_DEBUGPL(DBG_HCD, "Out of Host Channels for non-periodic transfer - Halting channel %d (dev %d ep%d%s) to service qh %p (dev %d ep%d%s)\n", hc->hc_num, hc->dev_addr, hc->ep_num, (hc->ep_is_in ? "in" : "out"), qh, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), (usb_pipein(urb->pipe) != 0) ? "in" : "out");
 	}
-	spin_unlock(&hcd->lock);
+
+	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
 	return ret_val;
 }
+
+/**
+ * Halt a bulk channel that is blocking on NAKs to free up space.
+ *
+ * This will decrement hcd->nakking_channels immediately, but
+ * hcd->non_periodic_channels is not decremented until the channel is
+ * actually halted.
+ *
+ * Returns the halted channel.
+ */
+dwc_hc_t *dwc_otg_halt_nakking_channel(dwc_otg_hcd_t *hcd) {
+	int num_channels, i;
+	uint16_t cur_frame;
+
+	cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+	num_channels = hcd->core_if->core_params->host_channels;
+
+	for (i = 0; i < num_channels; i++) {
+		int channel = (hcd->last_channel_halted + 1 + i) % num_channels;
+		dwc_hc_t *hc = hcd->hc_ptr_array[channel];
+		if (hc->xfer_started
+		    && !hc->halt_on_queue
+		    && !hc->halt_pending
+		    && hc->qh->nak_frame != 0xffff) {
+			dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_NAK);
+			/* Store the last channel halted to fairly
+			 * rotate the channel to halt. This prevents
+			 * the scenario where there are three blocking
+			 * endpoints and only two free host channels,
+			 * where the blocking endpoint that gets hc 3
+			 * will never be halted, while the other two
+			 * endpoints will be fighting over the other
+			 * host channel. */
+			hcd->last_channel_halted = channel;
+			/* Update nak_frame, so this frame is kept at
+			 * low priority for a period of time starting
+			 * now. */
+			hc->qh->nak_frame = cur_frame;
+			return hc;
+		}
+	}
+	dwc_otg_hcd_dump_state(hcd);
+	return NULL;
+}
 
 /**
  * Attempts to queue a single transaction request for a host channel
  * associated with either a periodic or non-periodic transfer. This function
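
The nak_frame bookkeeping above relies on the (micro)frame counter semantics: HFNUM frame numbers advance every 125 us and wrap at 14 bits, so the "queued for 100ms" test is written as dwc_frame_num_gt(cur_frame, qh->nak_frame + 800), since 800 x 125 us = 100 ms, and a plain integer comparison would misfire across the wrap. The helpers used here are defined elsewhere in the driver; their usual shape is (shown for reference, a sketch rather than a quote of this tree):

	#define DWC_HFNUM_MAX_FRNUM 0x3FFF

	/* frame1 later than frame2, computed modulo 2^14, so the result
	 * is well-defined across counter wraparound */
	static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2)
	{
		return (frame1 != frame2) &&
		       (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) <
			(DWC_HFNUM_MAX_FRNUM >> 1));
	}

	/* full 1 ms frame number: mask off the 3 microframe bits */
	static inline uint16_t dwc_full_frame_num(uint16_t frame)
	{
		return (frame & DWC_HFNUM_MAX_FRNUM) >> 3;
	}

0xffff works as the "no NAK seen" sentinel because real frame numbers never exceed 0x3FFF.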
@@ -2298,7 +2435,7 @@ static int queue_transaction(dwc_otg_hcd_t *hcd,
 		/* Don't queue a request if the channel has been halted. */
 		retval = 0;
 	} else if (hc->halt_on_queue) {
-		dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status);
+		dwc_otg_hc_halt(hcd, hc, hc->halt_status);
 		retval = 0;
 	} else if (hc->do_ping) {
 		if (!hc->xfer_started) {
@@ -2446,12 +2583,12 @@ static void process_periodic_channels(dwc_otg_hcd_t *hcd)
 	dwc_otg_host_global_regs_t *host_regs;
 	host_regs = hcd->core_if->host_if->host_global_regs;
 
-	DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+	DWC_DEBUGPL(DBG_HCD_FLOOD, "Queue periodic transactions\n");
 #ifdef DEBUG
 	tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
-	DWC_DEBUGPL(DBG_HCDV, "  P Tx Req Queue Space Avail (before queue): %d\n",
+	DWC_DEBUGPL(DBG_HCD_FLOOD, "  P Tx Req Queue Space Avail (before queue): %d\n",
 		    tx_status.b.ptxqspcavail);
-	DWC_DEBUGPL(DBG_HCDV, "  P Tx FIFO Space Avail (before queue): %d\n",
+	DWC_DEBUGPL(DBG_HCD_FLOOD, "  P Tx FIFO Space Avail (before queue): %d\n",
 		    tx_status.b.ptxfspcavail);
 #endif
@@ -2586,7 +2723,12 @@ void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd,
  */
 void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status)
 {
+	unsigned long flags;
+
+	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
+
 #ifdef DEBUG
 	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
 		DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
 			  __func__, urb, usb_pipedevice(urb->pipe),
@@ -2609,10 +2751,12 @@ void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status)
 		memcpy(urb->transfer_buffer,urb->aligned_transfer_buffer,urb->actual_length);
 	}
 
+	usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
 	urb->status = status;
 	urb->hcpriv = NULL;
+	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
 	usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
 }
/* /*
@ -2674,7 +2818,7 @@ void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
DWC_PRINT(" Num channels: %d\n", num_channels); DWC_PRINT(" Num channels: %d\n", num_channels);
for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
dwc_hc_t *hc = hcd->hc_ptr_array[i]; dwc_hc_t *hc = hcd->hc_ptr_array[i];
DWC_PRINT(" Channel %d:\n", i); DWC_PRINT(" Channel %d: %p\n", i, hc);
DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
hc->dev_addr, hc->ep_num, hc->ep_is_in); hc->dev_addr, hc->ep_num, hc->ep_is_in);
DWC_PRINT(" speed: %d\n", hc->speed); DWC_PRINT(" speed: %d\n", hc->speed);
@@ -2696,6 +2840,8 @@ void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
 		DWC_PRINT("    xact_pos: %d\n", hc->xact_pos);
 		DWC_PRINT("    requests: %d\n", hc->requests);
 		DWC_PRINT("    qh: %p\n", hc->qh);
+		if (hc->qh)
+			DWC_PRINT("    nak_frame: %x\n", hc->qh->nak_frame);
 		if (hc->xfer_started) {
 			hfnum_data_t hfnum;
 			hcchar_data_t hcchar;
@@ -2735,6 +2881,8 @@ void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
 	}
 	DWC_PRINT("  non_periodic_channels: %d\n", hcd->non_periodic_channels);
 	DWC_PRINT("  periodic_channels: %d\n", hcd->periodic_channels);
+	DWC_PRINT("  nakking_channels: %d\n", hcd->nakking_channels);
+	DWC_PRINT("  last_channel_halted: %d\n", hcd->last_channel_halted);
 	DWC_PRINT("  periodic_usecs: %d\n", hcd->periodic_usecs);
 	np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
 	DWC_PRINT("  NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail);

View File

@@ -194,6 +194,11 @@ typedef struct dwc_otg_qh {
 	 */
 	uint16_t sched_frame;
 
+	/*
+	 * Frame in which a NAK was received on this queue head, used to
+	 * minimise NAK retransmission.
+	 */
+	uint16_t nak_frame;
+
 	/** (micro)frame at which last start split was initialized. */
 	uint16_t start_split_frame;
@@ -327,6 +332,21 @@ typedef struct dwc_otg_hcd {
 	 */
 	struct list_head free_hc_list;
 
+	/**
+	 * The number of bulk channels in the active schedule that do
+	 * not have a halt pending or queued, but received at least one
+	 * nak and thus are probably blocking a host channel.
+	 *
+	 * This number is included in non_periodic_channels as well.
+	 */
+	int nakking_channels;
+
+	/**
+	 * The number of the last host channel that was halted to free
+	 * up a host channel.
+	 */
+	int last_channel_halted;
+
 	/**
 	 * Number of host channels assigned to periodic transfers. Currently
 	 * assuming that there is a dedicated host channel for each periodic
@@ -452,6 +472,8 @@ extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd,
 					   dwc_otg_transaction_type_e tr_type);
 extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb,
 				     int status);
+extern dwc_hc_t *dwc_otg_halt_nakking_channel(dwc_otg_hcd_t *hcd);
+
 /** @} */
 
 /** @name Interrupt Handler Functions */
@@ -612,6 +634,40 @@ static inline uint16_t dwc_micro_frame_num(uint16_t frame)
 	return frame & 0x7;
 }
 
+/* Perform some sanity checks on nakking / non_periodic channel states. */
+static inline int check_nakking(struct dwc_otg_hcd *hcd, const char *func, const char *context) {
+#ifdef DEBUG
+	int nakking = 0, non_periodic = 0, i;
+	int num_channels = hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		dwc_hc_t *hc = hcd->hc_ptr_array[i];
+		if (hc->xfer_started
+		    && (hc->ep_type == DWC_OTG_EP_TYPE_BULK
+			|| hc->ep_type == DWC_OTG_EP_TYPE_CONTROL)) {
+			non_periodic++;
+		}
+		if (hc->xfer_started
+		    && !hc->halt_on_queue
+		    && !hc->halt_pending
+		    && hc->qh->nak_frame != 0xffff) {
+			nakking++;
+		}
+	}
+
+	if (nakking != hcd->nakking_channels
+	    || nakking > hcd->non_periodic_channels
+	    || non_periodic != hcd->non_periodic_channels) {
+		printk("%s/%s: Inconsistent nakking state\n", func, context);
+		printk("non_periodic: %d, real %d, nakking: %d, real %d\n", hcd->non_periodic_channels, non_periodic, hcd->nakking_channels, nakking);
+		dwc_otg_hcd_dump_state(hcd);
+		WARN_ON(1);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
 #ifdef DEBUG
 /**
  * Macro to sample the remaining PHY clocks left in the current frame. This

View File

@@ -65,14 +65,14 @@ int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
 # ifndef DEBUG_SOF
 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
 # endif
-			DWC_DEBUGPL(DBG_HCD, "\n");
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "\n");
 #endif
 
 #ifdef DEBUG
 # ifndef DEBUG_SOF
 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
 # endif
-			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
 #endif
 		if (gintsts.b.usbreset) {
 			DWC_PRINT("Usb Reset In Host Mode\n");
@@ -103,10 +103,10 @@ int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
 # endif
 		{
-			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n");
-			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD Finished Servicing Interrupts\n");
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD gintsts=0x%08x\n",
 				    dwc_read_reg32(&global_regs->gintsts));
-			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD gintmsk=0x%08x\n",
 				    dwc_read_reg32(&global_regs->gintmsk));
 		}
 #endif
@@ -115,7 +115,7 @@ int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
 # ifndef DEBUG_SOF
 	if (gintsts.d32 != DWC_SOF_INTR_MASK)
 # endif
-		DWC_DEBUGPL(DBG_HCD, "\n");
+		DWC_DEBUGPL(DBG_HCD_FLOOD, "\n");
 #endif
 
 }
@@ -534,7 +534,7 @@ static int update_urb_state_xfer_comp(dwc_hc_t *hc,
 						     DWC_OTG_HC_XFER_COMPLETE,
 						     &short_read);
 
-	if (short_read || urb->actual_length == urb->transfer_buffer_length) {
+	if (short_read || urb->actual_length >= urb->transfer_buffer_length) {
 		xfer_done = 1;
 		if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) {
 			urb->status = -EREMOTEIO;
@@ -551,6 +551,7 @@ static int update_urb_state_xfer_comp(dwc_hc_t *hc,
 			 __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
 		DWC_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
 		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n", hctsiz.b.xfersize);
+		DWC_DEBUGPL(DBG_HCDV, "  urb %p\n", urb);
 		DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
 			    urb->transfer_buffer_length);
 		DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n", urb->actual_length);
@@ -603,10 +604,12 @@ static void deactivate_qh(dwc_otg_hcd_t *hcd,
 {
 	int continue_split = 0;
 	dwc_otg_qtd_t *qtd;
+	unsigned long flags;
 
 	DWC_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
 
-	spin_lock(&hcd->lock);
+	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
+
 	qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
 
 	if (qtd->complete_split) {
@@ -623,8 +626,9 @@ static void deactivate_qh(dwc_otg_hcd_t *hcd,
 	qh->channel = NULL;
 	qh->qtd_in_process = NULL;
 
-	spin_unlock(&hcd->lock);
 	dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
+
+	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
 }
 
 /**
@@ -764,10 +768,18 @@ static void release_channel(dwc_otg_hcd_t *hcd,
 	dwc_otg_hc_cleanup(hcd->core_if, hc);
 	list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
 
+	if (!hc->halt_on_queue && !hc->halt_pending && hc->qh->nak_frame != 0xffff)
+		hcd->nakking_channels--;
+
 	switch (hc->ep_type) {
 	case DWC_OTG_EP_TYPE_CONTROL:
 	case DWC_OTG_EP_TYPE_BULK:
 		hcd->non_periodic_channels--;
+		/* This condition has once been observed, but the cause
+		 * was never determined. Check for it here, to collect
+		 * debug data if it occurs again. */
+		WARN_ON_ONCE(hcd->non_periodic_channels < 0);
 		break;
 	default:
@@ -779,6 +791,9 @@ static void release_channel(dwc_otg_hcd_t *hcd,
 		break;
 	}
 
+	if (halt_status != DWC_OTG_HC_XFER_NAK)
+		hc->qh->nak_frame = 0xffff;
+
 	/* Try to queue more transfers now that there's a free channel. */
 	tr_type = dwc_otg_hcd_select_transactions(hcd);
 	if (tr_type != DWC_OTG_TRANSACTION_NONE) {
@@ -807,7 +822,7 @@ static void halt_channel(dwc_otg_hcd_t *hcd,
 	}
 
 	/* Slave mode processing... */
-	dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
+	dwc_otg_hc_halt(hcd, hc, halt_status);
 
 	if (hc->halt_on_queue) {
 		gintmsk_data_t gintmsk = {.d32 = 0};
@@ -1085,6 +1100,7 @@ static void update_urb_state_xfer_intr(dwc_hc_t *hc,
 	DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
 	DWC_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
 	DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n", bytes_transferred);
+	DWC_DEBUGPL(DBG_HCDV, "  urb %p\n", urb);
 	DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n", urb->actual_length);
 	DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
 		    urb->transfer_buffer_length);
@@ -1103,6 +1119,23 @@ static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd,
 {
 	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
 		    "NAK Received--\n", hc->hc_num);
+
+	/*
+	 * When we get bulk NAKs, remember this so we hold off on this qh
+	 * until the beginning of the next frame.
+	 */
+	switch (usb_pipetype(qtd->urb->pipe)) {
+	case PIPE_BULK:
+		/* xfer_started can be 0 when a halted interrupt
+		 * occurs with the nak flag set, then first the
+		 * halted handler runs and then this nak
+		 * handler. In this case, also don't update
+		 * nak_frame, since the qh might already be
+		 * assigned to another host channel. */
+		if (!hc->halt_on_queue && !hc->halt_pending && hc->xfer_started && hc->qh->nak_frame == 0xffff)
+			hcd->nakking_channels++;
+		if (hc->xfer_started)
+			hc->qh->nak_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
+	}
 
 	/*
 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
@@ -1410,7 +1443,7 @@ static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd,
 	 * Force a channel halt. Don't call halt_channel because that won't
 	 * write to the HCCHARn register in DMA mode to force the halt.
 	 */
-	dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);
+	dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_AHB_ERR);
 
 	disable_hc_int(hc_regs, ahberr);
 	return 1;
@@ -1515,13 +1548,28 @@ static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd,
 					 dwc_otg_qtd_t *qtd)
 {
 	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
-		    "Data Toggle Error--\n", hc->hc_num);
+		    "Data Toggle Error on %s transfer--\n",
+		    hc->hc_num, (hc->ep_is_in ? "IN" : "OUT"));
 
-	if (hc->ep_is_in) {
+	/* Data toggles on split transactions cause the hc to halt;
+	 * restart the transfer. */
+	if (hc->qh->do_split) {
+		qtd->error_count++;
+		save_data_toggle(hc, hc_regs, qtd);
+		update_urb_state_xfer_intr(hc, hc_regs,
+			qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
+		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
+	} else if (hc->ep_is_in) {
+		/* An unmasked data toggle error on a non-split DMA
+		 * transaction is for the sole purpose of resetting error
+		 * counts. Disable other interrupts unmasked for the same
+		 * reason. */
+		if (hcd->core_if->dma_enable) {
+			disable_hc_int(hc_regs, ack);
+			disable_hc_int(hc_regs, nak);
+		}
 		qtd->error_count = 0;
-	} else {
-		DWC_ERROR("Data Toggle Error on OUT transfer,"
-			  "channel %d\n", hc->hc_num);
 	}
 
 	disable_hc_int(hc_regs, datatglerr);
@@ -1583,6 +1631,8 @@ static inline int halt_status_ok(dwc_otg_hcd_t *hcd,
 			 "hcchar 0x%08x, trying to halt again\n",
 			 __func__, hcchar.d32);
 		clear_hc_int(hc_regs, chhltd);
+		if (hc->halt_pending && !hc->halt_on_queue && hc->qh->nak_frame != 0xffff)
+			hcd->nakking_channels++;
 		hc->halt_pending = 0;
 		halt_channel(hcd, hc, qtd, hc->halt_status);
 		return 0;
@@ -1612,13 +1662,46 @@ static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd,
 		if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
 		    (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
 		     hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {
-			DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement enabled\n");
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement enabled\n");
 			out_nak_enh = 1;
 		} else {
-			DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n");
+			DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n");
 		}
 	} else {
-		DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, no core support\n");
+		DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement disabled, no core support\n");
+	}
+
+	if (hc->halt_status == DWC_OTG_HC_XFER_NAK) {
+		/* The channel was nakking and halted to free up the
+		 * channel for another transfer. If this channel has
+		 * already received data, we need to skip that amount on
+		 * the next try.
+		 */
+		update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
+					   qtd, DWC_OTG_HC_XFER_NAK);
+
+		save_data_toggle(hc, hc_regs, qtd);
+
+		/* It turns out that sometimes a channel is halted just
+		 * as it receives its last packet. This causes the core
+		 * to trigger a channel halted interrupt without a
+		 * transfer complete flag, even though the transfer is
+		 * actually complete. If we don't handle that here, the
+		 * qtd will be resubmitted and since bulk in can't have
+		 * empty packets, this will cause one full packet of
+		 * "extra" data to be transferred. So we check here to
+		 * see if the transfer is complete and handle that
+		 * accordingly.
+		 */
+		if (usb_pipebulk(qtd->urb->pipe) &&
+		    usb_pipein(qtd->urb->pipe) &&
+		    qtd->urb->actual_length == qtd->urb->transfer_buffer_length) {
+			dwc_otg_hcd_complete_urb(hcd, qtd->urb, 0);
+			complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
+		} else {
+			release_channel(hcd, hc, qtd, hc->halt_status);
+		}
+		return;
 	}
 
 	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
@@ -1666,6 +1749,8 @@ static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd,
 		 * that started with a PING. The xacterr takes precedence.
 		 */
 		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
+	} else if (hcint.b.datatglerr) {
+		handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
 	} else if (!out_nak_enh) {
 		if (hcint.b.nyet) {
 			/*
@@ -1767,6 +1852,10 @@ int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
 	DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
 
 	hc = dwc_otg_hcd->hc_ptr_array[num];
+
+	check_nakking(dwc_otg_hcd, __FUNCTION__, "start");
+
 	hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
 	qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
@@ -1774,6 +1863,7 @@ int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
 	hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
 	DWC_DEBUGPL(DBG_HCDV, "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
 		    hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+
 	hcint.d32 = hcint.d32 & hcintmsk.d32;
 
 	if (!dwc_otg_hcd->core_if->dma_enable) {
@@ -1803,7 +1893,7 @@ int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
 	if (hcint.b.nak) {
 		retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
 	}
-	if (hcint.b.ack) {
+	if (hcint.b.ack && !hcint.b.chhltd) {
 		retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
 	}
 	if (hcint.b.nyet) {
@@ -1821,6 +1911,11 @@ int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
 	if (hcint.b.datatglerr) {
 		retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
 	}
+
+	if (check_nakking(dwc_otg_hcd, __FUNCTION__, "end")) {
+		DWC_WARN("--Host Channel Interrupt--, Channel %d\n", num);
+		DWC_WARN("  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+			 hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+	}
 
 	return retval;
 }

View File

@@ -211,6 +211,8 @@ void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb)
 		    usb_pipeendpoint(urb->pipe),
 		    usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
 
+	qh->nak_frame = 0xffff;
+
 	switch(urb->dev->speed) {
 	case USB_SPEED_LOW:
 		speed = "low";
@@ -453,7 +455,26 @@ static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
 	int status;
 	struct usb_bus *bus = hcd_to_bus(dwc_otg_hcd_to_hcd(hcd));
 	int frame;
+	int num_channels;
+
+	num_channels = hcd->core_if->core_params->host_channels;
 
+	if ((hcd->periodic_channels < num_channels - 1)) {
+		if (hcd->periodic_channels + hcd->nakking_channels >= num_channels) {
+			/* All non-periodic channels are nakking? Halt
+			 * one to make room (as long as there is at
+			 * least one channel for non-periodic transfers,
+			 * all the blocking non-periodics can time-share
+			 * that one channel). */
+			dwc_hc_t *hc = dwc_otg_halt_nakking_channel(hcd);
+			if (hc)
+				DWC_DEBUGPL(DBG_HCD, "Out of Host Channels for periodic transfer - Halting channel %d (dev %d ep%d%s)\n", hc->hc_num, hc->dev_addr, hc->ep_num, (hc->ep_is_in ? "in" : "out"));
+		}
+		/* It could be that all channels are currently occupied,
+		 * but in that case one will be freed up soon (either
+		 * because it completed or because it was forced to halt
+		 * above). */
+	}
 	status = find_uframe(hcd, qh);
 	frame = -1;
 	if (status == 0) {
@@ -483,6 +504,8 @@ static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
 	/* Always start in the inactive schedule. */
 	list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive);
 
+	hcd->periodic_channels++;
+
 	/* Update claimed usecs per (micro)frame. */
 	hcd->periodic_usecs += qh->usecs;
@@ -553,6 +576,9 @@ static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
 	int i;
 
 	list_del_init(&qh->qh_list_entry);
+
+	hcd->periodic_channels--;
+
 	/* Update claimed usecs per (micro)frame. */
 	hcd->periodic_usecs -= qh->usecs;
 	for (i = 0; i < 8; i++) {
@@ -628,9 +654,6 @@ void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
  */
 void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split)
 {
-	unsigned long flags;
-	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
-
 	if (dwc_qh_is_non_per(qh)) {
 		dwc_otg_hcd_qh_remove(hcd, qh);
 		if (!list_empty(&qh->qtd_list)) {
@@ -690,8 +713,6 @@ void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_n
 			}
 		}
 	}
-
-	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
 }
 
 /**
@@ -759,22 +780,22 @@ int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd,
 {
 	struct usb_host_endpoint *ep;
 	dwc_otg_qh_t *qh;
-	unsigned long flags;
 	int retval = 0;
 	struct urb *urb = qtd->urb;
 
-	SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
-
 	/*
 	 * Get the QH which holds the QTD-list to insert to. Create QH if it
 	 * doesn't exist.
 	 */
+	usb_hcd_link_urb_to_ep(dwc_otg_hcd_to_hcd(dwc_otg_hcd), urb);
 	ep = dwc_urb_to_endpoint(urb);
 	qh = (dwc_otg_qh_t *)ep->hcpriv;
 	if (qh == NULL) {
 		qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb);
 		if (qh == NULL) {
+			usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(dwc_otg_hcd), urb);
+			retval = -ENOMEM;
 			goto done;
 		}
 		ep->hcpriv = qh;
@@ -783,11 +804,11 @@ int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd,
 	retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh);
 	if (retval == 0) {
 		list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
+	} else {
+		usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(dwc_otg_hcd), urb);
 	}
 
 done:
-	SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
 	return retval;
 }
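
Taken together with the urb_dequeue() and complete_urb() changes earlier in this commit, qtd_add now follows the standard Linux HCD URB lifecycle: link the URB to its endpoint before it can enter the schedule, unlink it again on every failure path, and on dequeue ask the core first whether the unlink is still valid. Condensed, the expected call order is (a sketch of the usbcore contract, not code from this patch):

	/* enqueue */
	usb_hcd_link_urb_to_ep(hcd, urb);        /* before queuing */
	/* ... on failure: usb_hcd_unlink_urb_from_ep(hcd, urb); */

	/* dequeue */
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (!rc) {
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		/* drop the private lock, then: */
		usb_hcd_giveback_urb(hcd, urb, status);
	}

usb_hcd_giveback_urb() must be called exactly once per URB and without the driver's own lock held, which is why the unlock was moved ahead of it in both the dequeue and complete paths.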

View File

@@ -202,6 +202,7 @@ static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new )
 /** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
  *  mode. */
 #define DBG_HCD_URB	(0x800)
+#define DBG_HCD_FLOOD	(0x1)
 
 /** When debug level has any bit set, display debug messages */
 #define DBG_ANY		(0xFF)