diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index 56602f0abf32..c65f0b9da843 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -16,12 +17,18 @@ #define USB_THRESHOLD 512 #define USB_BAM_MAX_STR_LEN 50 +#define USB_BAM_TIMEOUT (10*HZ) #define DBG_MAX_MSG 512UL #define DBG_MSG_LEN 160UL #define TIME_BUF_LEN 17 #define DBG_EVENT_LEN 143 #define ENABLE_EVENT_LOG 1 + +#define USB_BAM_NR_PORTS 4 + +#define ARRAY_INDEX_FROM_ADDR(base, addr) ((addr) - (base)) + static unsigned int enable_event_log = ENABLE_EVENT_LOG; module_param(enable_event_log, uint, 0644); MODULE_PARM_DESC(enable_event_log, "enable event logging in debug buffer"); @@ -114,6 +121,7 @@ struct usb_bam_pipe_connect { enum usb_pipe_mem_type mem_type; enum usb_bam_pipe_dir dir; enum usb_ctrl bam_type; + enum usb_bam_mode bam_mode; enum peer_bam peer_bam; enum usb_bam_pipe_type pipe_type; u32 src_phy_addr; @@ -127,10 +135,11 @@ struct usb_bam_pipe_connect { struct sps_mem_buffer data_mem_buf; struct sps_mem_buffer desc_mem_buf; struct usb_bam_event_info event; - bool enabled; - bool suspended; - bool cons_stopped; - bool prod_stopped; + int enabled; + int suspended; + int cons_stopped; + int prod_stopped; + int ipa_clnt_hdl; void *priv; int (*activity_notify)(void *priv); int (*inactivity_notify)(void *priv); @@ -207,6 +216,70 @@ struct usb_bam_ctx_type { spinlock_t usb_bam_lock; }; +/* + * CI_CTRL & DWC3_CTRL shouldn't be used simultaneously + * since both share the same prod & cons rm resourses + */ +static enum ipa_rm_resource_name ipa_rm_resource_prod[MAX_BAMS] = { + [CI_CTRL] = IPA_RM_RESOURCE_USB_PROD, + [DWC3_CTRL] = IPA_RM_RESOURCE_USB_PROD, +}; + +static enum ipa_rm_resource_name ipa_rm_resource_cons[MAX_BAMS] = { + [CI_CTRL] = IPA_RM_RESOURCE_USB_CONS, + [DWC3_CTRL] = IPA_RM_RESOURCE_USB_CONS, +}; + +static int usb_cons_request_resource(void); +static int 
usb_cons_release_resource(void); +static int ss_usb_cons_request_resource(void); +static int ss_usb_cons_release_resource(void); + +static int (*request_resource_cb[MAX_BAMS])(void) = { + [CI_CTRL] = usb_cons_request_resource, + [DWC3_CTRL] = ss_usb_cons_request_resource, +}; + +static int (*release_resource_cb[MAX_BAMS])(void) = { + [CI_CTRL] = usb_cons_release_resource, + [DWC3_CTRL] = ss_usb_cons_release_resource, +}; + +struct usb_bam_ipa_handshake_info { + enum ipa_rm_event cur_prod_state; + enum ipa_rm_event cur_cons_state; + + enum usb_bam_mode cur_bam_mode; + enum usb_ctrl bam_type; + int connect_complete; + int bus_suspend; + int disconnected; + bool in_lpm; + u8 prod_pipes_enabled_per_bam; + + int (*wake_cb); + void *wake_param; + + u32 suspend_src_idx[USB_BAM_NR_PORTS]; + u32 suspend_dst_idx[USB_BAM_NR_PORTS]; + u32 resume_src_idx[USB_BAM_NR_PORTS]; + u32 resume_dst_idx[USB_BAM_NR_PORTS]; + + u32 pipes_to_suspend; + u32 pipes_suspended; + u32 pipes_resumed; + + struct completion prod_avail; + struct completion prod_released; + + struct mutex suspend_resume_mutex; + struct work_struct resume_work; + struct work_struct finish_suspend_work; +}; + +static spinlock_t usb_bam_ipa_handshake_info_lock; +static struct usb_bam_ipa_handshake_info info[MAX_BAMS]; + static struct usb_bam_ctx_type msm_usb_bam[MAX_BAMS]; /* USB bam type used as a peer of the qdss in bam2bam mode */ static enum usb_ctrl qdss_usb_bam_type; @@ -215,6 +288,9 @@ static int __usb_bam_register_wake_cb(enum usb_ctrl bam_type, int idx, int (*callback)(void *user), void *param, bool trigger_cb_per_pipe); +static void wait_for_prod_release(enum usb_ctrl cur_bam); +static void usb_bam_start_suspend(struct usb_bam_ipa_handshake_info *info_ptr); + static struct { char buf[DBG_MAX_MSG][DBG_MSG_LEN]; /* buffer */ unsigned int idx; /* index */ @@ -279,6 +355,12 @@ static void usb_bam_set_inactivity_timer(enum usb_ctrl bam) sps_timer_ctrl(pipe, &timer_ctrl, NULL); } +void msm_bam_set_usb_host_dev(struct 
device *dev) +{ + host_info[CI_CTRL].dev = dev; + host_info[CI_CTRL].in_lpm = false; +} + static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect) { int ret = 0; @@ -381,9 +463,9 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect) } /* BAM would use system memory, allocate FIFOs */ + data_buf->size = pipe_connect->data_fifo_size; data_buf->base = dma_alloc_attrs(dev, - pipe_connect->data_fifo_size, - &data_iova, GFP_KERNEL, + pipe_connect->data_fifo_size, &data_iova, GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); if (!data_buf->base) { log_event_err("%s: data_fifo: dma_alloc_attr failed\n", @@ -602,6 +684,207 @@ static int connect_pipe(enum usb_ctrl cur_bam, u8 idx, u32 *usb_pipe_idx, return ret; } + +static int connect_pipe_sys2bam_ipa(enum usb_ctrl cur_bam, u8 idx, + struct usb_bam_connect_ipa_params *ipa_params) +{ + int ret; + enum usb_bam_pipe_dir dir = ipa_params->dir; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect = + &ctx->usb_bam_connections[idx]; + struct ipa_sys_connect_params sys_in_params; + unsigned long usb_handle; + phys_addr_t usb_phy_addr; + u32 clnt_hdl = 0; + + memset(&sys_in_params, 0, sizeof(sys_in_params)); + + if (dir == USB_TO_PEER_PERIPHERAL) { + usb_phy_addr = pipe_connect->src_phy_addr; + sys_in_params.client = ipa_params->src_client; + ipa_params->ipa_cons_ep_idx = + ipa_get_ep_mapping(sys_in_params.client); + } else { + usb_phy_addr = pipe_connect->dst_phy_addr; + sys_in_params.client = ipa_params->dst_client; + ipa_params->ipa_prod_ep_idx = + ipa_get_ep_mapping(sys_in_params.client); + } + + log_event_dbg("%s(): ipa_prod_ep_idx:%d ipa_cons_ep_idx:%d\n", + __func__, ipa_params->ipa_prod_ep_idx, + ipa_params->ipa_cons_ep_idx); + + /* Get HSUSB / HSIC bam handle */ + ret = sps_phy2h(usb_phy_addr, &usb_handle); + if (ret) { + log_event_err("%s: sps_phy2h failed (HSUSB/HSIC BAM) %d\n", + __func__, ret); + return ret; + } + + 
pipe_connect->activity_notify = ipa_params->activity_notify; + pipe_connect->inactivity_notify = ipa_params->inactivity_notify; + pipe_connect->priv = ipa_params->priv; + + /* IPA sys connection params */ + sys_in_params.desc_fifo_sz = pipe_connect->desc_fifo_size; + sys_in_params.priv = ipa_params->priv; + sys_in_params.notify = ipa_params->notify; + sys_in_params.skip_ep_cfg = ipa_params->skip_ep_cfg; + sys_in_params.keep_ipa_awake = ipa_params->keep_ipa_awake; + memcpy(&sys_in_params.ipa_ep_cfg, &ipa_params->ipa_ep_cfg, + sizeof(struct ipa_ep_cfg)); + + ret = ipa_setup_sys_pipe(&sys_in_params, &clnt_hdl); + if (ret) { + log_event_err("%s: ipa_connect failed\n", __func__); + return ret; + } + pipe_connect->ipa_clnt_hdl = clnt_hdl; + if (dir == USB_TO_PEER_PERIPHERAL) + ipa_params->cons_clnt_hdl = clnt_hdl; + else + ipa_params->prod_clnt_hdl = clnt_hdl; + + return 0; +} + +static int connect_pipe_bam2bam_ipa(enum usb_ctrl cur_bam, u8 idx, + struct usb_bam_connect_ipa_params *ipa_params) +{ + int ret; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_sps_type usb_bam_sps = ctx->usb_bam_sps; + enum usb_bam_pipe_dir dir = ipa_params->dir; + struct sps_pipe **pipe = &(usb_bam_sps.sps_pipes[idx]); + struct sps_connect *sps_connection = &usb_bam_sps.sps_connections[idx]; + struct usb_bam_pipe_connect *pipe_connect = + &ctx->usb_bam_connections[idx]; + struct sps_mem_buffer *data_buf = &(pipe_connect->data_mem_buf); + struct sps_mem_buffer *desc_buf = &(pipe_connect->desc_mem_buf); + struct ipa_connect_params ipa_in_params; + struct ipa_sps_params sps_out_params; + u32 usb_phy_addr; + unsigned long usb_handle; + u32 clnt_hdl = 0; + + memset(&ipa_in_params, 0, sizeof(ipa_in_params)); + memset(&sps_out_params, 0, sizeof(sps_out_params)); + + if (dir == USB_TO_PEER_PERIPHERAL) { + usb_phy_addr = pipe_connect->src_phy_addr; + ipa_in_params.client_ep_idx = pipe_connect->src_pipe_index; + ipa_in_params.client = ipa_params->src_client; + } else { + 
usb_phy_addr = pipe_connect->dst_phy_addr; + ipa_in_params.client_ep_idx = pipe_connect->dst_pipe_index; + ipa_in_params.client = ipa_params->dst_client; + } + /* Get HSUSB / HSIC bam handle */ + ret = sps_phy2h(usb_phy_addr, &usb_handle); + if (ret) { + log_event_err("%s: sps_phy2h failed (HSUSB/HSIC BAM) %d\n", + __func__, ret); + return ret; + } + + pipe_connect->activity_notify = ipa_params->activity_notify; + pipe_connect->inactivity_notify = ipa_params->inactivity_notify; + pipe_connect->priv = ipa_params->priv; + pipe_connect->reset_pipe_after_lpm = ipa_params->reset_pipe_after_lpm; + + /* IPA input parameters */ + ipa_in_params.client_bam_hdl = usb_handle; + ipa_in_params.desc_fifo_sz = pipe_connect->desc_fifo_size; + ipa_in_params.data_fifo_sz = pipe_connect->data_fifo_size; + ipa_in_params.notify = ipa_params->notify; + ipa_in_params.priv = ipa_params->priv; + ipa_in_params.skip_ep_cfg = ipa_params->skip_ep_cfg; + ipa_in_params.keep_ipa_awake = ipa_params->keep_ipa_awake; + + ipa_in_params.desc = pipe_connect->desc_mem_buf; + ipa_in_params.data = pipe_connect->data_mem_buf; + + memcpy(&ipa_in_params.ipa_ep_cfg, &ipa_params->ipa_ep_cfg, + sizeof(struct ipa_ep_cfg)); + + ret = ipa_connect(&ipa_in_params, &sps_out_params, &clnt_hdl); + if (ret) { + log_event_err("%s: ipa_connect failed\n", __func__); + return ret; + } + pipe_connect->ipa_clnt_hdl = clnt_hdl; + + *pipe = sps_alloc_endpoint(); + if (*pipe == NULL) { + log_event_err("%s: sps_alloc_endpoint failed\n", __func__); + ret = -ENOMEM; + goto disconnect_ipa; + } + + ret = sps_get_config(*pipe, sps_connection); + if (ret) { + log_event_err("%s: tx get config failed %d\n", __func__, ret); + goto free_sps_endpoints; + } + + if (dir == USB_TO_PEER_PERIPHERAL) { + /* USB src IPA dest */ + sps_connection->mode = SPS_MODE_SRC; + ipa_params->cons_clnt_hdl = clnt_hdl; + sps_connection->source = usb_handle; + sps_connection->destination = sps_out_params.ipa_bam_hdl; + sps_connection->src_pipe_index = 
pipe_connect->src_pipe_index; + sps_connection->dest_pipe_index = sps_out_params.ipa_ep_idx; + ipa_params->ipa_cons_ep_idx = sps_out_params.ipa_ep_idx; + *(ipa_params->src_pipe) = sps_connection->src_pipe_index; + pipe_connect->dst_pipe_index = sps_out_params.ipa_ep_idx; + log_event_dbg("%s: BAM pipe usb[%x]->ipa[%x] connection\n", + __func__, + pipe_connect->src_pipe_index, + pipe_connect->dst_pipe_index); + sps_connection->options = SPS_O_NO_DISABLE; + } else { + /* IPA src, USB dest */ + sps_connection->mode = SPS_MODE_DEST; + ipa_params->prod_clnt_hdl = clnt_hdl; + sps_connection->source = sps_out_params.ipa_bam_hdl; + sps_connection->destination = usb_handle; + sps_connection->src_pipe_index = sps_out_params.ipa_ep_idx; + ipa_params->ipa_prod_ep_idx = sps_out_params.ipa_ep_idx; + sps_connection->dest_pipe_index = pipe_connect->dst_pipe_index; + *(ipa_params->dst_pipe) = sps_connection->dest_pipe_index; + pipe_connect->src_pipe_index = sps_out_params.ipa_ep_idx; + log_event_dbg("%s: BAM pipe ipa[%x]->usb[%x] connection\n", + __func__, + pipe_connect->src_pipe_index, + pipe_connect->dst_pipe_index); + sps_connection->options = 0; + } + + sps_connection->data = *data_buf; + sps_connection->desc = *desc_buf; + sps_connection->event_thresh = 16; + sps_connection->options |= SPS_O_AUTO_ENABLE; + + ret = sps_connect(*pipe, sps_connection); + if (ret < 0) { + log_event_err("%s: sps_connect failed %d\n", __func__, ret); + goto error; + } + return 0; + +error: + sps_disconnect(*pipe); +free_sps_endpoints: + sps_free_endpoint(*pipe); +disconnect_ipa: + ipa_disconnect(clnt_hdl); + return ret; +} + static int disconnect_pipe(enum usb_ctrl cur_bam, u8 idx) { struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; @@ -617,6 +900,206 @@ static int disconnect_pipe(enum usb_ctrl cur_bam, u8 idx) return 0; } +static void usb_bam_suspend_core(enum usb_ctrl bam_type, + enum usb_bam_mode bam_mode, + bool disconnect) +{ + log_event_dbg("%s: enter bam=%s\n", __func__, + 
bam_enable_strings[bam_type]); + + if ((bam_mode == USB_BAM_DEVICE) || (bam_type != HSIC_CTRL)) { + log_event_err("%s: Invalid BAM type %d\n", __func__, bam_type); + return; + } +} + +/** + * usb_bam_disconnect_ipa_prod() - disconnects USB consumer(i.e. IPA producer) + * @ipa_params: USB IPA related parameters + * @cur_bam: USB controller used for BAM functionality + * + * It performs disconnect with IPA driver for IPA producer pipe and + * with SPS driver for USB BAM consumer pipe. This API also takes care + * of SYS2BAM and BAM2BAM IPA disconnect functionality. + * + * Return: 0 in case of success, errno otherwise. + */ +static int usb_bam_disconnect_ipa_prod( + struct usb_bam_connect_ipa_params *ipa_params, + enum usb_ctrl cur_bam) +{ + int ret; + u8 idx = 0; + struct usb_bam_pipe_connect *pipe_connect; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + + idx = ipa_params->dst_idx; + pipe_connect = &ctx->usb_bam_connections[idx]; + pipe_connect->activity_notify = NULL; + pipe_connect->inactivity_notify = NULL; + pipe_connect->priv = NULL; + + /* close IPA -> USB pipe */ + if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { + ret = ipa_disconnect(ipa_params->prod_clnt_hdl); + if (ret) { + log_event_err("%s: dst pipe disconnection failure\n", + __func__); + return ret; + } + + ret = usb_bam_disconnect_pipe(cur_bam, idx); + if (ret) { + log_event_err("%s: failure to disconnect pipe %d\n", + __func__, idx); + return ret; + } + } else { + ret = ipa_teardown_sys_pipe(ipa_params->prod_clnt_hdl); + if (ret) { + log_event_err("%s: dst pipe disconnection failure\n", + __func__); + return ret; + } + + pipe_connect->enabled = false; + spin_lock(&ctx->usb_bam_lock); + if (ctx->pipes_enabled_per_bam == 0) + log_event_err("%s: wrong pipes enabled counter for bam=%d\n", + __func__, pipe_connect->bam_type); + else + ctx->pipes_enabled_per_bam -= 1; + spin_unlock(&ctx->usb_bam_lock); + } + + return 0; +} + +/** + * usb_bam_disconnect_ipa_cons() - disconnects USB 
producer(i.e. IPA consumer) + * @ipa_params: USB IPA related parameters + * @cur_bam: USB controller used for BAM functionality + * + * It performs disconnect with IPA driver for IPA consumer pipe and + * with SPS driver for USB BAM producer pipe. This API also takes care + * of SYS2BAM and BAM2BAM IPA disconnect functionality. + * + * Return: 0 in case of success, errno otherwise. + */ +static int usb_bam_disconnect_ipa_cons( + struct usb_bam_connect_ipa_params *ipa_params, + enum usb_ctrl cur_bam) +{ + int ret; + u8 idx = 0; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + struct sps_pipe *pipe; + u32 timeout = 10, pipe_empty; + struct usb_bam_sps_type usb_bam_sps = ctx->usb_bam_sps; + struct sps_connect *sps_connection; + bool inject_zlt = true; + + idx = ipa_params->src_idx; + pipe = ctx->usb_bam_sps.sps_pipes[idx]; + pipe_connect = &ctx->usb_bam_connections[idx]; + sps_connection = &usb_bam_sps.sps_connections[idx]; + + pipe_connect->activity_notify = NULL; + pipe_connect->inactivity_notify = NULL; + pipe_connect->priv = NULL; + + /* + * On some platforms, there is a chance that flow control + * is disabled from IPA side, due to this IPA core may not + * consume data from USB. Hence notify IPA to enable flow + * control and then check sps pipe is empty or not before + * processing USB->IPA pipes disconnect. 
+ */ + ipa_clear_endpoint_delay(ipa_params->cons_clnt_hdl); +retry: + /* Make sure pipe is empty before disconnecting it */ + while (1) { + ret = sps_is_pipe_empty(pipe, &pipe_empty); + if (ret) { + log_event_err("%s: sps_is_pipe_empty failed with %d\n", + __func__, ret); + return ret; + } + if (pipe_empty || !--timeout) + break; + + /* Check again */ + usleep_range(1000, 2000); + } + + if (!pipe_empty) { + if (inject_zlt) { + pr_debug("%s: Inject ZLT\n", __func__); + log_event_dbg("%s: Inject ZLT\n", __func__); + inject_zlt = false; + sps_pipe_inject_zlt(sps_connection->destination, + sps_connection->dest_pipe_index); + timeout = 10; + goto retry; + } + log_event_err("%s: src pipe(USB) not empty, wait timed out!\n", + __func__); + sps_get_bam_debug_info(ctx->h_bam, 93, + (SPS_BAM_PIPE(0) | SPS_BAM_PIPE(1)), 0, 2); + ipa_bam_reg_dump(); + panic("%s:SPS pipe not empty for USB->IPA\n", __func__); + } + + /* Do the release handshake with the IPA via RM */ + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].connect_complete = 0; + info[cur_bam].disconnected = 1; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + /* Start release handshake on the last USB BAM producer pipe */ + if (info[cur_bam].prod_pipes_enabled_per_bam == 1) + wait_for_prod_release(cur_bam); + + /* close USB -> IPA pipe */ + if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { + ret = ipa_disconnect(ipa_params->cons_clnt_hdl); + if (ret) { + log_event_err("%s: src pipe disconnection failure\n", + __func__); + return ret; + } + + ret = usb_bam_disconnect_pipe(cur_bam, idx); + if (ret) { + log_event_err("%s: failure to disconnect pipe %d\n", + __func__, idx); + return ret; + } + } else { + ret = ipa_teardown_sys_pipe(ipa_params->cons_clnt_hdl); + if (ret) { + log_event_err("%s: src pipe disconnection failure\n", + __func__); + return ret; + } + + pipe_connect->enabled = false; + spin_lock(&ctx->usb_bam_lock); + if (ctx->pipes_enabled_per_bam == 0) + log_event_err("%s: wrong pipes enabled 
counter for bam=%d\n", + __func__, pipe_connect->bam_type); + else + ctx->pipes_enabled_per_bam -= 1; + spin_unlock(&ctx->usb_bam_lock); + } + + pipe_connect->ipa_clnt_hdl = -1; + info[cur_bam].prod_pipes_enabled_per_bam -= 1; + + return 0; +} + int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx, phys_addr_t *p_addr, u32 *bam_size) { @@ -673,7 +1156,7 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx, spin_lock(&ctx->usb_bam_lock); /* Check if BAM requires RESET before connect and reset of first pipe */ - if ((ctx->usb_bam_data->reset_on_connect == true) && + if (ctx->usb_bam_data->reset_on_connect && (ctx->pipes_enabled_per_bam == 0)) { spin_unlock(&ctx->usb_bam_lock); sps_device_reset(ctx->h_bam); @@ -699,6 +1182,1032 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx, return 0; } +static int __sps_reset_pipe(enum usb_ctrl bam_type, + struct sps_pipe *pipe, u32 idx) +{ + int ret; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type]; + struct sps_connect *sps_connection = + &ctx->usb_bam_sps.sps_connections[idx]; + + ret = sps_disconnect(pipe); + if (ret) { + log_event_err("%s: sps_disconnect() failed %d\n", + __func__, ret); + return ret; + } + + ret = sps_connect(pipe, sps_connection); + if (ret < 0) { + log_event_err("%s: sps_connect() failed %d\n", __func__, ret); + return ret; + } + + return 0; +} + +static void reset_pipe_for_resume(struct usb_bam_pipe_connect *pipe_connect) +{ + int ret; + enum usb_ctrl bam_type = pipe_connect->bam_type; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type]; + u32 idx = ARRAY_INDEX_FROM_ADDR(ctx->usb_bam_connections, pipe_connect); + struct sps_pipe *pipe = ctx->usb_bam_sps.sps_pipes[idx]; + + if (!pipe_connect->reset_pipe_after_lpm || + pipe_connect->pipe_type != USB_BAM_PIPE_BAM2BAM) { + log_event_dbg("No need to reset pipe %d\n", idx); + return; + } + + ret = __sps_reset_pipe(bam_type, pipe, idx); + if (ret) { + log_event_err("%s failed to reset the USB sps pipe\n", + 
__func__); + return; + } + + ret = ipa_reset_endpoint(pipe_connect->ipa_clnt_hdl); + if (ret) { + log_event_err("%s failed to reset the IPA pipe\n", __func__); + return; + } + log_event_dbg("%s: USB/IPA pipes reset after resume\n", __func__); +} + + +/* Stop PROD transfers in case they were started */ +static void stop_prod_transfers(struct usb_bam_pipe_connect *pipe_connect) +{ + if (pipe_connect->stop && !pipe_connect->prod_stopped) { + log_event_dbg("%s: Stop PROD transfers on\n", __func__); + pipe_connect->stop(pipe_connect->start_stop_param, + USB_TO_PEER_PERIPHERAL); + pipe_connect->prod_stopped = true; + } +} + +static void start_prod_transfers(struct usb_bam_pipe_connect *pipe_connect) +{ + log_event_err("%s: Starting PROD\n", __func__); + if (pipe_connect->start && pipe_connect->prod_stopped) { + log_event_dbg("%s: Enqueue PROD transfer\n", __func__); + pipe_connect->start(pipe_connect->start_stop_param, + USB_TO_PEER_PERIPHERAL); + pipe_connect->prod_stopped = false; + } +} + +static void start_cons_transfers(struct usb_bam_pipe_connect *pipe_connect) +{ + /* Start CONS transfer */ + if (pipe_connect->start && pipe_connect->cons_stopped) { + log_event_dbg("%s: Enqueue CONS transfer\n", __func__); + pipe_connect->start(pipe_connect->start_stop_param, + PEER_PERIPHERAL_TO_USB); + pipe_connect->cons_stopped = 0; + } +} + +/* Stop CONS transfers in case they were started */ +static void stop_cons_transfers(struct usb_bam_pipe_connect *pipe_connect) +{ + if (pipe_connect->stop && !pipe_connect->cons_stopped) { + log_event_dbg("%s: Stop CONS transfers\n", __func__); + pipe_connect->stop(pipe_connect->start_stop_param, + PEER_PERIPHERAL_TO_USB); + pipe_connect->cons_stopped = 1; + } +} + +static void resume_suspended_pipes(enum usb_ctrl cur_bam) +{ + u32 idx, dst_idx; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + + log_event_dbg("Resuming: suspend pipes =%d\n", + info[cur_bam].pipes_suspended); + + while 
(info[cur_bam].pipes_suspended >= 1) { + idx = info[cur_bam].pipes_suspended - 1; + dst_idx = info[cur_bam].resume_dst_idx[idx]; + pipe_connect = &ctx->usb_bam_connections[dst_idx]; + if (pipe_connect->cons_stopped) { + log_event_dbg("%s: Starting CONS on %d\n", __func__, + dst_idx); + start_cons_transfers(pipe_connect); + } + + log_event_dbg("%s: Starting PROD on %d\n", __func__, dst_idx); + start_prod_transfers(pipe_connect); + info[cur_bam].pipes_suspended--; + info[cur_bam].pipes_resumed++; + /* Suspend was aborted, renew pm_runtime vote */ + log_event_dbg("%s: PM Runtime GET %d, count: %d\n", __func__, + idx, get_pm_runtime_counter(&ctx->usb_bam_pdev->dev)); + pm_runtime_get(&ctx->usb_bam_pdev->dev); + } +} + +static inline int all_pipes_suspended(enum usb_ctrl cur_bam) +{ + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + + log_event_dbg("%s: pipes_suspended=%d pipes_enabled_per_bam=%d\n", + __func__, info[cur_bam].pipes_suspended, + ctx->pipes_enabled_per_bam); + + return info[cur_bam].pipes_suspended == ctx->pipes_enabled_per_bam; +} + + +static void usb_bam_finish_suspend(enum usb_ctrl cur_bam) +{ + int ret, bam2bam; + u32 cons_empty, idx, dst_idx; + struct sps_pipe *cons_pipe; + struct usb_bam_pipe_connect *pipe_connect; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct device *bam_dev = &ctx->usb_bam_pdev->dev; + + mutex_lock(&info[cur_bam].suspend_resume_mutex); + + spin_lock(&usb_bam_ipa_handshake_info_lock); + /* If cable was disconnected, let disconnection seq do everything */ + if (info[cur_bam].disconnected || all_pipes_suspended(cur_bam)) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + log_event_dbg("%s: Cable disconnected\n", __func__); + return; + } + log_event_dbg("%s: bam:%s RT GET: %d\n", __func__, + bam_enable_strings[cur_bam], get_pm_runtime_counter(bam_dev)); + pm_runtime_get(bam_dev); + + /* If resume was called don't finish this work */ + if 
(!info[cur_bam].bus_suspend) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + log_event_dbg("%s: Bus resume in progress\n", __func__); + goto no_lpm; + } + + /* Go over all pipes, stop and suspend them, and go to lpm */ + while (!all_pipes_suspended(cur_bam)) { + idx = info[cur_bam].pipes_suspended; + dst_idx = info[cur_bam].suspend_dst_idx[idx]; + cons_pipe = ctx->usb_bam_sps.sps_pipes[dst_idx]; + pipe_connect = &ctx->usb_bam_connections[dst_idx]; + + log_event_dbg("pipes_suspended=%d pipes_to_suspend=%d\n", + info[cur_bam].pipes_suspended, + info[cur_bam].pipes_to_suspend); + + bam2bam = (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM); + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + if (bam2bam) { + ret = sps_is_pipe_empty(cons_pipe, &cons_empty); + if (ret) { + log_event_err("%s: sps_is_pipe_empty failed with %d\n", + __func__, ret); + goto no_lpm; + } + } else { + log_event_err("%s: pipe type is not B2B\n", __func__); + cons_empty = true; + } + + spin_lock(&usb_bam_ipa_handshake_info_lock); + /* Stop CONS transfers and go to lpm if no more data in the */ + /* pipes */ + if (cons_empty) { + log_event_dbg("%s: Stopping CONS transfers on dst_idx=%d\n" + , __func__, dst_idx); + stop_cons_transfers(pipe_connect); + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + log_event_dbg("%s: Suspending pipe\n", __func__); + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].resume_src_idx[idx] = + info[cur_bam].suspend_src_idx[idx]; + info[cur_bam].resume_dst_idx[idx] = + info[cur_bam].suspend_dst_idx[idx]; + info[cur_bam].pipes_suspended++; + + log_event_dbg("%s: PM Runtime PUT %d, count: %d\n", + __func__, idx, get_pm_runtime_counter(bam_dev)); + pm_runtime_put(&ctx->usb_bam_pdev->dev); + } else { + log_event_dbg("%s: Pipe is not empty, not going to LPM\n", + __func__); + spin_unlock(&usb_bam_ipa_handshake_info_lock); + goto no_lpm; + } + } + info[cur_bam].pipes_to_suspend = 0; + info[cur_bam].pipes_resumed = 0; + 
spin_unlock(&usb_bam_ipa_handshake_info_lock); + + /* ACK on the last pipe */ + if (info[cur_bam].pipes_suspended == ctx->pipes_enabled_per_bam && + info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_RELEASED) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_RELEASED, + ipa_rm_resource_cons[cur_bam]); + } + + log_event_dbg("%s: Starting LPM on Bus Suspend, RT PUT:%d\n", __func__, + get_pm_runtime_counter(bam_dev)); + /* Put to match _get at the beginning of this routine */ + pm_runtime_put_sync(bam_dev); + + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + + return; + +no_lpm: + + spin_lock(&usb_bam_ipa_handshake_info_lock); + resume_suspended_pipes(cur_bam); + info[cur_bam].pipes_resumed = 0; + info[cur_bam].pipes_to_suspend = 0; + info[cur_bam].pipes_suspended = 0; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + /* + * Finish the handshake. Resume Sequence will start automatically + * by the data in the pipes. + */ + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_RELEASED) + ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED, + ipa_rm_resource_cons[cur_bam]); + + /* Put to match _get at the beginning of this routine */ + pm_runtime_put(bam_dev); + + mutex_unlock(&info[cur_bam].suspend_resume_mutex); +} + +static void usb_bam_finish_suspend_(struct work_struct *w) +{ + enum usb_ctrl cur_bam; + struct usb_bam_ipa_handshake_info *info_ptr; + + info_ptr = container_of(w, struct usb_bam_ipa_handshake_info, + finish_suspend_work); + cur_bam = info_ptr->bam_type; + + log_event_dbg("%s: Finishing suspend sequence(BAM=%s)\n", __func__, + bam_enable_strings[cur_bam]); + usb_bam_finish_suspend(cur_bam); +} + +static void usb_prod_notify_cb(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + enum usb_ctrl *cur_bam = (void *)user_data; + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + log_event_dbg("%s: %s_PROD resource granted\n", + __func__, bam_enable_strings[*cur_bam]); + info[*cur_bam].cur_prod_state = IPA_RM_RESOURCE_GRANTED; + 
complete_all(&info[*cur_bam].prod_avail); + break; + case IPA_RM_RESOURCE_RELEASED: + log_event_dbg("%s: %s_PROD resource released\n", + __func__, bam_enable_strings[*cur_bam]); + info[*cur_bam].cur_prod_state = IPA_RM_RESOURCE_RELEASED; + complete_all(&info[*cur_bam].prod_released); + break; + default: + break; + } +} + +static int cons_request_resource(enum usb_ctrl cur_bam) +{ + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + int ret = -EINPROGRESS; + + log_event_dbg("%s: Request %s_CONS resource\n", + __func__, bam_enable_strings[cur_bam]); + + spin_lock(&ctx->usb_bam_lock); + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].cur_cons_state = IPA_RM_RESOURCE_GRANTED; + + switch (info[cur_bam].cur_bam_mode) { + case USB_BAM_DEVICE: + if (ctx->pipes_enabled_per_bam && + info[cur_bam].connect_complete) { + if (!all_pipes_suspended(cur_bam) && + !info[cur_bam].bus_suspend) { + log_event_dbg("%s: ACK on cons_request\n", + __func__); + ret = 0; + } else if (info[cur_bam].bus_suspend) { + info[cur_bam].bus_suspend = 0; + log_event_dbg("%s: Wake up host\n", __func__); + if (info[cur_bam].wake_cb) + info[cur_bam].wake_cb( + info[cur_bam].wake_param); + } + } + + break; + default: + break; + } + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + spin_unlock(&ctx->usb_bam_lock); + + if (ret == -EINPROGRESS) + log_event_dbg("%s: EINPROGRESS on cons_request\n", __func__); + + return ret; +} + +static int ss_usb_cons_request_resource(void) +{ + return cons_request_resource(DWC3_CTRL); +} + + +static int usb_cons_request_resource(void) +{ + return cons_request_resource(CI_CTRL); +} + +static int cons_release_resource(enum usb_ctrl cur_bam) +{ + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + + log_event_dbg("%s: Release %s_CONS resource\n", + __func__, bam_enable_strings[cur_bam]); + + info[cur_bam].cur_cons_state = IPA_RM_RESOURCE_RELEASED; + + spin_lock(&ctx->usb_bam_lock); + if (!ctx->pipes_enabled_per_bam) { + spin_unlock(&ctx->usb_bam_lock); + 
log_event_dbg("%s: ACK on cons_release\n", __func__); + return 0; + } + spin_unlock(&ctx->usb_bam_lock); + + if (info[cur_bam].cur_bam_mode == USB_BAM_DEVICE) { + spin_lock(&usb_bam_ipa_handshake_info_lock); + if (info[cur_bam].bus_suspend) { + queue_work(ctx->usb_bam_wq, + &info[cur_bam].finish_suspend_work); + } + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + log_event_dbg("%s: EINPROGRESS cons_release\n", __func__); + return -EINPROGRESS; + } + + return 0; +} + +static int usb_cons_release_resource(void) +{ + return cons_release_resource(CI_CTRL); +} + +static int ss_usb_cons_release_resource(void) +{ + return cons_release_resource(DWC3_CTRL); +} + +static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam) +{ + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct ipa_rm_create_params usb_prod_create_params; + struct ipa_rm_create_params usb_cons_create_params; + int ret; + + /* Create USB/HSIC_PROD entity */ + memset(&usb_prod_create_params, 0, sizeof(usb_prod_create_params)); + usb_prod_create_params.name = ipa_rm_resource_prod[cur_bam]; + usb_prod_create_params.reg_params.notify_cb = usb_prod_notify_cb; + usb_prod_create_params.reg_params.user_data + = &ctx->usb_bam_data->bam_type; + usb_prod_create_params.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(&usb_prod_create_params); + if (ret) { + log_event_err("%s: Failed to create USB_PROD resource\n", + __func__); + return; + } + + /* Create USB_CONS entity */ + memset(&usb_cons_create_params, 0, sizeof(usb_cons_create_params)); + usb_cons_create_params.name = ipa_rm_resource_cons[cur_bam]; + usb_cons_create_params.request_resource = request_resource_cb[cur_bam]; + usb_cons_create_params.release_resource = release_resource_cb[cur_bam]; + usb_cons_create_params.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(&usb_cons_create_params); + if (ret) { + log_event_err("%s: Failed to create USB_CONS resource\n", + __func__); + return; + } +} + +static void 
usb_bam_ipa_delete_resources(enum usb_ctrl cur_bam) +{ + int ret; + + ret = ipa_rm_delete_resource(ipa_rm_resource_prod[cur_bam]); + if (ret) + log_event_err("%s: Failed to delete USB_PROD resource\n", + __func__); + + ret = ipa_rm_delete_resource(ipa_rm_resource_cons[cur_bam]); + if (ret) + log_event_err("%s: Failed to delete USB_CONS resource\n", + __func__); + +} + +static void wait_for_prod_granted(enum usb_ctrl cur_bam) +{ + int ret; + + log_event_dbg("%s Request %s_PROD_RES\n", __func__, + bam_enable_strings[cur_bam]); + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_GRANTED) + log_event_dbg("%s: CONS already granted for some reason\n", + __func__); + if (info[cur_bam].cur_prod_state == IPA_RM_RESOURCE_GRANTED) + log_event_dbg("%s: PROD already granted for some reason\n", + __func__); + + init_completion(&info[cur_bam].prod_avail); + + ret = ipa_rm_request_resource(ipa_rm_resource_prod[cur_bam]); + if (!ret) { + info[cur_bam].cur_prod_state = IPA_RM_RESOURCE_GRANTED; + complete_all(&info[cur_bam].prod_avail); + log_event_dbg("%s: PROD_GRANTED without wait\n", __func__); + } else if (ret == -EINPROGRESS) { + log_event_dbg("%s: Waiting for PROD_GRANTED\n", __func__); + if (!wait_for_completion_timeout(&info[cur_bam].prod_avail, + USB_BAM_TIMEOUT)) + log_event_err("%s: Timeout wainting for PROD_GRANTED\n", + __func__); + } else + log_event_err("%s: ipa_rm_request_resource ret =%d\n", + __func__, ret); +} + +static void notify_usb_connected(enum usb_ctrl cur_bam) +{ + log_event_dbg("%s: enter\n", __func__); + + spin_lock(&usb_bam_ipa_handshake_info_lock); + if (info[cur_bam].cur_bam_mode == USB_BAM_DEVICE) + info[cur_bam].connect_complete = 1; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_GRANTED) { + log_event_dbg("%s: Notify %s CONS_GRANTED\n", __func__, + bam_enable_strings[cur_bam]); + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + ipa_rm_resource_cons[cur_bam]); + } +} + +static void 
wait_for_prod_release(enum usb_ctrl cur_bam) +{ + int ret; + + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_RELEASED) + log_event_dbg("%s consumer already released\n", __func__); + if (info[cur_bam].cur_prod_state == IPA_RM_RESOURCE_RELEASED) + log_event_dbg("%s producer already released\n", __func__); + + init_completion(&info[cur_bam].prod_released); + log_event_dbg("%s: Releasing %s_PROD\n", __func__, + bam_enable_strings[cur_bam]); + ret = ipa_rm_release_resource(ipa_rm_resource_prod[cur_bam]); + if (!ret) { + log_event_dbg("%s: Released without waiting\n", __func__); + info[cur_bam].cur_prod_state = IPA_RM_RESOURCE_RELEASED; + complete_all(&info[cur_bam].prod_released); + } else if (ret == -EINPROGRESS) { + log_event_dbg("%s: Waiting for PROD_RELEASED\n", __func__); + if (!wait_for_completion_timeout(&info[cur_bam].prod_released, + USB_BAM_TIMEOUT)) + log_event_err("%s: Timeout waiting for PROD_RELEASED\n", + __func__); + } else { + log_event_err("%s: ipa_rm_request_resource ret =%d\n", + __func__, ret); + } +} + +static bool check_pipes_empty(enum usb_ctrl bam_type, u8 src_idx, u8 dst_idx) +{ + struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type]; + struct sps_pipe *prod_pipe, *cons_pipe; + struct usb_bam_pipe_connect *prod_pipe_connect, *cons_pipe_connect; + u32 prod_empty, cons_empty; + + prod_pipe_connect = &ctx->usb_bam_connections[src_idx]; + cons_pipe_connect = &ctx->usb_bam_connections[dst_idx]; + if (!prod_pipe_connect->enabled || !cons_pipe_connect->enabled) { + log_event_err("%s: pipes are not enabled dst=%d src=%d\n", + __func__, prod_pipe_connect->enabled, + cons_pipe_connect->enabled); + } + + /* If we have any remaints in the pipes we don't go to sleep */ + prod_pipe = ctx->usb_bam_sps.sps_pipes[src_idx]; + cons_pipe = ctx->usb_bam_sps.sps_pipes[dst_idx]; + log_event_dbg("prod_pipe=%pK, cons_pipe=%pK\n", prod_pipe, cons_pipe); + + if (!cons_pipe || (!prod_pipe && + prod_pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM)) { + 
log_event_err("Missing a pipe!\n"); + return false; + } + + if (prod_pipe && sps_is_pipe_empty(prod_pipe, &prod_empty)) { + log_event_err("sps_is_pipe_empty(prod) failed\n"); + return false; + } + + prod_empty = true; + if (sps_is_pipe_empty(cons_pipe, &cons_empty)) { + log_event_err("sps_is_pipe_empty(cons) failed\n"); + return false; + } + + if (!prod_empty || !cons_empty) { + log_event_err("pipes not empty prod=%d cond=%d\n", + prod_empty, cons_empty); + return false; + } + + return true; + +} + +void usb_bam_suspend(enum usb_ctrl cur_bam, + struct usb_bam_connect_ipa_params *ipa_params) +{ + struct usb_bam_pipe_connect *pipe_connect; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + enum usb_bam_mode bam_mode; + u8 src_idx, dst_idx; + + log_event_dbg("%s: enter\n", __func__); + + if (!ipa_params) { + log_event_err("%s: Invalid ipa params\n", __func__); + return; + } + + src_idx = ipa_params->src_idx; + dst_idx = ipa_params->dst_idx; + + if (src_idx >= ctx->max_connections || + dst_idx >= ctx->max_connections) { + log_event_err("%s: Invalid connection index src=%d dst=%d\n", + __func__, src_idx, dst_idx); + } + + pipe_connect = &ctx->usb_bam_connections[src_idx]; + bam_mode = pipe_connect->bam_mode; + if (bam_mode != USB_BAM_DEVICE) + return; + + log_event_dbg("%s: Starting suspend sequence(BAM=%s)\n", __func__, + bam_enable_strings[cur_bam]); + + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].bus_suspend = 1; + + /* If cable was disconnected, let disconnection seq do everything */ + if (info[cur_bam].disconnected) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + log_event_dbg("%s: Cable disconnected\n", __func__); + return; + } + + log_event_dbg("%s: Adding src=%d dst=%d in pipes_to_suspend=%d\n", + __func__, src_idx, + dst_idx, info[cur_bam].pipes_to_suspend); + info[cur_bam].suspend_src_idx[info[cur_bam].pipes_to_suspend] = src_idx; + info[cur_bam].suspend_dst_idx[info[cur_bam].pipes_to_suspend] = dst_idx; + 
info[cur_bam].pipes_to_suspend++; + + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + usb_bam_start_suspend(&info[cur_bam]); +} + +static void usb_bam_start_suspend(struct usb_bam_ipa_handshake_info *info_ptr) +{ + struct usb_bam_pipe_connect *pipe_connect; + enum usb_ctrl cur_bam = info_ptr->bam_type; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + u8 src_idx, dst_idx; + int pipes_to_suspend; + + cur_bam = info_ptr->bam_type; + log_event_dbg("%s: Starting suspend sequence(BAM=%s)\n", __func__, + bam_enable_strings[cur_bam]); + + mutex_lock(&info[cur_bam].suspend_resume_mutex); + + spin_lock(&usb_bam_ipa_handshake_info_lock); + /* If cable was disconnected, let disconnection seq do everything */ + if (info[cur_bam].disconnected) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + log_event_dbg("%s: Cable disconnected\n", __func__); + return; + } + + pipes_to_suspend = info[cur_bam].pipes_to_suspend; + if (!info[cur_bam].bus_suspend || !pipes_to_suspend) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + log_event_dbg("%s: Resume started, not suspending\n", __func__); + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + return; + } + + src_idx = info[cur_bam].suspend_src_idx[pipes_to_suspend - 1]; + dst_idx = info[cur_bam].suspend_dst_idx[pipes_to_suspend - 1]; + + pipe_connect = &ctx->usb_bam_connections[dst_idx]; + stop_prod_transfers(pipe_connect); + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + /* Don't start LPM seq if data in the pipes */ + if (!check_pipes_empty(cur_bam, src_idx, dst_idx)) { + start_prod_transfers(pipe_connect); + info[cur_bam].pipes_to_suspend = 0; + info[cur_bam].bus_suspend = 0; + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + return; + } + + spin_lock(&usb_bam_ipa_handshake_info_lock); + + /* Start release handshake on the last pipe */ + if (info[cur_bam].pipes_to_suspend * 2 == ctx->pipes_enabled_per_bam) { + 
spin_unlock(&usb_bam_ipa_handshake_info_lock); + wait_for_prod_release(cur_bam); + } else { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + } + + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_RELEASED) + usb_bam_finish_suspend(cur_bam); + else + log_event_dbg("Consumer not released yet\n"); +} + +static void usb_bam_finish_resume(struct work_struct *w) +{ + /* TODO: Change this when HSIC device support is introduced */ + enum usb_ctrl cur_bam; + struct usb_bam_ipa_handshake_info *info_ptr; + struct usb_bam_pipe_connect *pipe_connect; + struct usb_bam_ctx_type *ctx; + struct device *bam_dev; + u32 idx, dst_idx, suspended; + + info_ptr = container_of(w, struct usb_bam_ipa_handshake_info, + resume_work); + cur_bam = info_ptr->bam_type; + ctx = &msm_usb_bam[cur_bam]; + bam_dev = &ctx->usb_bam_pdev->dev; + + log_event_dbg("%s: enter bam=%s, RT GET: %d\n", __func__, + bam_enable_strings[cur_bam], get_pm_runtime_counter(bam_dev)); + + pm_runtime_get_sync(bam_dev); + + mutex_lock(&info[cur_bam].suspend_resume_mutex); + + /* Suspend or disconnect happened in the meantime */ + spin_lock(&usb_bam_ipa_handshake_info_lock); + if (info[cur_bam].bus_suspend || info[cur_bam].disconnected) { + spin_unlock(&usb_bam_ipa_handshake_info_lock); + log_event_dbg("%s: Bus suspended, not resuming, RT PUT: %d\n", + __func__, get_pm_runtime_counter(bam_dev)); + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + pm_runtime_put_sync(bam_dev); + return; + } + info[cur_bam].pipes_to_suspend = 0; + + log_event_dbg("Resuming: pipes_suspended =%d\n", + info[cur_bam].pipes_suspended); + + suspended = info[cur_bam].pipes_suspended; + while (suspended >= 1) { + idx = suspended - 1; + dst_idx = info[cur_bam].resume_dst_idx[idx]; + pipe_connect = &ctx->usb_bam_connections[dst_idx]; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + reset_pipe_for_resume(pipe_connect); + spin_lock(&usb_bam_ipa_handshake_info_lock); + if 
(pipe_connect->cons_stopped) { + log_event_dbg("%s: Starting CONS on %d\n", __func__, + dst_idx); + start_cons_transfers(pipe_connect); + } + suspended--; + } + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_GRANTED) { + log_event_dbg("%s: Notify CONS_GRANTED\n", __func__); + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + ipa_rm_resource_cons[cur_bam]); + } + spin_unlock(&usb_bam_ipa_handshake_info_lock); + + /* Start handshake for the first pipe resumed */ + if (info[cur_bam].pipes_resumed == 0) + wait_for_prod_granted(cur_bam); + + spin_lock(&usb_bam_ipa_handshake_info_lock); + while (info[cur_bam].pipes_suspended >= 1) { + idx = info[cur_bam].pipes_suspended - 1; + dst_idx = info[cur_bam].resume_dst_idx[idx]; + pipe_connect = &ctx->usb_bam_connections[dst_idx]; + log_event_dbg("%s: Starting PROD on %d\n", __func__, dst_idx); + start_prod_transfers(pipe_connect); + info[cur_bam].pipes_suspended--; + info[cur_bam].pipes_resumed++; + log_event_dbg("%s: PM Runtime GET %d, count: %d\n", + __func__, idx, get_pm_runtime_counter(bam_dev)); + pm_runtime_get(&ctx->usb_bam_pdev->dev); + } + + if (info[cur_bam].pipes_resumed == ctx->pipes_enabled_per_bam) { + info[cur_bam].pipes_resumed = 0; + if (info[cur_bam].cur_cons_state == IPA_RM_RESOURCE_GRANTED) { + log_event_dbg("%s: Notify CONS_GRANTED\n", __func__); + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + ipa_rm_resource_cons[cur_bam]); + } + } + + spin_unlock(&usb_bam_ipa_handshake_info_lock); + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + log_event_dbg("%s: done..PM Runtime PUT :%d\n", + __func__, get_pm_runtime_counter(bam_dev)); + /* Put to match _get at the beginning of this routine */ + pm_runtime_put(&ctx->usb_bam_pdev->dev); +} + +void usb_bam_resume(enum usb_ctrl cur_bam, + struct usb_bam_connect_ipa_params *ipa_params) +{ + u8 src_idx, dst_idx; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + + log_event_dbg("%s: Resuming\n", 
__func__); + + if (!ipa_params) { + log_event_err("%s: Invalid ipa params\n", __func__); + return; + } + + src_idx = ipa_params->src_idx; + dst_idx = ipa_params->dst_idx; + + if (src_idx >= ctx->max_connections || + dst_idx >= ctx->max_connections) { + log_event_err("%s: Invalid connection index src=%d dst=%d\n", + __func__, src_idx, dst_idx); + return; + } + + pipe_connect = &ctx->usb_bam_connections[src_idx]; + log_event_dbg("%s: bam=%s mode =%d\n", __func__, + bam_enable_strings[cur_bam], pipe_connect->bam_mode); + if (pipe_connect->bam_mode != USB_BAM_DEVICE) + return; + + info[cur_bam].in_lpm = false; + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].bus_suspend = 0; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + queue_work(ctx->usb_bam_wq, &info[cur_bam].resume_work); +} + +static int usb_bam_set_ipa_perf(enum usb_ctrl cur_bam, + enum usb_bam_pipe_dir dir, + enum usb_device_speed usb_connection_speed) +{ + int ret; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct ipa_rm_perf_profile ipa_rm_perf_prof; + + if (usb_connection_speed == USB_SPEED_SUPER) + ipa_rm_perf_prof.max_supported_bandwidth_mbps = + ctx->usb_bam_data->max_mbps_superspeed; + else + /* Bam2Bam is supported only for SS and HS (HW limitation) */ + ipa_rm_perf_prof.max_supported_bandwidth_mbps = + ctx->usb_bam_data->max_mbps_highspeed; + + /* + * Having a max mbps property in dtsi file is a must + * for target with IPA capability. 
+ */ + if (!ipa_rm_perf_prof.max_supported_bandwidth_mbps) { + log_event_err("%s: Max mbps is required for speed %d\n", + __func__, usb_connection_speed); + return -EINVAL; + } + + if (dir == USB_TO_PEER_PERIPHERAL) { + log_event_dbg("%s: vote ipa_perf resource=%d perf=%d mbps\n", + __func__, ipa_rm_resource_prod[cur_bam], + ipa_rm_perf_prof.max_supported_bandwidth_mbps); + ret = ipa_rm_set_perf_profile(ipa_rm_resource_prod[cur_bam], + &ipa_rm_perf_prof); + } else { + log_event_dbg("%s: vote ipa_perf resource=%d perf=%d mbps\n", + __func__, ipa_rm_resource_cons[cur_bam], + ipa_rm_perf_prof.max_supported_bandwidth_mbps); + ret = ipa_rm_set_perf_profile(ipa_rm_resource_cons[cur_bam], + &ipa_rm_perf_prof); + } + + return ret; +} + +int usb_bam_connect_ipa(enum usb_ctrl cur_bam, + struct usb_bam_connect_ipa_params *ipa_params) +{ + u8 idx; + enum usb_bam_mode cur_mode; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + struct device *bam_dev = &ctx->usb_bam_pdev->dev; + int ret; + bool bam2bam, is_dpl; + + log_event_dbg("%s: start\n", __func__); + + if (!ipa_params) { + log_event_err("%s: Invalid ipa params\n", __func__); + return -EINVAL; + } + + if (ipa_params->dir == USB_TO_PEER_PERIPHERAL) + idx = ipa_params->src_idx; + else + idx = ipa_params->dst_idx; + + if (idx >= ctx->max_connections) { + log_event_err("%s: Invalid connection index\n", __func__); + return -EINVAL; + } + pipe_connect = &ctx->usb_bam_connections[idx]; + + if (pipe_connect->enabled) { + log_event_err("%s: connection %d was already established\n", + __func__, idx); + return 0; + } + + ret = usb_bam_set_ipa_perf(pipe_connect->bam_type, ipa_params->dir, + ipa_params->usb_connection_speed); + if (ret) { + log_event_err("%s: call to usb_bam_set_ipa_perf failed %d\n", + __func__, ret); + return ret; + } + + log_event_dbg("%s: enter\n", __func__); + + cur_mode = pipe_connect->bam_mode; + bam2bam = (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM); + + 
if (ipa_params->dst_client == IPA_CLIENT_USB_DPL_CONS) + is_dpl = true; + else + is_dpl = false; + + /* Set the BAM mode (host/device) according to connected pipe */ + info[cur_bam].cur_bam_mode = pipe_connect->bam_mode; + + if (cur_mode == USB_BAM_DEVICE) { + mutex_lock(&info[cur_bam].suspend_resume_mutex); + + spin_lock(&ctx->usb_bam_lock); + if (ctx->pipes_enabled_per_bam == 0) { + spin_unlock(&ctx->usb_bam_lock); + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].connect_complete = 0; + info[cur_bam].disconnected = 0; + info[cur_bam].bus_suspend = 0; + info[cur_bam].pipes_suspended = 0; + info[cur_bam].pipes_to_suspend = 0; + info[cur_bam].pipes_resumed = 0; + spin_unlock(&usb_bam_ipa_handshake_info_lock); + } else { + spin_unlock(&ctx->usb_bam_lock); + } + pipe_connect->cons_stopped = 0; + pipe_connect->prod_stopped = 0; + } + + log_event_dbg("%s: PM Runtime GET %d, count: %d\n", + __func__, idx, get_pm_runtime_counter(bam_dev)); + pm_runtime_get_sync(bam_dev); + + /* Check if BAM requires RESET before connect and reset first pipe */ + spin_lock(&ctx->usb_bam_lock); + if (ctx->usb_bam_data->reset_on_connect && + !ctx->pipes_enabled_per_bam) { + spin_unlock(&ctx->usb_bam_lock); + if (cur_bam == CI_CTRL) + msm_hw_bam_disable(1); + + sps_device_reset(ctx->h_bam); + + if (cur_bam == CI_CTRL) + msm_hw_bam_disable(0); + + /* On re-connect assume out from lpm for all BAMs */ + info[cur_bam].in_lpm = false; + } else { + spin_unlock(&ctx->usb_bam_lock); + if (!ctx->pipes_enabled_per_bam) + pr_debug("No BAM reset on connect, just pipe reset\n"); + } + + if (ipa_params->dir == USB_TO_PEER_PERIPHERAL) { + if (info[cur_bam].prod_pipes_enabled_per_bam == 0) + wait_for_prod_granted(cur_bam); + info[cur_bam].prod_pipes_enabled_per_bam += 1; + } + + if (bam2bam) + ret = connect_pipe_bam2bam_ipa(cur_bam, idx, ipa_params); + else + ret = connect_pipe_sys2bam_ipa(cur_bam, idx, ipa_params); + + if (ret) { + log_event_err("%s: pipe connection failure RT PUT: %d\n", + 
__func__, get_pm_runtime_counter(bam_dev)); + pm_runtime_put_sync(bam_dev); + if (cur_mode == USB_BAM_DEVICE) + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + return ret; + } + + log_event_dbg("%s: pipe connection success\n", __func__); + spin_lock(&ctx->usb_bam_lock); + pipe_connect->enabled = 1; + pipe_connect->suspended = 0; + + /* Set global inactivity timer upon first pipe connection */ + if (!ctx->pipes_enabled_per_bam && ctx->inactivity_timer_ms && + pipe_connect->inactivity_notify && bam2bam) + usb_bam_set_inactivity_timer(cur_bam); + + ctx->pipes_enabled_per_bam += 1; + + /* + * Notify USB connected on the first two pipes connected for + * tethered function's producer and consumer only. Current + * understanding is that there won't be more than 3 pipes used + * in USB BAM2BAM IPA mode i.e. 2 consumers and 1 producer. + * If more producer and consumer pipe are being used, this + * logic is required to be revisited here. + */ + if (ctx->pipes_enabled_per_bam >= 2 && + ipa_params->dir == PEER_PERIPHERAL_TO_USB && !is_dpl) + notify_usb_connected(cur_bam); + spin_unlock(&ctx->usb_bam_lock); + + if (cur_mode == USB_BAM_DEVICE) + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + + log_event_dbg("%s: done\n", __func__); + return 0; +} +EXPORT_SYMBOL(usb_bam_connect_ipa); + int usb_bam_get_pipe_type(enum usb_ctrl bam_type, u8 idx, enum usb_bam_pipe_type *type) { @@ -752,6 +2261,18 @@ static void usb_bam_work(struct work_struct *w) usb_bam_set_inactivity_timer(pipe_connect->bam_type); spin_unlock(&ctx->usb_bam_lock); + if (pipe_connect->bam_mode == USB_BAM_DEVICE) { + /* A2 wakeup not from LPM (CONS was up) */ + wait_for_prod_granted(pipe_connect->bam_type); + if (pipe_connect->start) { + log_event_dbg("%s: Enqueue PROD transfer\n", + __func__); + pipe_connect->start( + pipe_connect->start_stop_param, + USB_TO_PEER_PERIPHERAL); + } + } + break; case USB_BAM_EVENT_INACTIVITY: @@ -794,6 +2315,24 @@ static void usb_bam_work(struct work_struct *w) if (callback) 
callback(param); + + wait_for_prod_release(pipe_connect->bam_type); + log_event_dbg("%s: complete wait on hsic producer s=%d\n", + __func__, info[pipe_connect->bam_type].cur_prod_state); + + /* + * Allow to go to lpm for now if also consumer is down. + * If consumer is up, we will wait to the release consumer + * notification. + */ + if (host_info[pipe_connect->bam_type].dev && + info[pipe_connect->bam_type].cur_cons_state == + IPA_RM_RESOURCE_RELEASED && + !info[pipe_connect->bam_type].in_lpm) { + usb_bam_suspend_core(pipe_connect->bam_type, + pipe_connect->bam_mode, 1); + } + break; default: log_event_err("%s: unknown usb bam event type %d\n", __func__, @@ -887,6 +2426,12 @@ static int __usb_bam_register_wake_cb(enum usb_ctrl bam_type, int idx, int usb_bam_register_wake_cb(enum usb_ctrl bam_type, u8 idx, int (*callback)(void *user), void *param) { + struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type]; + struct usb_bam_pipe_connect *pipe_connect = + &ctx->usb_bam_connections[idx]; + + info[pipe_connect->bam_type].wake_cb = callback; + info[pipe_connect->bam_type].wake_param = param; return __usb_bam_register_wake_cb(bam_type, idx, callback, param, true); } @@ -944,15 +2489,105 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx) sps_device_reset(ctx->h_bam); /* This function is directly called by USB Transport drivers - * to disconnect pipes. Drop runtime usage count here. + * to disconnect pipes. Drop runtime usage count here. 
For + * IPA, caller takes care of it */ - log_event_dbg("%s: PM Runtime PUT %d, count: %d\n", __func__, idx, - get_pm_runtime_counter(bam_dev)); - pm_runtime_put_sync(bam_dev); + + if (pipe_connect->peer_bam != IPA_P_BAM) { + log_event_dbg("%s: PM Runtime PUT %d, count: %d\n", + __func__, idx, get_pm_runtime_counter(bam_dev)); + pm_runtime_put_sync(bam_dev); + } + return 0; } +/** + * is_ipa_handle_valid: Check if ipa_handle is valid or not + * @ipa_handle: IPA Handle for producer or consumer + * + * Returns true is ipa handle is valid. + */ +static bool is_ipa_handle_valid(u32 ipa_handle) +{ + + return (ipa_handle != -1); +} + +int usb_bam_disconnect_ipa(enum usb_ctrl cur_bam, + struct usb_bam_connect_ipa_params *ipa_params) +{ + int ret = 0, pipes_disconncted = 0; + u8 idx = 0; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + struct device *bam_dev = &ctx->usb_bam_pdev->dev; + enum usb_bam_mode bam_mode; + + if (!is_ipa_handle_valid(ipa_params->prod_clnt_hdl) && + !is_ipa_handle_valid(ipa_params->cons_clnt_hdl)) { + log_event_err("%s: Both IPA handles are invalid.\n", __func__); + return -EINVAL; + } + + log_event_dbg("%s: Starting disconnect sequence\n", __func__); + log_event_dbg("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__, + ipa_params->prod_clnt_hdl, ipa_params->cons_clnt_hdl); + if (is_ipa_handle_valid(ipa_params->prod_clnt_hdl)) + idx = ipa_params->dst_idx; + if (is_ipa_handle_valid(ipa_params->cons_clnt_hdl)) + idx = ipa_params->src_idx; + pipe_connect = &ctx->usb_bam_connections[idx]; + bam_mode = pipe_connect->bam_mode; + + if (bam_mode != USB_BAM_DEVICE) + return -EINVAL; + + mutex_lock(&info[cur_bam].suspend_resume_mutex); + /* Delay USB core to go into lpm before we finish our handshake */ + if (is_ipa_handle_valid(ipa_params->prod_clnt_hdl)) { + ret = usb_bam_disconnect_ipa_prod(ipa_params, cur_bam); + if (ret) + goto out; + pipes_disconncted++; + } + + if 
(is_ipa_handle_valid(ipa_params->cons_clnt_hdl)) { + ret = usb_bam_disconnect_ipa_cons(ipa_params, cur_bam); + if (ret) + goto out; + pipes_disconncted++; + } + + /* Notify CONS release on the last cons pipe released */ + if (!ctx->pipes_enabled_per_bam) { + if (info[cur_bam].cur_cons_state == + IPA_RM_RESOURCE_RELEASED) { + log_event_dbg("%s: Notify CONS_RELEASED\n", __func__); + ipa_rm_notify_completion( + IPA_RM_RESOURCE_RELEASED, + ipa_rm_resource_cons[cur_bam]); + } + } + +out: + /* Pipes are connected one by one, but can get disconnected in pairs */ + while (pipes_disconncted--) { + if (!info[cur_bam].pipes_suspended) { + log_event_dbg("%s: PM Runtime PUT %d, count: %d\n", + __func__, pipes_disconncted, + get_pm_runtime_counter(bam_dev)); + pm_runtime_put_sync(&ctx->usb_bam_pdev->dev); + } + } + + mutex_unlock(&info[cur_bam].suspend_resume_mutex); + + return ret; +} +EXPORT_SYMBOL(usb_bam_disconnect_ipa); + static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user) { int i; @@ -1266,7 +2901,7 @@ static int usb_bam_panic_notifier(struct notifier_block *this, if (i == MAX_BAMS) goto fail; - if (!ctx->pipes_enabled_per_bam) + if (!ctx->pipes_enabled_per_bam || info[i].pipes_suspended) goto fail; pr_err("%s: dump usb bam registers here in call back!\n", @@ -1336,6 +2971,17 @@ static int usb_bam_probe(struct platform_device *pdev) usb_bam_work); } + init_completion(&info[bam_type].prod_avail); + complete(&info[bam_type].prod_avail); + init_completion(&info[bam_type].prod_released); + complete(&info[bam_type].prod_released); + info[bam_type].cur_prod_state = IPA_RM_RESOURCE_RELEASED; + info[bam_type].cur_cons_state = IPA_RM_RESOURCE_RELEASED; + info[bam_type].bam_type = bam_type; + INIT_WORK(&info[bam_type].resume_work, usb_bam_finish_resume); + INIT_WORK(&info[bam_type].finish_suspend_work, usb_bam_finish_suspend_); + mutex_init(&info[bam_type].suspend_resume_mutex); + ctx->usb_bam_wq = alloc_workqueue("usb_bam_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 
1); if (!ctx->usb_bam_wq) { @@ -1353,12 +2999,23 @@ static int usb_bam_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); + spin_lock_init(&usb_bam_ipa_handshake_info_lock); + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS && + ipa_is_ready()) + usb_bam_ipa_create_resources(bam_type); spin_lock_init(&ctx->usb_bam_lock); usb_bam_register_panic_hdlr(); return ret; } +bool usb_bam_get_prod_granted(enum usb_ctrl bam_type, u8 idx) +{ + return (info[bam_type].cur_prod_state == IPA_RM_RESOURCE_GRANTED); +} +EXPORT_SYMBOL(usb_bam_get_prod_granted); + + int get_bam2bam_connection_info(enum usb_ctrl bam_type, u8 idx, u32 *usb_bam_pipe_idx, struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo, enum usb_pipe_mem_type *mem_type) @@ -1449,6 +3106,7 @@ static int usb_bam_remove(struct platform_device *pdev) { struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev); + usb_bam_ipa_delete_resources(ctx->usb_bam_data->bam_type); usb_bam_unregister_panic_hdlr(); sps_deregister_bam_device(ctx->h_bam); destroy_workqueue(ctx->usb_bam_wq); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 06d7f823be81..90681a129a63 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -190,6 +190,9 @@ config USB_RNDIS config USB_F_RNDIS tristate +config USB_F_QCRNDIS + tristate + config USB_F_MASS_STORAGE tristate @@ -247,6 +250,9 @@ config USB_F_MTP config USB_F_PTP tristate +config USB_F_RMNET_BAM + tristate + # this first set of drivers all depend on bulk-capable hardware. config USB_CONFIGFS @@ -331,6 +337,15 @@ config USB_CONFIGFS_ECM_SUBSET On hardware that can't implement the full protocol, a simple CDC subset is used, placing fewer demands on USB. 
+config USB_CONFIGFS_QCRNDIS + bool "QCRNDIS" + depends on USB_CONFIGFS + depends on NET + depends on RNDIS_IPA + select USB_U_ETHER + select USB_RNDIS + select USB_F_QCRNDIS + config USB_CONFIGFS_RNDIS bool "RNDIS" depends on USB_CONFIGFS @@ -348,6 +363,18 @@ config USB_CONFIGFS_RNDIS XP, you'll need to download drivers from Microsoft's website; a URL is given in comments found in that info file. +config USB_CONFIGFS_RMNET_BAM + bool "RMNET_BAM" + depends on USB_CONFIGFS + select USB_F_RMNET_BAM + help + RmNet interface is a new logical device in QMI framework for data + services. RmNet in accordance with QMI architecture uses Data I/O + channel for IP data transfer and control I/O channel for QMI + messaging (functionality similar to AT commands). + RmNet interface is an alternative to standard CDC-ECM and windows + RNDIS. + config USB_CONFIGFS_EEM bool "Ethernet Emulation Model (EEM)" depends on USB_CONFIGFS diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 7d81abbdcaa9..b9c7f6867ba6 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -69,3 +69,5 @@ usb_f_mtp-y := f_mtp.o obj-$(CONFIG_USB_F_MTP) += usb_f_mtp.o usb_f_ptp-y := f_ptp.o obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o +usb_f_qcrndis-y := f_qc_rndis.o u_data_ipa.o +obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c new file mode 100644 index 000000000000..0bdf5a22263a --- /dev/null +++ b/drivers/usb/gadget/function/f_qc_rndis.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * f_qc_rndis.c -- RNDIS link function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2009 Samsung Electronics + * Author: Michal Nazarewicz (mina86@mina86.com) + * Copyright (c) 2012-2020, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include + +#include + +#include "u_ether.h" +#include "rndis.h" +#include "u_data_ipa.h" +#include +#include "configfs.h" + +static unsigned int rndis_dl_max_xfer_size = 9216; +module_param(rndis_dl_max_xfer_size, uint, 0644); +MODULE_PARM_DESC(rndis_dl_max_xfer_size, + "Max size of bus transfer to host"); + +static struct class *rndis_class; +static dev_t rndis_dev; +static DEFINE_IDA(chardev_ida); + +/* + * This function is an RNDIS Ethernet port -- a Microsoft protocol that's + * been promoted instead of the standard CDC Ethernet. The published RNDIS + * spec is ambiguous, incomplete, and needlessly complex. Variants such as + * ActiveSync have even worse status in terms of specification. + * + * In short: it's a protocol controlled by (and for) Microsoft, not for an + * Open ecosystem or markets. Linux supports it *only* because Microsoft + * doesn't support the CDC Ethernet standard. + * + * The RNDIS data transfer model is complex, with multiple Ethernet packets + * per USB message, and out of band data. The control model is built around + * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM + * (modem, not Ethernet) veneer, with those ACM descriptors being entirely + * useless (they're ignored). RNDIS expects to be the only function in its + * configuration, so it's no real help if you need composite devices; and + * it expects to be the first configuration too. 
+ * + * There is a single technical advantage of RNDIS over CDC Ethernet, if you + * discount the fluff that its RPC can be made to deliver: it doesn't need + * a NOP altsetting for the data interface. That lets it work on some of the + * "so smart it's stupid" hardware which takes over configuration changes + * from the software, and adds restrictions like "no altsettings". + * + * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and + * have all sorts of contrary-to-specification oddities that can prevent + * them from working sanely. Since bugfixes (or accurate specs, letting + * Linux work around those bugs) are unlikely to ever come from MSFT, you + * may want to avoid using RNDIS on purely operational grounds. + * + * Omissions from the RNDIS 1.0 specification include: + * + * - Power management ... references data that's scattered around lots + * of other documentation, which is incorrect/incomplete there too. + * + * - There are various undocumented protocol requirements, like the need + * to send garbage in some control-OUT messages. + * + * - MS-Windows drivers sometimes emit undocumented requests. + * + * This function is based on RNDIS link function driver and + * contains MSM specific implementation. 
+ */ + +struct f_rndis_qc { + struct usb_function func; + u8 ctrl_id, data_id; + u8 ethaddr[ETH_ALEN]; + u32 vendorID; + u8 ul_max_pkt_per_xfer; + u8 pkt_alignment_factor; + u32 max_pkt_size; + const char *manufacturer; + struct rndis_params *params; + atomic_t ioctl_excl; + atomic_t open_excl; + + struct usb_ep *notify; + struct usb_request *notify_req; + atomic_t notify_count; + struct data_port bam_port; + struct cdev cdev; + struct device *dev; + u8 port_num; + u16 cdc_filter; + bool net_ready_trigger; +}; + +static struct ipa_usb_init_params rndis_ipa_params; +static spinlock_t rndis_lock; +static bool rndis_ipa_supported; +static void rndis_qc_open(struct f_rndis_qc *rndis); + +static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f) +{ + return container_of(f, struct f_rndis_qc, func); +} + +/* peak (theoretical) bulk transfer rate in bits-per-second */ +static unsigned int rndis_qc_bitrate(struct usb_gadget *g) +{ + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + return 13 * 1024 * 8 * 1000 * 8; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else + return 19 * 64 * 1 * 1000 * 8; +} + +/*-------------------------------------------------------------------------*/ + +#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ +#define RNDIS_QC_STATUS_BYTECOUNT 8 /* 8 bytes data */ + +/* currently only one rndis instance is supported - port + * index 0. 
 */
#define RNDIS_QC_NO_PORTS					1
#define RNDIS_QC_ACTIVE_PORT				0

/* default max packets per transfer value */
#define DEFAULT_MAX_PKT_PER_XFER			15

/* default pkt alignment factor */
#define DEFAULT_PKT_ALIGNMENT_FACTOR			4

/* char-dev ioctls exposing UL aggregation limits to userspace */
#define RNDIS_QC_IOCTL_MAGIC		'i'
#define RNDIS_QC_GET_MAX_PKT_PER_XFER	_IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
#define RNDIS_QC_GET_MAX_PKT_SIZE	_IOR(RNDIS_QC_IOCTL_MAGIC, 2, u32)


/* interface descriptor: */

/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows*/
static struct usb_interface_descriptor rndis_qc_control_intf = {
	.bLength =		sizeof(rndis_qc_control_intf),
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	/* status endpoint is optional; this could be patched later */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_WIRELESS_CONTROLLER,
	.bInterfaceSubClass =	0x01,
	.bInterfaceProtocol =	0x03,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc rndis_qc_header_desc = {
	.bLength =		sizeof(rndis_qc_header_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
	.bLength =		sizeof(rndis_qc_call_mgmt_descriptor),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,

	.bmCapabilities =	0x00,
	.bDataInterface =	0x01,
};

static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
	.bLength =		sizeof(rndis_qc_acm_descriptor),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ACM_TYPE,

	.bmCapabilities =	0x00,
};

static struct usb_cdc_union_desc rndis_qc_union_desc = {
	.bLength =		sizeof(rndis_qc_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

/* the data interface has two bulk endpoints */

static struct usb_interface_descriptor rndis_qc_data_intf = {
	.bLength =		sizeof(rndis_qc_data_intf),
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};


/* Supports "Wireless" RNDIS; auto-detected by Windows */
static struct usb_interface_assoc_descriptor
rndis_qc_iad_descriptor = {
	.bLength =		sizeof(rndis_qc_iad_descriptor),
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
	.bFirstInterface =	0, /* XXX, hardcoded */
	.bInterfaceCount =	2, /* control + data */
	.bFunctionClass =	USB_CLASS_WIRELESS_CONTROLLER,
	.bFunctionSubClass =	0x01,
	.bFunctionProtocol =	0x03,
	/* .iFunction = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
	.bInterval =		1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
};

static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eth_qc_fs_function[] = {
	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_qc_control_intf,
	(struct usb_descriptor_header *) &rndis_qc_header_desc,
	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_qc_union_desc,
	(struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_qc_data_intf,
	(struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
	(struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
	NULL,
};

/* high speed support: */

static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
};
static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *eth_qc_hs_function[] = {
	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_qc_control_intf,
	(struct usb_descriptor_header *) &rndis_qc_header_desc,
	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_qc_union_desc,
	(struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_qc_data_intf,
	(struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
	(struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
	NULL,
};

/* super speed support: */

static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
};

static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
	.bLength =		sizeof(rndis_qc_ss_intr_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
	.wBytesPerInterval =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
};

static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
	.bLength =		sizeof(rndis_qc_ss_bulk_comp_desc),
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *eth_qc_ss_function[] = {
	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_qc_control_intf,
	(struct usb_descriptor_header *) &rndis_qc_header_desc,
	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
	(struct usb_descriptor_header *)
&rndis_qc_acm_descriptor, + (struct usb_descriptor_header *) &rndis_qc_union_desc, + (struct usb_descriptor_header *) &rndis_qc_ss_notify_desc, + (struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc, + + /* data interface has no altsetting */ + (struct usb_descriptor_header *) &rndis_qc_data_intf, + (struct usb_descriptor_header *) &rndis_qc_ss_in_desc, + (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc, + (struct usb_descriptor_header *) &rndis_qc_ss_out_desc, + (struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string rndis_qc_string_defs[] = { + [0].s = "RNDIS Communications Control", + [1].s = "RNDIS Ethernet Data", + [2].s = "RNDIS", + { } /* end of list */ +}; + +static struct usb_gadget_strings rndis_qc_string_table = { + .language = 0x0409, /* en-us */ + .strings = rndis_qc_string_defs, +}; + +static struct usb_gadget_strings *rndis_qc_strings[] = { + &rndis_qc_string_table, + NULL, +}; + +static struct f_rndis_qc *_rndis_qc; + +static inline int rndis_qc_lock(atomic_t *excl) +{ + if (atomic_inc_return(excl) == 1) + return 0; + + atomic_dec(excl); + return -EBUSY; +} + +static inline void rndis_qc_unlock(atomic_t *excl) +{ + atomic_dec(excl); +} + +/*-------------------------------------------------------------------------*/ + +static void rndis_qc_response_available(void *_rndis) +{ + struct f_rndis_qc *rndis = _rndis; + struct usb_request *req = rndis->notify_req; + __le32 *data = req->buf; + int status; + + if (atomic_inc_return(&rndis->notify_count) != 1) + return; + + if (!rndis->notify->driver_data) + return; + + /* Send RNDIS RESPONSE_AVAILABLE notification; a + * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too + * + * This is the only notification defined by RNDIS. 
+ */ + data[0] = cpu_to_le32(1); + data[1] = cpu_to_le32(0); + + status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&rndis->notify_count); + pr_info("notify/0 --> %d\n", status); + } +} + +static void rndis_qc_response_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct f_rndis_qc *rndis; + int status = req->status; + struct usb_composite_dev *cdev; + struct usb_ep *notify_ep; + + spin_lock(&rndis_lock); + rndis = _rndis_qc; + if (!rndis || !rndis->notify || !rndis->notify->driver_data) { + spin_unlock(&rndis_lock); + return; + } + + if (!rndis->func.config || !rndis->func.config->cdev) { + pr_err("%s(): cdev or config is NULL.\n", __func__); + spin_unlock(&rndis_lock); + return; + } + + cdev = rndis->func.config->cdev; + + /* after TX: + * - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control) + * - RNDIS_RESPONSE_AVAILABLE (status/irq) + */ + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&rndis->notify_count, 0); + goto out; + default: + pr_info("RNDIS %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + /* FALLTHROUGH */ + case 0: + if (ep != rndis->notify) + goto out; + + /* handle multiple pending RNDIS_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&rndis->notify_count)) + goto out; + notify_ep = rndis->notify; + spin_unlock(&rndis_lock); + status = usb_ep_queue(notify_ep, req, GFP_ATOMIC); + if (status) { + spin_lock(&rndis_lock); + if (!_rndis_qc) + goto out; + atomic_dec(&_rndis_qc->notify_count); + DBG(cdev, "notify/1 --> %d\n", status); + spin_unlock(&rndis_lock); + } + } + + return; + +out: + spin_unlock(&rndis_lock); +} + +static void rndis_qc_command_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct f_rndis_qc *rndis; + int status; + rndis_init_msg_type *buf; + u32 ul_max_xfer_size, dl_max_xfer_size; + + if (req->status != 0) { + pr_err("%s: RNDIS command completion 
error %d\n", + __func__, req->status); + return; + } + + spin_lock(&rndis_lock); + rndis = _rndis_qc; + if (!rndis || !rndis->notify || !rndis->notify->driver_data) { + spin_unlock(&rndis_lock); + return; + } + + /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ + status = rndis_msg_parser(rndis->params, (u8 *) req->buf); + if (status < 0) + pr_err("RNDIS command error %d, %d/%d\n", + status, req->actual, req->length); + + buf = (rndis_init_msg_type *)req->buf; + + if (le32_to_cpu(buf->MessageType) == RNDIS_MSG_INIT) { + ul_max_xfer_size = rndis_get_ul_max_xfer_size(rndis->params); + ipa_data_set_ul_max_xfer_size(ul_max_xfer_size); + /* + * For consistent data throughput from IPA, it is required to + * fine tune aggregation byte limit as 7KB. RNDIS IPA driver + * use provided this value to calculate aggregation byte limit + * and program IPA hardware for aggregation. + * Host provides 8KB or 16KB as Max Transfer size, hence select + * minimum out of host provided value and optimum transfer size + * to get 7KB as aggregation byte limit. + */ + if (rndis_dl_max_xfer_size) + dl_max_xfer_size = min_t(u32, rndis_dl_max_xfer_size, + rndis_get_dl_max_xfer_size(rndis->params)); + else + dl_max_xfer_size = + rndis_get_dl_max_xfer_size(rndis->params); + ipa_data_set_dl_max_xfer_size(dl_max_xfer_size); + } + spin_unlock(&rndis_lock); +} + +static int +rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything except + * CDC class messages; interface activation uses set_alt(). 
+ */ + pr_debug("%s: Enter\n", __func__); + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + /* RNDIS uses the CDC command encapsulation mechanism to implement + * an RPC scheme, with much getting/setting of attributes by OID. + */ + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + if (w_value || w_index != rndis->ctrl_id) + goto invalid; + /* read the request; process it later */ + value = w_length; + req->complete = rndis_qc_command_complete; + /* later, rndis_response_available() sends a notification */ + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value || w_index != rndis->ctrl_id) + goto invalid; + else { + u8 *buf; + u32 n; + + /* return the result */ + buf = rndis_get_next_response(rndis->params, &n); + if (buf) { + memcpy(req->buf, buf, n); + req->complete = rndis_qc_response_complete; + rndis_free_response(rndis->params, buf); + value = n; + } + /* else stalls ... spec says to avoid that */ + } + break; + + default: +invalid: + VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->context = rndis; + req->zero = (value < w_length); + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + pr_err("rndis response on err %d\n", value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static struct net_device *rndis_qc_get_net(const char *netname) +{ + struct net_device *net_dev; + + net_dev = dev_get_by_name(&init_net, netname); + if (!net_dev) + return ERR_PTR(-EINVAL); + + /* + * Decrement net_dev refcount as it was incremented in + * dev_get_by_name(). 
+ */ + dev_put(net_dev); + return net_dev; +} + +static int rndis_qc_set_alt(struct usb_function *f, unsigned int intf, + unsigned int alt) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + struct f_rndis_qc_opts *opts; + struct usb_composite_dev *cdev = f->config->cdev; + int src_connection_idx; + int dst_connection_idx; + enum usb_ctrl usb_bam_type; + int ret; + + /* we know alt == 0 */ + + opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst); + if (intf == rndis->ctrl_id) { + if (rndis->notify->driver_data) { + VDBG(cdev, "reset rndis control %d\n", intf); + usb_ep_disable(rndis->notify); + } + if (!rndis->notify->desc) { + VDBG(cdev, "init rndis ctrl %d\n", intf); + if (config_ep_by_speed(cdev->gadget, f, rndis->notify)) + goto fail; + } + usb_ep_enable(rndis->notify); + rndis->notify->driver_data = rndis; + + } else if (intf == rndis->data_id) { + struct net_device *net; + + rndis->net_ready_trigger = false; + if (rndis->bam_port.in->driver_data) { + DBG(cdev, "reset rndis\n"); + /* bam_port is needed for disconnecting the BAM data + * path. Only after the BAM data path is disconnected, + * we can disconnect the port from the network layer. + */ + ipa_data_disconnect(&rndis->bam_port, + USB_IPA_FUNC_RNDIS); + } + + if (!rndis->bam_port.in->desc || !rndis->bam_port.out->desc) { + DBG(cdev, "init rndis\n"); + if (config_ep_by_speed(cdev->gadget, f, + rndis->bam_port.in) || + config_ep_by_speed(cdev->gadget, f, + rndis->bam_port.out)) { + rndis->bam_port.in->desc = NULL; + rndis->bam_port.out->desc = NULL; + goto fail; + } + } + + /* RNDIS should be in the "RNDIS uninitialized" state, + * either never activated or after rndis_uninit(). + * + * We don't want data to flow here until a nonzero packet + * filter is set, at which point it enters "RNDIS data + * initialized" state ... but we do want the endpoints + * to be activated. It's a strange little state. + * + * REVISIT the RNDIS gadget code has done this wrong for a + * very long time. 
We need another call to the link layer + * code -- gether_updown(...bool) maybe -- to do it right. + */ + rndis->cdc_filter = 0; + + rndis->bam_port.cdev = cdev; + rndis->bam_port.func = &rndis->func; + ipa_data_port_select(USB_IPA_FUNC_RNDIS); + usb_bam_type = usb_bam_get_bam_type(cdev->gadget->name); + + src_connection_idx = usb_bam_get_connection_idx(usb_bam_type, + IPA_P_BAM, USB_TO_PEER_PERIPHERAL, rndis->port_num); + dst_connection_idx = usb_bam_get_connection_idx(usb_bam_type, + IPA_P_BAM, PEER_PERIPHERAL_TO_USB, rndis->port_num); + if (src_connection_idx < 0 || dst_connection_idx < 0) { + pr_err("%s: usb_bam_get_connection_idx failed\n", + __func__); + return ret; + } + if (ipa_data_connect(&rndis->bam_port, USB_IPA_FUNC_RNDIS, + src_connection_idx, dst_connection_idx)) + goto fail; + + DBG(cdev, "RNDIS RX/TX early activation ...\n"); + rndis_qc_open(rndis); + net = rndis_qc_get_net("rndis0"); + if (IS_ERR(net)) + return PTR_ERR(net); + opts->net = net; + + rndis_set_param_dev(rndis->params, net, + &rndis->cdc_filter); + } else + goto fail; + + return 0; +fail: + return -EINVAL; +} + +static void rndis_qc_disable(struct usb_function *f) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + struct usb_composite_dev *cdev = f->config->cdev; + unsigned long flags; + + if (!rndis->notify->driver_data) + return; + + DBG(cdev, "rndis deactivated\n"); + + spin_lock_irqsave(&rndis_lock, flags); + rndis_uninit(rndis->params); + spin_unlock_irqrestore(&rndis_lock, flags); + ipa_data_disconnect(&rndis->bam_port, USB_IPA_FUNC_RNDIS); + + msm_ep_unconfig(rndis->bam_port.out); + msm_ep_unconfig(rndis->bam_port.in); + usb_ep_disable(rndis->notify); + rndis->notify->driver_data = NULL; +} + +static void rndis_qc_suspend(struct usb_function *f) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + bool remote_wakeup_allowed; + + if (f->config->cdev->gadget->speed == USB_SPEED_SUPER) + remote_wakeup_allowed = f->func_wakeup_allowed; + else + remote_wakeup_allowed = 
f->config->cdev->gadget->remote_wakeup; + + pr_info("%s(): start rndis suspend: remote_wakeup_allowed:%d\n", + __func__, remote_wakeup_allowed); + + if (!remote_wakeup_allowed) { + /* This is required as Linux host side RNDIS driver doesn't + * send RNDIS_MESSAGE_PACKET_FILTER before suspending USB bus. + * Hence we perform same operations explicitly here for Linux + * host case. In case of windows, this RNDIS state machine is + * already updated due to receiving of PACKET_FILTER. + */ + rndis_flow_control(rndis->params, true); + pr_debug("%s(): Disconnecting\n", __func__); + } + + ipa_data_suspend(&rndis->bam_port, USB_IPA_FUNC_RNDIS, + remote_wakeup_allowed); + pr_debug("rndis suspended\n"); +} + +static void rndis_qc_resume(struct usb_function *f) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + bool remote_wakeup_allowed; + + pr_debug("%s: rndis resumed\n", __func__); + + /* Nothing to do if DATA interface wasn't initialized */ + if (!rndis->bam_port.cdev) { + pr_debug("data interface was not up\n"); + return; + } + + if (f->config->cdev->gadget->speed == USB_SPEED_SUPER) + remote_wakeup_allowed = f->func_wakeup_allowed; + else + remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup; + + ipa_data_resume(&rndis->bam_port, USB_IPA_FUNC_RNDIS, + remote_wakeup_allowed); + + if (!remote_wakeup_allowed) { + rndis_qc_open(rndis); + /* + * Linux Host doesn't sends RNDIS_MSG_INIT or non-zero value + * set with RNDIS_MESSAGE_PACKET_FILTER after performing bus + * resume. Hence trigger USB IPA transfer functionality + * explicitly here. For Windows host case is also being + * handle with RNDIS state machine. 
+ */ + rndis_flow_control(rndis->params, false); + } + + pr_debug("%s: RNDIS resume completed\n", __func__); +} + +/*-------------------------------------------------------------------------*/ + +/* + * This isn't quite the same mechanism as CDC Ethernet, since the + * notification scheme passes less data, but the same set of link + * states must be tested. A key difference is that altsettings are + * not used to tell whether the link should send packets or not. + */ + +static void rndis_qc_open(struct f_rndis_qc *rndis) +{ + struct usb_composite_dev *cdev = rndis->func.config->cdev; + + DBG(cdev, "%s\n", __func__); + + rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, + rndis_qc_bitrate(cdev->gadget) / 100); + rndis_signal_connect(rndis->params); +} + +static void ipa_data_flow_control_enable(bool enable, + struct rndis_params *param) +{ + if (enable) + ipa_data_stop_rndis_ipa(USB_IPA_FUNC_RNDIS); + else + ipa_data_start_rndis_ipa(USB_IPA_FUNC_RNDIS); +} + +/*-------------------------------------------------------------------------*/ + +/* ethernet function driver setup/binding */ + +static int +rndis_qc_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + struct rndis_params *params; + int status; + struct usb_ep *ep; + + status = rndis_ipa_init(&rndis_ipa_params); + if (status) { + pr_err("%s: failed to init rndis_ipa\n", __func__); + return status; + } + + rndis_ipa_supported = true; + /* maybe allocate device-global string IDs */ + if (rndis_qc_string_defs[0].id == 0) { + + /* control interface label */ + status = usb_string_id(c->cdev); + if (status < 0) + return status; + rndis_qc_string_defs[0].id = status; + rndis_qc_control_intf.iInterface = status; + + /* data interface label */ + status = usb_string_id(c->cdev); + if (status < 0) + return status; + rndis_qc_string_defs[1].id = status; + rndis_qc_data_intf.iInterface = status; + + /* IAD 
iFunction label */ + status = usb_string_id(c->cdev); + if (status < 0) + return status; + rndis_qc_string_defs[2].id = status; + rndis_qc_iad_descriptor.iFunction = status; + } + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + rndis->ctrl_id = status; + rndis_qc_iad_descriptor.bFirstInterface = status; + + rndis_qc_control_intf.bInterfaceNumber = status; + rndis_qc_union_desc.bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + rndis->data_id = status; + + rndis_qc_data_intf.bInterfaceNumber = status; + rndis_qc_union_desc.bSlaveInterface0 = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc); + if (!ep) + goto fail; + rndis->bam_port.in = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc); + if (!ep) + goto fail; + rndis->bam_port.out = ep; + ep->driver_data = cdev; /* claim */ + + /* NOTE: a status/notification endpoint is, strictly speaking, + * optional. We don't treat it that way though! It's simpler, + * and some newer profiles don't treat it as optional. 
+ */ + ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc); + if (!ep) + goto fail; + rndis->notify = ep; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* allocate notification request and buffer */ + rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!rndis->notify_req) + goto fail; + rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL); + if (!rndis->notify_req->buf) + goto fail; + rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT; + rndis->notify_req->context = rndis; + rndis->notify_req->complete = rndis_qc_response_complete; + + /* copy descriptors, and track endpoint copies */ + f->fs_descriptors = usb_copy_descriptors(eth_qc_fs_function); + if (!f->fs_descriptors) + goto fail; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + rndis_qc_hs_in_desc.bEndpointAddress = + rndis_qc_fs_in_desc.bEndpointAddress; + rndis_qc_hs_out_desc.bEndpointAddress = + rndis_qc_fs_out_desc.bEndpointAddress; + rndis_qc_hs_notify_desc.bEndpointAddress = + rndis_qc_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function); + + if (!f->hs_descriptors) + goto fail; + } + + if (gadget_is_superspeed(c->cdev->gadget)) { + rndis_qc_ss_in_desc.bEndpointAddress = + rndis_qc_fs_in_desc.bEndpointAddress; + rndis_qc_ss_out_desc.bEndpointAddress = + rndis_qc_fs_out_desc.bEndpointAddress; + rndis_qc_ss_notify_desc.bEndpointAddress = + rndis_qc_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function); + if (!f->ss_descriptors) + goto fail; + } + + params = rndis_register(rndis_qc_response_available, rndis, + ipa_data_flow_control_enable); + if (IS_ERR(params)) + goto fail; + + rndis->params = params; + 
rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0); + rndis_set_host_mac(rndis->params, rndis->ethaddr); + + if (rndis->manufacturer && rndis->vendorID && + rndis_set_param_vendor(rndis->params, rndis->vendorID, + rndis->manufacturer)) + goto fail; + + pr_debug("%s(): max_pkt_per_xfer:%d\n", __func__, + rndis->ul_max_pkt_per_xfer); + rndis_set_max_pkt_xfer(rndis->params, rndis->ul_max_pkt_per_xfer); + + /* In case of aggregated packets QC device will request + * aliment to 4 (2^2). + */ + pr_debug("%s(): pkt_alignment_factor:%d\n", __func__, + rndis->pkt_alignment_factor); + rndis_set_pkt_alignment_factor(rndis->params, + rndis->pkt_alignment_factor); + + /* NOTE: all that is done without knowing or caring about + * the network link ... which is unavailable to this code + * until we're activated via set_alt(). + */ + + c->cdev->gadget->bam2bam_func_enabled = true; + DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + rndis->bam_port.in->name, rndis->bam_port.out->name, + rndis->notify->name); + return 0; + +fail: + if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors) + usb_free_descriptors(f->ss_descriptors); + if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors) + usb_free_descriptors(f->hs_descriptors); + if (f->fs_descriptors) + usb_free_descriptors(f->fs_descriptors); + + if (rndis->notify_req) { + kfree(rndis->notify_req->buf); + usb_ep_free_request(rndis->notify, rndis->notify_req); + } + + /* we might as well release our claims on endpoints */ + if (rndis->notify) + rndis->notify->driver_data = NULL; + if (rndis->bam_port.out->desc) + rndis->bam_port.out->driver_data = NULL; + if (rndis->bam_port.in->desc) + rndis->bam_port.in->driver_data = NULL; + + pr_err("%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static void rndis_qc_free(struct usb_function *f) +{ + struct f_rndis_qc_opts *opts; + + opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst); + opts->refcnt--; +} + +static void +rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_rndis_qc *rndis = func_to_rndis_qc(f); + + pr_debug("func %s: free\n", __func__); + rndis_deregister(rndis->params); + + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + usb_free_descriptors(f->fs_descriptors); + + c->cdev->gadget->bam2bam_func_enabled = false; + kfree(rndis->notify_req->buf); + usb_ep_free_request(rndis->notify, rndis->notify_req); + + /* + * call flush_workqueue to make sure that any pending + * disconnect_work() from u_bam_data.c file is being + * flushed before calling this rndis_ipa_cleanup API + * as rndis ipa disconnect API is required to be + * called before this. 
+ */ + ipa_data_flush_workqueue(); + rndis_ipa_cleanup(rndis_ipa_params.private); + rndis_ipa_supported = false; + +} + +void rndis_ipa_reset_trigger(void) +{ + struct f_rndis_qc *rndis; + + rndis = _rndis_qc; + if (!rndis) { + pr_err("%s: No RNDIS instance\n", __func__); + return; + } + + rndis->net_ready_trigger = false; +} + +/* + * Callback let RNDIS_IPA trigger us when network interface is up + * and userspace is ready to answer DHCP requests + */ +static void rndis_net_ready_notify(void) +{ + struct f_rndis_qc *rndis; + unsigned long flags; + + spin_lock_irqsave(&rndis_lock, flags); + rndis = _rndis_qc; + if (!rndis) { + pr_err("%s: No RNDIS instance\n", __func__); + spin_unlock_irqrestore(&rndis_lock, flags); + return; + } + if (rndis->net_ready_trigger) { + pr_err("%s: Already triggered\n", __func__); + spin_unlock_irqrestore(&rndis_lock, flags); + return; + } + + pr_debug("%s: Set net_ready_trigger\n", __func__); + rndis->net_ready_trigger = true; + spin_unlock_irqrestore(&rndis_lock, flags); + ipa_data_start_rx_tx(USB_IPA_FUNC_RNDIS); +} + +/** + * rndis_qc_bind_config - add RNDIS network link to a configuration + * @c: the configuration to support the network link + * @ethaddr: a buffer in which the ethernet address of the host side + * side of the link was recorded + * Context: single threaded during gadget setup + * + * Returns zero on success, else negative errno. + * + * Caller must have called @gether_setup(). Caller is also responsible + * for calling @gether_cleanup() before module unload. 
 */

static struct
usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
				u32 vendorID, const char *manufacturer,
				u8 max_pkt_per_xfer, u8 pkt_alignment_factor)
{
	struct f_rndis_qc_opts *opts = container_of(fi,
				struct f_rndis_qc_opts, func_inst);
	struct f_rndis_qc	*rndis;

	/* allocate and initialize one new instance */
	opts = container_of(fi, struct f_rndis_qc_opts, func_inst);

	opts->refcnt++;
	rndis = opts->rndis;

	/* NOTE(review): the vendorID/manufacturer parameters are ignored;
	 * values come from opts instead — confirm this is intentional.
	 */
	rndis->vendorID = opts->vendor_id;
	rndis->manufacturer = opts->manufacturer;
	/* export host's Ethernet address in CDC format */
	random_ether_addr(rndis_ipa_params.host_ethaddr);
	random_ether_addr(rndis_ipa_params.device_ethaddr);
	pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
		rndis_ipa_params.host_ethaddr,
		rndis_ipa_params.device_ethaddr);
	ether_addr_copy(rndis->ethaddr, rndis_ipa_params.host_ethaddr);
	rndis_ipa_params.device_ready_notify = rndis_net_ready_notify;

	/* if max_pkt_per_xfer was not configured set to default value */
	rndis->ul_max_pkt_per_xfer =
			max_pkt_per_xfer ? max_pkt_per_xfer :
			DEFAULT_MAX_PKT_PER_XFER;
	ipa_data_set_ul_max_pkt_num(rndis->ul_max_pkt_per_xfer);

	/*
	 * Check no RNDIS aggregation, and alignment if not mentioned,
	 * use alignment factor as zero. If aggregated RNDIS data transfer,
	 * max packet per transfer would be default if it is not set
	 * explicitly, and same way use alignment factor as 2 by default.
	 * This would eliminate need of writing to sysfs if default RNDIS
	 * aggregation setting required. Writing to both sysfs entries,
	 * those values will always override default values.
	 */
	if ((rndis->pkt_alignment_factor == 0) &&
			(rndis->ul_max_pkt_per_xfer == 1))
		rndis->pkt_alignment_factor = 0;
	else
		rndis->pkt_alignment_factor = pkt_alignment_factor ?
				pkt_alignment_factor :
				DEFAULT_PKT_ALIGNMENT_FACTOR;

	/* RNDIS activates when the host changes this filter */
	rndis->cdc_filter = 0;

	rndis->func.name = "rndis";
	rndis->func.strings = rndis_qc_strings;
	/* descriptors are per-instance copies */
	rndis->func.bind = rndis_qc_bind;
	rndis->func.unbind = rndis_qc_unbind;
	rndis->func.set_alt = rndis_qc_set_alt;
	rndis->func.setup = rndis_qc_setup;
	rndis->func.disable = rndis_qc_disable;
	rndis->func.suspend = rndis_qc_suspend;
	rndis->func.resume = rndis_qc_resume;
	rndis->func.free_func = rndis_qc_free;

	/* publish the singleton for callbacks and the char dev */
	_rndis_qc = rndis;

	return &rndis->func;
}

static struct usb_function *qcrndis_alloc(struct usb_function_instance *fi)
{
	return rndis_qc_bind_config_vendor(fi, 0, NULL, 0, 0);
}

/* char-dev open: enforce single-open via open_excl */
static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
{
	int ret = 0;
	unsigned long flags;

	pr_info("Open rndis QC driver\n");

	spin_lock_irqsave(&rndis_lock, flags);
	if (!_rndis_qc) {
		pr_err("rndis_qc_dev not created yet\n");
		ret = -ENODEV;
		goto fail;
	}

	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
		pr_err("Already opened\n");
		ret = -EBUSY;
		goto fail;
	}

	fp->private_data = _rndis_qc;
fail:
	spin_unlock_irqrestore(&rndis_lock, flags);

	if (!ret)
		pr_info("rndis QC file opened\n");

	return ret;
}

static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
{
	unsigned long flags;

	pr_info("Close rndis QC file\n");

	spin_lock_irqsave(&rndis_lock, flags);

	if (!_rndis_qc) {
		pr_err("rndis_qc_dev not present\n");
		spin_unlock_irqrestore(&rndis_lock, flags);
		return -ENODEV;
	}
	rndis_qc_unlock(&_rndis_qc->open_excl);
	spin_unlock_irqrestore(&rndis_lock, flags);
	return 0;
}

/* expose UL aggregation limits to userspace; values are snapshotted
 * under rndis_lock before the copy_to_user()
 */
static long rndis_qc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	u8 qc_max_pkt_per_xfer = 0;
	u32 qc_max_pkt_size = 0;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&rndis_lock, flags);
	if (!_rndis_qc) {
		pr_err("rndis_qc_dev not present\n");
		ret = -ENODEV;
		goto fail;
	}

	qc_max_pkt_per_xfer = _rndis_qc->ul_max_pkt_per_xfer;
	qc_max_pkt_size = _rndis_qc->max_pkt_size;

	if (rndis_qc_lock(&_rndis_qc->ioctl_excl)) {
		ret = -EBUSY;
		goto fail;
	}

	spin_unlock_irqrestore(&rndis_lock, flags);

	pr_info("Received command %d\n", cmd);

	switch (cmd) {
	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
		ret = copy_to_user((void __user *)arg,
					&qc_max_pkt_per_xfer,
					sizeof(qc_max_pkt_per_xfer));
		if (ret) {
			pr_err("copying to user space failed\n");
			ret = -EFAULT;
		}
		pr_info("Sent UL max packets per xfer %d\n",
				qc_max_pkt_per_xfer);
		break;
	case RNDIS_QC_GET_MAX_PKT_SIZE:
		ret = copy_to_user((void __user *)arg,
					&qc_max_pkt_size,
					sizeof(qc_max_pkt_size));
		if (ret) {
			pr_err("copying to user space failed\n");
			ret = -EFAULT;
		}
		pr_debug("Sent max packet size %d\n",
				qc_max_pkt_size);
		break;
	default:
		pr_err("Unsupported IOCTL\n");
		ret = -EINVAL;
	}

	spin_lock_irqsave(&rndis_lock, flags);

	/* NOTE(review): if the instance vanished while unlocked,
	 * ioctl_excl is never released — harmless only because the
	 * instance was freed; confirm.
	 */
	if (!_rndis_qc) {
		pr_err("rndis_qc_dev not present\n");
		ret = -ENODEV;
		goto fail;
	}

	rndis_qc_unlock(&_rndis_qc->ioctl_excl);

fail:
	spin_unlock_irqrestore(&rndis_lock, flags);
	return ret;
}

static const struct file_operations rndis_qc_fops = {
	.owner = THIS_MODULE,
	.open = rndis_qc_open_dev,
	.release = rndis_qc_release_dev,
	.unlocked_ioctl	= rndis_qc_ioctl,
};

/* configfs instance teardown: destroy the char dev and free the instance */
static void qcrndis_free_inst(struct usb_function_instance *f)
{
	struct f_rndis_qc_opts	*opts = container_of(f,
				struct f_rndis_qc_opts, func_inst);
	int minor = MINOR(opts->rndis->cdev.dev);
	unsigned long flags;

	device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor));
	class_destroy(rndis_class);
	cdev_del(&opts->rndis->cdev);
	ida_simple_remove(&chardev_ida, minor);
	unregister_chrdev_region(rndis_dev, 1);

	ipa_data_free(USB_IPA_FUNC_RNDIS);
	spin_lock_irqsave(&rndis_lock, flags);
	kfree(opts->rndis);
	_rndis_qc = NULL;
	kfree(opts);
	spin_unlock_irqrestore(&rndis_lock, flags);
}

static int qcrndis_set_inst_name(struct usb_function_instance *fi,
	const char *name)
{
	struct f_rndis_qc_opts	*opts = container_of(fi,
				struct f_rndis_qc_opts, func_inst);
	struct f_rndis_qc	*rndis;
	int name_len;
	int ret, minor;

	name_len = strlen(name) + 1;
	if (name_len > MAX_INST_NAME_LEN)
		return -ENAMETOOLONG;

	pr_debug("initialize rndis QC instance\n");
	rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
	if (!rndis) {
		pr_err("%s: fail allocate and initialize new instance\n",
			__func__);
		return -ENOMEM;
	}

	spin_lock_init(&rndis_lock);
	opts->rndis = rndis;
	rndis_class = class_create(THIS_MODULE, "usbrndis");
	ret = alloc_chrdev_region(&rndis_dev, 0, 1, "usb_rndis");
	if (ret < 0) {
		pr_err("Fail to allocate usb rndis char dev region\n");
		return ret;
	}

	/* get a minor number */
	minor = ida_simple_get(&chardev_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		pr_err("%s: No more minor numbers left!
rc:%d\n", __func__, + minor); + ret = -ENODEV; + goto fail_out_of_minors; + } + rndis->dev = device_create(rndis_class, NULL, + MKDEV(MAJOR(rndis_dev), minor), + rndis, "android_rndis_qc"); + if (IS_ERR(rndis->dev)) { + ret = PTR_ERR(rndis->dev); + pr_err("%s: device_create failed for (%d)\n", __func__, ret); + goto fail_return_minor; + } + cdev_init(&rndis->cdev, &rndis_qc_fops); + ret = cdev_add(&rndis->cdev, MKDEV(MAJOR(rndis_dev), minor), 1); + if (ret < 0) { + pr_err("%s: cdev_add failed for %s (%d)\n", __func__, + name, ret); + goto fail_cdev_add; + } + + if (ret) + pr_err("rndis QC driver failed to register\n"); + + ret = ipa_data_setup(USB_IPA_FUNC_RNDIS); + if (ret) { + pr_err("bam_data_setup failed err: %d\n", ret); + goto fail_data_setup; + } + + return 0; +fail_data_setup: + cdev_del(&rndis->cdev); +fail_cdev_add: + device_destroy(rndis_class, MKDEV(MAJOR(rndis_dev), minor)); +fail_return_minor: + ida_simple_remove(&chardev_ida, minor); +fail_out_of_minors: + unregister_chrdev_region(rndis_dev, 1); + class_destroy(rndis_class); + kfree(rndis); + return ret; +} + +static inline +struct f_rndis_qc_opts *to_f_qc_rndis_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_rndis_qc_opts, + func_inst.group); +} + +static void qcrndis_attr_release(struct config_item *item) +{ + struct f_rndis_qc_opts *opts = to_f_qc_rndis_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations qcrndis_item_ops = { + .release = qcrndis_attr_release, +}; + +static struct config_item_type qcrndis_func_type = { + .ct_item_ops = &qcrndis_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct usb_function_instance *qcrndis_alloc_inst(void) +{ + struct f_rndis_qc_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + opts->func_inst.set_inst_name = qcrndis_set_inst_name; + opts->func_inst.free_func_inst = qcrndis_free_inst; + + 
config_group_init_type_name(&opts->func_inst.group, "", + &qcrndis_func_type); + + return &opts->func_inst; +} + +void *rndis_qc_get_ipa_rx_cb(void) +{ + return rndis_ipa_params.ipa_rx_notify; +} + +void *rndis_qc_get_ipa_tx_cb(void) +{ + return rndis_ipa_params.ipa_tx_notify; +} + +void *rndis_qc_get_ipa_priv(void) +{ + return rndis_ipa_params.private; +} + +bool rndis_qc_get_skip_ep_config(void) +{ + return rndis_ipa_params.skip_ep_cfg; +} + +DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc); + +static int __init usb_qcrndis_init(void) +{ + int ret; + + ret = usb_function_register(&rndis_bamusb_func); + if (ret) { + pr_err("%s: failed to register diag %d\n", __func__, ret); + return ret; + } + return ret; +} + +static void __exit usb_qcrndis_exit(void) +{ + usb_function_unregister(&rndis_bamusb_func); +} + +module_init(usb_qcrndis_init); +module_exit(usb_qcrndis_exit); +MODULE_DESCRIPTION("USB RMNET Function Driver"); diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index b16d1958f0f5..f1d3113698b8 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -1006,6 +1006,18 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed) } EXPORT_SYMBOL_GPL(rndis_set_param_medium); +u32 rndis_get_dl_max_xfer_size(struct rndis_params *params) +{ + pr_debug("%s:\n", __func__); + return params->dl_max_xfer_size; +} + +u32 rndis_get_ul_max_xfer_size(struct rndis_params *params) +{ + pr_debug("%s:\n", __func__); + return params->ul_max_xfer_size; +} + void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer) { pr_debug("%s:\n", __func__); diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index de61b7bca40f..9aea5535d70d 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { u32 host_rndis_major_ver; u32 
host_rndis_minor_ver; u32 dl_max_xfer_size; + u32 ul_max_xfer_size; const char *vendorDescr; u8 pkt_alignment_factor; void (*resp_avail)(void *v); @@ -196,6 +197,8 @@ int rndis_set_param_vendor(struct rndis_params *params, u32 vendorID, int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed); void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer); +u32 rndis_get_ul_max_xfer_size(struct rndis_params *params); +u32 rndis_get_dl_max_xfer_size(struct rndis_params *params); void rndis_add_hdr(struct sk_buff *skb); int rndis_rm_hdr(struct gether *port, struct sk_buff *skb, struct sk_buff_head *list); diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c new file mode 100644 index 000000000000..5a6d3e3916ca --- /dev/null +++ b/drivers/usb/gadget/function/u_data_ipa.c @@ -0,0 +1,1423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "u_data_ipa.h" + +struct ipa_data_ch_info { + struct usb_request *rx_req; + struct usb_request *tx_req; + unsigned long flags; + unsigned int id; + enum ipa_func_type func_type; + bool is_connected; + unsigned int port_num; + spinlock_t port_lock; + + struct work_struct connect_w; + struct work_struct disconnect_w; + struct work_struct suspend_w; + struct work_struct resume_w; + + u32 src_pipe_idx; + u32 dst_pipe_idx; + u8 src_connection_idx; + u8 dst_connection_idx; + enum usb_ctrl usb_bam_type; + struct data_port *port_usb; + struct usb_gadget *gadget; + atomic_t pipe_connect_notified; + struct usb_bam_connect_ipa_params ipa_params; +}; + +struct rndis_data_ch_info { + /* this provides downlink (device->host i.e host) side configuration*/ + u32 dl_max_transfer_size; + /* this provides uplink (host->device i.e device) side configuration */ + u32 ul_max_transfer_size; + u32 ul_max_packets_number; + bool ul_aggregation_enable; + u32 prod_clnt_hdl; + u32 cons_clnt_hdl; + void *priv; +}; + +static struct workqueue_struct *ipa_data_wq; +static struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS]; +static struct rndis_data_ch_info *rndis_data; +/** + * ipa_data_endless_complete() - completion callback for endless TX/RX request + * @ep: USB endpoint for which this completion happen + * @req: USB endless request + * + * This completion is being called when endless (TX/RX) transfer is terminated + * i.e. disconnect or suspend case. + */ +static void ipa_data_endless_complete(struct usb_ep *ep, + struct usb_request *req) +{ + pr_debug("%s: endless complete for(%s) with status: %d\n", + __func__, ep->name, req->status); +} + +/** + * ipa_data_start_endless_xfer() - configure USB endpoint and + * queue endless TX/RX request + * @port: USB IPA data channel information + * @in: USB endpoint direction i.e. 
true: IN(Device TX), false: OUT(Device RX) + * + * It is being used to queue endless TX/RX request with UDC driver. + * It does set required DBM endpoint configuration before queueing endless + * TX/RX request. + */ +static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in) +{ + unsigned long flags; + int status; + struct usb_ep *ep; + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb || (in && !port->tx_req) + || (!in && !port->rx_req)) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s(): port_usb/req is NULL.\n", __func__); + return; + } + + if (in) + ep = port->port_usb->in; + else + ep = port->port_usb->out; + + spin_unlock_irqrestore(&port->port_lock, flags); + + if (in) { + pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__); + status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC); + if (status) + pr_err("error enqueuing endless TX_REQ, %d\n", status); + } else { + pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__); + status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC); + if (status) + pr_err("error enqueuing endless RX_REQ, %d\n", status); + } +} + +/** + * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request + * @port: USB IPA data channel information + * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX + * + * It is being used to terminate and dequeue endless TX/RX request with UDC + * driver. 
+ */ +static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in) +{ + unsigned long flags; + int status; + struct usb_ep *ep; + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb || (in && !port->tx_req) + || (!in && !port->rx_req)) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s(): port_usb/req is NULL.\n", __func__); + return; + } + + if (in) + ep = port->port_usb->in; + else + ep = port->port_usb->out; + + spin_unlock_irqrestore(&port->port_lock, flags); + + if (in) { + pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__); + status = usb_ep_dequeue(ep, port->tx_req); + if (status) + pr_err("error dequeueing endless TX_REQ, %d\n", status); + } else { + pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__); + status = usb_ep_dequeue(ep, port->rx_req); + if (status) + pr_err("error dequeueing endless RX_REQ, %d\n", status); + } +} + +/* + * Called when IPA triggers us that the network interface is up. + * Starts the transfers on bulk endpoints. 
+ * (optimization reasons, the pipes and bam with IPA are already connected) + */ +void ipa_data_start_rx_tx(enum ipa_func_type func) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + struct usb_ep *epin, *epout; + + pr_debug("%s: Triggered: starting tx, rx\n", __func__); + /* queue in & out requests */ + port = ipa_data_ports[func]; + if (!port) { + pr_err("%s: port is NULL, can't start tx, rx\n", __func__); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + + if (!port->port_usb || !port->port_usb->in || + !port->port_usb->out) { + pr_err("%s: Can't start tx, rx, ep not enabled\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + if (!port->rx_req || !port->tx_req) { + pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK\n", + __func__, port->rx_req, port->tx_req); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + if (!port->is_connected) { + pr_debug("%s: pipes are disconnected\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + epout = port->port_usb->out; + epin = port->port_usb->in; + spin_unlock_irqrestore(&port->port_lock, flags); + + /* queue in & out requests */ + pr_debug("%s: Starting rx\n", __func__); + if (epout) + ipa_data_start_endless_xfer(port, false); + + pr_debug("%s: Starting tx\n", __func__); + if (epin) + ipa_data_start_endless_xfer(port, true); +} +/** + * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect + * @w: disconnect work + * + * It is being schedule from ipa_data_disconnect() API when particular function + * is being disable due to USB disconnect or USB composition switch is being + * trigger . This API performs disconnect of USB BAM pipe, IPA BAM pipe and also + * initiate USB IPA BAM pipe handshake for USB Disconnect sequence. Due to + * handshake operation and involvement of SPS related APIs, this functioality + * can't be used from atomic context. 
+ */ +static void ipa_data_disconnect_work(struct work_struct *w) +{ + struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, + disconnect_w); + unsigned long flags; + int ret; + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->is_connected) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_debug("Already disconnected.\n"); + return; + } + port->is_connected = false; + pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__, + port->ipa_params.prod_clnt_hdl, + port->ipa_params.cons_clnt_hdl); + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params); + if (ret) + pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret); + + if (port->func_type == USB_IPA_FUNC_RNDIS) { + /* + * NOTE: it is required to disconnect USB and IPA BAM related + * pipes before calling IPA tethered function related disconnect + * API. IPA tethered function related disconnect API delete + * depedency graph with IPA RM which would results into IPA not + * pulling data although there is pending data on USB BAM + * producer pipe. + */ + if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) { + void *priv; + + priv = rndis_qc_get_ipa_priv(); + rndis_ipa_pipe_disconnect_notify(priv); + } + } + + if (port->ipa_params.prod_clnt_hdl) + usb_bam_free_fifos(port->usb_bam_type, + port->dst_connection_idx); + if (port->ipa_params.cons_clnt_hdl) + usb_bam_free_fifos(port->usb_bam_type, + port->src_connection_idx); + + if (port->func_type == USB_IPA_FUNC_RMNET) + teth_bridge_disconnect(port->ipa_params.src_client); + /* + * Decrement usage count which was incremented + * upon cable connect or cable disconnect in suspended state. 
+ */ + usb_gadget_autopm_put_async(port->gadget); + + pr_debug("%s(): disconnect work completed.\n", __func__); +} + +/** + * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint + * @gp: USB gadget IPA Port + * @port_num: Port num used by function driver which need to be disable + * + * It is being called from atomic context from gadget driver when particular + * function is being disable due to USB cable disconnect or USB composition + * switch is being trigger. This API performs restoring USB endpoint operation + * and disable USB endpoint used for accelerated path. + */ +void ipa_data_disconnect(struct data_port *gp, enum ipa_func_type func) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + struct usb_gadget *gadget = NULL; + + pr_debug("dev:%pK port number:%d\n", gp, func); + if (func >= USB_IPA_NUM_FUNCS) { + pr_err("invalid ipa portno#%d\n", func); + return; + } + + if (!gp) { + pr_err("data port is null\n"); + return; + } + + port = ipa_data_ports[func]; + if (!port) { + pr_err("port %u is NULL\n", func); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) { + gadget = port->port_usb->cdev->gadget; + port->port_usb->ipa_consumer_ep = -1; + port->port_usb->ipa_producer_ep = -1; + + if (port->port_usb->in) { + /* + * Disable endpoints. + * Unlocking is needed since disabling the eps might + * stop active transfers and therefore the request + * complete function will be called, where we try + * to obtain the spinlock as well. 
+ */ + msm_ep_unconfig(port->port_usb->in); + spin_unlock_irqrestore(&port->port_lock, flags); + usb_ep_disable(port->port_usb->in); + spin_lock_irqsave(&port->port_lock, flags); + if (port->tx_req) { + usb_ep_free_request(port->port_usb->in, + port->tx_req); + port->tx_req = NULL; + } + port->port_usb->in->endless = false; + } + + if (port->port_usb->out) { + msm_ep_unconfig(port->port_usb->out); + spin_unlock_irqrestore(&port->port_lock, flags); + usb_ep_disable(port->port_usb->out); + spin_lock_irqsave(&port->port_lock, flags); + if (port->rx_req) { + usb_ep_free_request(port->port_usb->out, + port->rx_req); + port->rx_req = NULL; + } + port->port_usb->out->endless = false; + } + + port->port_usb = NULL; + } + spin_unlock_irqrestore(&port->port_lock, flags); + queue_work(ipa_data_wq, &port->disconnect_w); +} + +/** + * configure_fifo() - Configure USB BAM Pipe's data FIFO + * @idx: USB BAM Pipe index + * @ep: USB endpoint + * + * This function configures USB BAM data fifo using fetched pipe configuraion + * using provided index value. This function needs to used before starting + * endless transfer. + */ +static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep) +{ + struct sps_mem_buffer data_fifo = {0}; + u32 usb_bam_pipe_idx; + + get_bam2bam_connection_info(bam_type, idx, + &usb_bam_pipe_idx, + NULL, &data_fifo, NULL); + msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size, + usb_bam_pipe_idx); +} + +/** + * ipa_data_connect_work() - Perform USB IPA BAM connect + * @w: connect work + * + * It is being schedule from ipa_data_connect() API when particular function + * which is using USB IPA accelerated path. This API performs allocating request + * for USB endpoint (tx/rx) for endless purpose, configure USB endpoint to be + * used in accelerated path, connect of USB BAM pipe, IPA BAM pipe and also + * initiate USB IPA BAM pipe handshake for connect sequence. 
+ */ +static void ipa_data_connect_work(struct work_struct *w) +{ + struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, + connect_w); + struct data_port *gport; + struct usb_gadget *gadget = NULL; + struct teth_bridge_connect_params connect_params; + struct teth_bridge_init_params teth_bridge_params; + u32 sps_params; + int ret; + unsigned long flags; + bool is_ipa_disconnected = true; + + pr_debug("%s: Connect workqueue started\n", __func__); + + spin_lock_irqsave(&port->port_lock, flags); + + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + usb_gadget_autopm_put_async(port->gadget); + pr_err("%s(): port_usb is NULL.\n", __func__); + return; + } + + gport = port->port_usb; + if (gport && gport->cdev) + gadget = gport->cdev->gadget; + + if (!gadget) { + spin_unlock_irqrestore(&port->port_lock, flags); + usb_gadget_autopm_put_async(port->gadget); + pr_err("%s: gport is NULL.\n", __func__); + return; + } + + /* + * check if connect_w got called two times during RNDIS resume as + * explicit flow control is called to start data transfers after + * ipa_data_connect() + */ + if (port->is_connected) { + pr_debug("IPA connect is already done & Transfers started\n"); + spin_unlock_irqrestore(&port->port_lock, flags); + usb_gadget_autopm_put_async(port->gadget); + return; + } + + gport->ipa_consumer_ep = -1; + gport->ipa_producer_ep = -1; + + port->is_connected = true; + + /* update IPA Parameteres here. 
*/ + port->ipa_params.usb_connection_speed = gadget->speed; + if (!gadget->is_chipidea) + port->ipa_params.reset_pipe_after_lpm = + msm_dwc3_reset_ep_after_lpm(gadget); + port->ipa_params.skip_ep_cfg = true; + port->ipa_params.keep_ipa_awake = true; + port->ipa_params.cons_clnt_hdl = -1; + port->ipa_params.prod_clnt_hdl = -1; + + if (gport->out) { + spin_unlock_irqrestore(&port->port_lock, flags); + usb_bam_alloc_fifos(port->usb_bam_type, + port->src_connection_idx); + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb || port->rx_req == NULL) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s: port_usb is NULL, or rx_req cleaned\n", + __func__); + goto out; + } + if (!gadget->is_chipidea) { + sps_params = MSM_SPS_MODE | MSM_DISABLE_WB + | MSM_PRODUCER | port->src_pipe_idx; + port->rx_req->length = 32*1024; + port->rx_req->udc_priv = sps_params; + configure_fifo(port->usb_bam_type, + port->src_connection_idx, + port->port_usb->out); + ret = msm_ep_config(gport->out, port->rx_req); + if (ret) { + pr_err("msm_ep_config() failed for OUT EP\n"); + spin_unlock_irqrestore(&port->port_lock, flags); + goto out; + } + } else { + /* gadget->is_chipidea */ + get_bam2bam_connection_info(port->usb_bam_type, + port->src_connection_idx, + &port->src_pipe_idx, + NULL, NULL, NULL); + sps_params = (MSM_SPS_MODE | port->src_pipe_idx | + MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; + port->rx_req->udc_priv = sps_params; + } + } + + if (gport->in) { + spin_unlock_irqrestore(&port->port_lock, flags); + usb_bam_alloc_fifos(port->usb_bam_type, + port->dst_connection_idx); + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb || port->tx_req == NULL) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s: port_usb is NULL, or tx_req cleaned\n", + __func__); + goto unconfig_msm_ep_out; + } + if (!gadget->is_chipidea) { + sps_params = MSM_SPS_MODE | MSM_DISABLE_WB | + port->dst_pipe_idx; + port->tx_req->length = 32*1024; + 
port->tx_req->udc_priv = sps_params; + configure_fifo(port->usb_bam_type, + port->dst_connection_idx, gport->in); + + ret = msm_ep_config(gport->in, port->tx_req); + if (ret) { + pr_err("msm_ep_config() failed for IN EP\n"); + spin_unlock_irqrestore(&port->port_lock, flags); + goto unconfig_msm_ep_out; + } + } else { + /* gadget->is_chipidea */ + get_bam2bam_connection_info(port->usb_bam_type, + port->dst_connection_idx, + &port->dst_pipe_idx, + NULL, NULL, NULL); + sps_params = (MSM_SPS_MODE | port->dst_pipe_idx | + MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; + port->tx_req->udc_priv = sps_params; + } + } + + if (port->func_type == USB_IPA_FUNC_RMNET) { + teth_bridge_params.client = port->ipa_params.src_client; + ret = teth_bridge_init(&teth_bridge_params); + if (ret) { + pr_err("%s:teth_bridge_init() failed\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + goto unconfig_msm_ep_in; + } + } + + /* + * Perform below operations for Tx from Device (OUT transfer) + * 1. Connect with pipe of USB BAM with IPA BAM pipe + * 2. Update USB Endpoint related information using SPS Param. + * 3. Configure USB Endpoint/DBM for the same. + * 4. Override USB ep queue functionality for endless transfer. 
+ */ + if (gport->out) { + pr_debug("configure bam ipa connect for USB OUT\n"); + port->ipa_params.dir = USB_TO_PEER_PERIPHERAL; + + if (port->func_type == USB_IPA_FUNC_RNDIS) { + port->ipa_params.notify = rndis_qc_get_ipa_rx_cb(); + port->ipa_params.priv = rndis_qc_get_ipa_priv(); + port->ipa_params.skip_ep_cfg = + rndis_qc_get_skip_ep_config(); + } else if (port->func_type == USB_IPA_FUNC_RMNET) { + port->ipa_params.notify = + teth_bridge_params.usb_notify_cb; + port->ipa_params.priv = + teth_bridge_params.private_data; + port->ipa_params.reset_pipe_after_lpm = + msm_dwc3_reset_ep_after_lpm(gadget); + port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC; + port->ipa_params.skip_ep_cfg = + teth_bridge_params.skip_ep_cfg; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_bam_connect_ipa(port->usb_bam_type, + &port->ipa_params); + if (ret) { + pr_err("usb_bam_connect_ipa out failed err:%d\n", ret); + goto disconnect_usb_bam_ipa_out; + } + spin_lock_irqsave(&port->port_lock, flags); + is_ipa_disconnected = false; + /* check if USB cable is disconnected or not */ + if (!port->port_usb) { + pr_debug("%s:%d: cable is disconnected.\n", + __func__, __LINE__); + spin_unlock_irqrestore(&port->port_lock, flags); + goto disconnect_usb_bam_ipa_out; + } + + gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx; + } + + if (gport->in) { + pr_debug("configure bam ipa connect for USB IN\n"); + port->ipa_params.dir = PEER_PERIPHERAL_TO_USB; + + if (port->func_type == USB_IPA_FUNC_RNDIS) { + port->ipa_params.notify = rndis_qc_get_ipa_tx_cb(); + port->ipa_params.priv = rndis_qc_get_ipa_priv(); + port->ipa_params.skip_ep_cfg = + rndis_qc_get_skip_ep_config(); + } else if (port->func_type == USB_IPA_FUNC_RMNET) { + port->ipa_params.notify = + teth_bridge_params.usb_notify_cb; + port->ipa_params.priv = + teth_bridge_params.private_data; + port->ipa_params.reset_pipe_after_lpm = + msm_dwc3_reset_ep_after_lpm(gadget); + port->ipa_params.ipa_ep_cfg.mode.mode = 
IPA_BASIC; + port->ipa_params.skip_ep_cfg = + teth_bridge_params.skip_ep_cfg; + } + + if (port->func_type == USB_IPA_FUNC_DPL) + port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS; + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_bam_connect_ipa(port->usb_bam_type, + &port->ipa_params); + if (ret) { + pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret); + goto disconnect_usb_bam_ipa_out; + } + spin_lock_irqsave(&port->port_lock, flags); + is_ipa_disconnected = false; + /* check if USB cable is disconnected or not */ + if (!port->port_usb) { + pr_debug("%s:%d: cable is disconnected.\n", + __func__, __LINE__); + spin_unlock_irqrestore(&port->port_lock, flags); + goto disconnect_usb_bam_ipa_out; + } + + gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + if (port->func_type == USB_IPA_FUNC_RNDIS) { + rndis_data->prod_clnt_hdl = + port->ipa_params.prod_clnt_hdl; + rndis_data->cons_clnt_hdl = + port->ipa_params.cons_clnt_hdl; + rndis_data->priv = port->ipa_params.priv; + + pr_debug("ul_max_transfer_size:%d\n", + rndis_data->ul_max_transfer_size); + pr_debug("ul_max_packets_number:%d\n", + rndis_data->ul_max_packets_number); + pr_debug("dl_max_transfer_size:%d\n", + rndis_data->dl_max_transfer_size); + + ret = rndis_ipa_pipe_connect_notify( + rndis_data->cons_clnt_hdl, + rndis_data->prod_clnt_hdl, + rndis_data->ul_max_transfer_size, + rndis_data->ul_max_packets_number, + rndis_data->dl_max_transfer_size, + rndis_data->priv); + if (ret) { + pr_err("%s: failed to connect IPA: err:%d\n", + __func__, ret); + return; + } + atomic_set(&port->pipe_connect_notified, 1); + } else if (port->func_type == USB_IPA_FUNC_RMNET || + port->func_type == USB_IPA_FUNC_DPL) { + /* For RmNet and DPL need to update_ipa_pipes to qti */ + enum qti_port_type qti_port_type = port->func_type == + USB_IPA_FUNC_RMNET ? 
QTI_PORT_RMNET : QTI_PORT_DPL; + gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type, + gport->ipa_producer_ep, gport->ipa_consumer_ep); + } + + if (port->func_type == USB_IPA_FUNC_RMNET) { + connect_params.ipa_usb_pipe_hdl = + port->ipa_params.prod_clnt_hdl; + connect_params.usb_ipa_pipe_hdl = + port->ipa_params.cons_clnt_hdl; + connect_params.tethering_mode = + TETH_TETHERING_MODE_RMNET; + connect_params.client_type = + port->ipa_params.src_client; + ret = teth_bridge_connect(&connect_params); + if (ret) { + pr_err("%s:teth_bridge_connect() failed\n", __func__); + goto disconnect_usb_bam_ipa_out; + } + } + + pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n", + gport->ipa_producer_ep, + gport->ipa_consumer_ep); + + pr_debug("src_bam_idx:%d dst_bam_idx:%d\n", + port->src_connection_idx, port->dst_connection_idx); + + /* Don't queue the transfers yet, only after network stack is up */ + if (port->func_type == USB_IPA_FUNC_RNDIS) { + pr_debug("%s: Not starting now, waiting for network notify\n", + __func__); + return; + } + + if (gport->out) + ipa_data_start_endless_xfer(port, false); + if (gport->in) + ipa_data_start_endless_xfer(port, true); + + pr_debug("Connect workqueue done (port %pK)\n", port); + return; + +disconnect_usb_bam_ipa_out: + if (!is_ipa_disconnected) { + usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params); + is_ipa_disconnected = true; + } + if (port->func_type == USB_IPA_FUNC_RMNET) + teth_bridge_disconnect(port->ipa_params.src_client); +unconfig_msm_ep_in: + spin_lock_irqsave(&port->port_lock, flags); + /* check if USB cable is disconnected or not */ + if (port->port_usb && gport->in) + msm_ep_unconfig(port->port_usb->in); + spin_unlock_irqrestore(&port->port_lock, flags); +unconfig_msm_ep_out: + if (gport->in) + usb_bam_free_fifos(port->usb_bam_type, + port->dst_connection_idx); + spin_lock_irqsave(&port->port_lock, flags); + /* check if USB cable is disconnected or not */ + if (port->port_usb && gport->out) + 
msm_ep_unconfig(port->port_usb->out); + spin_unlock_irqrestore(&port->port_lock, flags); +out: + if (gport->out) + usb_bam_free_fifos(port->usb_bam_type, + port->src_connection_idx); + spin_lock_irqsave(&port->port_lock, flags); + port->is_connected = false; + spin_unlock_irqrestore(&port->port_lock, flags); + usb_gadget_autopm_put_async(port->gadget); +} + +/** + * ipa_data_connect() - Prepare IPA params and enable USB endpoints + * @gp: USB IPA gadget port + * @port_num: port number used by accelerated function + * @src_connection_idx: USB BAM pipe index used as producer + * @dst_connection_idx: USB BAM pipe index used as consumer + * + * It is being called from accelerated function driver (from set_alt()) to + * initiate USB BAM IPA connection. This API is enabling accelerated endpoints + * and schedule connect_work() which establishes USB IPA BAM communication. + */ +int ipa_data_connect(struct data_port *gp, enum ipa_func_type func, + u8 src_connection_idx, u8 dst_connection_idx) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + int ret = 0; + + pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n", + gp, func, src_connection_idx, dst_connection_idx); + + if (func >= USB_IPA_NUM_FUNCS) { + pr_err("invalid portno#%d\n", func); + ret = -ENODEV; + goto err; + } + + if (!gp) { + pr_err("gadget port is null\n"); + ret = -ENODEV; + goto err; + } + + port = ipa_data_ports[func]; + + spin_lock_irqsave(&port->port_lock, flags); + port->port_usb = gp; + port->gadget = gp->cdev->gadget; + + if (gp->out) { + port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC); + if (!port->rx_req) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s: failed to allocate rx_req\n", __func__); + goto err; + } + port->rx_req->context = port; + port->rx_req->complete = ipa_data_endless_complete; + port->rx_req->length = 0; + port->rx_req->no_interrupt = 1; + } + + if (gp->in) { + port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC); + if 
(!port->tx_req) { + pr_err("%s: failed to allocate tx_req\n", __func__); + ret = -ENOMEM; goto free_rx_req; + } + port->tx_req->context = port; + port->tx_req->complete = ipa_data_endless_complete; + port->tx_req->length = 0; + port->tx_req->no_interrupt = 1; + } + port->src_connection_idx = src_connection_idx; + port->dst_connection_idx = dst_connection_idx; + port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name); + + port->ipa_params.src_pipe = &(port->src_pipe_idx); + port->ipa_params.dst_pipe = &(port->dst_pipe_idx); + port->ipa_params.src_idx = src_connection_idx; + port->ipa_params.dst_idx = dst_connection_idx; + + /* + * Disable Xfer complete and Xfer not ready interrupts by + * marking endless flag which is used in UDC driver to enable + * these interrupts. with this set, these interrupts for selected + * endpoints won't be enabled. + */ + if (port->port_usb->in) { + port->port_usb->in->endless = true; + ret = usb_ep_enable(port->port_usb->in); + if (ret) { + pr_err("usb_ep_enable failed eptype:IN ep:%pK\n", + port->port_usb->in); + usb_ep_free_request(port->port_usb->in, port->tx_req); + port->tx_req = NULL; + port->port_usb->in->endless = false; + goto err_usb_in; + } + } + + if (port->port_usb->out) { + port->port_usb->out->endless = true; + ret = usb_ep_enable(port->port_usb->out); + if (ret) { + pr_err("usb_ep_enable failed eptype:OUT ep:%pK\n", + port->port_usb->out); + usb_ep_free_request(port->port_usb->out, port->rx_req); + port->rx_req = NULL; + port->port_usb->out->endless = false; + goto err_usb_out; + } + } + + /* Wait for host to enable flow_control */ + if (port->func_type == USB_IPA_FUNC_RNDIS) { + spin_unlock_irqrestore(&port->port_lock, flags); + ret = 0; + return ret; + } + + /* + * Increment usage count upon cable connect. Decrement after IPA + * handshake is done in disconnect work (due to cable disconnect) + * or in suspend work. 
+ */ + usb_gadget_autopm_get_noresume(port->gadget); + + queue_work(ipa_data_wq, &port->connect_w); + spin_unlock_irqrestore(&port->port_lock, flags); + + return ret; + +err_usb_out: + if (port->port_usb->in) { + usb_ep_disable(port->port_usb->in); + port->port_usb->in->endless = false; + } +err_usb_in: + if (gp->in && port->tx_req) { + usb_ep_free_request(gp->in, port->tx_req); + port->tx_req = NULL; + } +free_rx_req: + if (gp->out && port->rx_req) { + usb_ep_free_request(gp->out, port->rx_req); + port->rx_req = NULL; + } + spin_unlock_irqrestore(&port->port_lock, flags); +err: + pr_debug("%s(): failed with error:%d\n", __func__, ret); + return ret; +} + +/** + * ipa_data_start() - Restart USB endless transfer + * @param: IPA data channel information + * @dir: USB BAM pipe direction + * + * It is being used to restart USB endless transfer for USB bus resume. + * For USB consumer case, it restarts USB endless RX transfer, whereas + * for USB producer case, it resets DBM endpoint and restart USB endless + * TX transfer. + */ +static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir) +{ + struct ipa_data_ch_info *port = param; + struct usb_gadget *gadget = NULL; + + if (!port || !port->port_usb || !port->port_usb->cdev->gadget) { + pr_err("%s:port,cdev or gadget is NULL\n", __func__); + return; + } + + gadget = port->port_usb->cdev->gadget; + if (dir == USB_TO_PEER_PERIPHERAL) { + pr_debug("%s(): start endless RX\n", __func__); + ipa_data_start_endless_xfer(port, false); + } else { + pr_debug("%s(): start endless TX\n", __func__); + if (msm_dwc3_reset_ep_after_lpm(gadget)) { + configure_fifo(port->usb_bam_type, + port->dst_connection_idx, port->port_usb->in); + } + ipa_data_start_endless_xfer(port, true); + } +} + +/** + * ipa_data_stop() - Stop endless Tx/Rx transfers + * @param: IPA data channel information + * @dir: USB BAM pipe direction + * + * It is being used to stop endless Tx/Rx transfers. It is being used + * for USB bus suspend functionality. 
+ */ +static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir) +{ + struct ipa_data_ch_info *port = param; + struct usb_gadget *gadget = NULL; + + if (!port || !port->port_usb || !port->port_usb->cdev->gadget) { + pr_err("%s:port,cdev or gadget is NULL\n", __func__); + return; + } + + gadget = port->port_usb->cdev->gadget; + if (dir == USB_TO_PEER_PERIPHERAL) { + pr_debug("%s(): stop endless RX transfer\n", __func__); + ipa_data_stop_endless_xfer(port, false); + } else { + pr_debug("%s(): stop endless TX transfer\n", __func__); + ipa_data_stop_endless_xfer(port, true); + } +} + +void ipa_data_flush_workqueue(void) +{ + pr_debug("%s(): Flushing workqueue\n", __func__); + flush_workqueue(ipa_data_wq); +} + +/** + * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality + * @gp: Gadget IPA port + * @port_num: port number used by function + * + * It is being used to initiate USB BAM IPA suspend functionality + * for USB bus suspend functionality. + */ +void ipa_data_suspend(struct data_port *gp, enum ipa_func_type func, + bool remote_wakeup_enabled) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + + if (func >= USB_IPA_NUM_FUNCS) { + pr_err("invalid ipa portno#%d\n", func); + return; + } + + if (!gp) { + pr_err("data port is null\n"); + return; + } + pr_debug("%s: suspended port %d\n", __func__, func); + + port = ipa_data_ports[func]; + if (!port) { + pr_err("%s(): Port is NULL.\n", __func__); + return; + } + + /* suspend with remote wakeup disabled */ + if (!remote_wakeup_enabled) { + /* + * When remote wakeup is disabled, IPA BAM is disconnected + * because it cannot send new data until the USB bus is resumed. + * Endpoint descriptors info is saved before it gets reset by + * the BAM disconnect API. This lets us restore this info when + * the USB bus is resumed. 
+ */ + if (gp->in) { + gp->in_ep_desc_backup = gp->in->desc; + pr_debug("in_ep_desc_backup = %pK\n", + gp->in_ep_desc_backup); + } + if (gp->out) { + gp->out_ep_desc_backup = gp->out->desc; + pr_debug("out_ep_desc_backup = %pK\n", + gp->out_ep_desc_backup); + } + ipa_data_disconnect(gp, func); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + queue_work(ipa_data_wq, &port->suspend_w); + spin_unlock_irqrestore(&port->port_lock, flags); +} +static void bam2bam_data_suspend_work(struct work_struct *w) +{ + struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, + suspend_w); + unsigned long flags; + int ret; + + pr_debug("%s: suspend started\n", __func__); + spin_lock_irqsave(&port->port_lock, flags); + + /* In case of RNDIS, host enables flow_control invoking connect_w. If it + * is delayed then we may end up having suspend_w run before connect_w. + * In this scenario, connect_w may or may not at all start if cable gets + * disconnected or if host changes configuration e.g. RNDIS --> MBIM + * For these cases don't do runtime_put as there was no _get yet, and + * detect this condition on disconnect to not do extra pm_runtime_get + * for SUSPEND --> DISCONNECT scenario. + */ + if (!port->is_connected) { + pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + ret = usb_bam_register_wake_cb(port->usb_bam_type, + port->dst_connection_idx, NULL, port); + if (ret) { + pr_err("%s(): Failed to register BAM wake callback.\n", + __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + usb_bam_register_start_stop_cbs(port->usb_bam_type, + port->dst_connection_idx, ipa_data_start, + ipa_data_stop, port); + /* + * release lock here because bam_data_start() or + * bam_data_stop() called from usb_bam_suspend() + * re-acquires port lock. 
+ */ + spin_unlock_irqrestore(&port->port_lock, flags); + usb_bam_suspend(port->usb_bam_type, &port->ipa_params); + spin_lock_irqsave(&port->port_lock, flags); + + /* + * Decrement usage count after IPA handshake is done + * to allow gadget parent to go to lpm. This counter was + * incremented upon cable connect. + */ + usb_gadget_autopm_put_async(port->gadget); + + spin_unlock_irqrestore(&port->port_lock, flags); +} + +/** + * ipa_data_resume() - Initiate USB resume functionality + * @gp: Gadget IPA port + * @port_num: port number used by function + * + * It is being used to initiate USB resume functionality + * for USB bus resume case. + */ +void ipa_data_resume(struct data_port *gp, enum ipa_func_type func, + bool remote_wakeup_enabled) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + struct usb_gadget *gadget = NULL; + u8 src_connection_idx = 0; + u8 dst_connection_idx = 0; + enum usb_ctrl usb_bam_type; + + pr_debug("dev:%pK port number:%d\n", gp, func); + + if (func >= USB_IPA_NUM_FUNCS) { + pr_err("invalid ipa portno#%d\n", func); + return; + } + + if (!gp) { + pr_err("data port is null\n"); + return; + } + + port = ipa_data_ports[func]; + if (!port) { + pr_err("port %u is NULL\n", func); + return; + } + + gadget = gp->cdev->gadget; + /* resume with remote wakeup disabled */ + if (!remote_wakeup_enabled) { + int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0; + + usb_bam_type = usb_bam_get_bam_type(gadget->name); + /* Restore endpoint descriptors info. 
*/ + if (gp->in) { + gp->in->desc = gp->in_ep_desc_backup; + pr_debug("in_ep_desc_backup = %pK\n", + gp->in_ep_desc_backup); + dst_connection_idx = usb_bam_get_connection_idx( + usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB, + bam_pipe_num); + } + if (gp->out) { + gp->out->desc = gp->out_ep_desc_backup; + pr_debug("out_ep_desc_backup = %pK\n", + gp->out_ep_desc_backup); + src_connection_idx = usb_bam_get_connection_idx( + usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL, + bam_pipe_num); + } + ipa_data_connect(gp, func, + src_connection_idx, dst_connection_idx); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + + /* + * Increment usage count here to disallow gadget + * parent suspend. This counter will decrement + * after IPA handshake is done in disconnect work + * (due to cable disconnect) or in bam_data_disconnect + * in suspended state. + */ + usb_gadget_autopm_get_noresume(port->gadget); + queue_work(ipa_data_wq, &port->resume_w); + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static void bam2bam_data_resume_work(struct work_struct *w) +{ + struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, + resume_w); + struct usb_gadget *gadget; + unsigned long flags; + int ret; + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb || !port->port_usb->cdev) { + pr_err("port->port_usb or cdev is NULL\n"); + goto exit; + } + + if (!port->port_usb->cdev->gadget) { + pr_err("port->port_usb->cdev->gadget is NULL\n"); + goto exit; + } + + pr_debug("%s: resume started\n", __func__); + gadget = port->port_usb->cdev->gadget; + if (!gadget) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s(): Gadget is NULL.\n", __func__); + return; + } + + ret = usb_bam_register_wake_cb(port->usb_bam_type, + port->dst_connection_idx, NULL, NULL); + if (ret) { + spin_unlock_irqrestore(&port->port_lock, flags); + pr_err("%s(): Failed to register BAM wake callback.\n", + __func__); + return; + } + + if 
(msm_dwc3_reset_ep_after_lpm(gadget)) { + configure_fifo(port->usb_bam_type, port->src_connection_idx, + port->port_usb->out); + configure_fifo(port->usb_bam_type, port->dst_connection_idx, + port->port_usb->in); + spin_unlock_irqrestore(&port->port_lock, flags); + msm_dwc3_reset_dbm_ep(port->port_usb->in); + spin_lock_irqsave(&port->port_lock, flags); + } + usb_bam_resume(port->usb_bam_type, &port->ipa_params); + +exit: + spin_unlock_irqrestore(&port->port_lock, flags); +} + +/** + * ipa_data_port_alloc() - Allocate IPA USB Port structure + * @func: USB IPA function type to be used by particular USB function + * + * It is being used by USB function driver to allocate IPA data port + * for USB IPA data accelerated path. + * + * Return: 0 in case of success, otherwise errno. + */ +static int ipa_data_port_alloc(enum ipa_func_type func) +{ + struct ipa_data_ch_info *port = NULL; + + if (ipa_data_ports[func] != NULL) { + pr_debug("port %d already allocated.\n", func); + return 0; + } + + port = kzalloc(sizeof(struct ipa_data_ch_info), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ipa_data_ports[func] = port; + + pr_debug("port:%pK with portno:%d allocated\n", port, func); + return 0; +} + +/** + * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode + * @func: USB IPA function type to be used by particular USB function + * @func_type: USB gadget function type + * + * It is being used by USB function driver to select which BAM2BAM IPA + * port particular USB function wants to use. 
+ * + */ +void ipa_data_port_select(enum ipa_func_type func) +{ + struct ipa_data_ch_info *port = NULL; + + pr_debug("portno:%d\n", func); + + port = ipa_data_ports[func]; + port->port_num = func; + port->is_connected = false; + + spin_lock_init(&port->port_lock); + + if (!work_pending(&port->connect_w)) + INIT_WORK(&port->connect_w, ipa_data_connect_work); + + if (!work_pending(&port->disconnect_w)) + INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work); + + INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work); + INIT_WORK(&port->resume_w, bam2bam_data_resume_work); + + port->ipa_params.src_client = IPA_CLIENT_USB_PROD; + port->ipa_params.dst_client = IPA_CLIENT_USB_CONS; + port->func_type = func; +} + +void ipa_data_free(enum ipa_func_type func) +{ + pr_debug("freeing %d IPA BAM port\n", func); + + kfree(ipa_data_ports[func]); + ipa_data_ports[func] = NULL; + if (func == USB_IPA_FUNC_RNDIS) + { kfree(rndis_data); rndis_data = NULL; } + if (ipa_data_wq) { + destroy_workqueue(ipa_data_wq); + ipa_data_wq = NULL; + } +} + +/** + * ipa_data_setup() - setup BAM2BAM IPA port + * + * Each USB function who wants to use BAM2BAM IPA port would + * be counting number of IPA port to use and initialize those + * ports at time of bind_config() in android gadget driver. + * + * Return: 0 in case of success, otherwise errno. 
+ */ +int ipa_data_setup(enum ipa_func_type func) +{ + int ret; + + pr_debug("requested %d IPA BAM port\n", func); + + if (func >= USB_IPA_NUM_FUNCS) { + pr_err("Invalid num of ports count:%d\n", func); + return -EINVAL; + } + + ret = ipa_data_port_alloc(func); + if (ret) { + pr_err("Failed to alloc port:%d\n", func); + return ret; + } + + if (func == USB_IPA_FUNC_RNDIS) { + rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL); + if (!rndis_data) + { ret = -ENOMEM; goto free_ipa_ports; } + } + if (ipa_data_wq) { + pr_debug("ipa_data_wq is already setup.\n"); + return 0; + } + + ipa_data_wq = alloc_workqueue("k_usb_ipa_data", + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!ipa_data_wq) { + pr_err("Failed to create workqueue\n"); + ret = -ENOMEM; + goto free_rndis_data; + } + + return 0; + +free_rndis_data: + if (func == USB_IPA_FUNC_RNDIS) + { kfree(rndis_data); rndis_data = NULL; } +free_ipa_ports: + kfree(ipa_data_ports[func]); + ipa_data_ports[func] = NULL; + + return ret; +} + +void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size) +{ + if (!max_transfer_size) { + pr_err("%s: invalid parameters\n", __func__); + return; + } + rndis_data->ul_max_transfer_size = max_transfer_size; + pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size); +} + +void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size) +{ + + if (!max_transfer_size) { + pr_err("%s: invalid parameters\n", __func__); + return; + } + rndis_data->dl_max_transfer_size = max_transfer_size; + pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size); +} + +void ipa_data_set_ul_max_pkt_num(u8 max_packets_number) +{ + if (!max_packets_number) { + pr_err("%s: invalid parameters\n", __func__); + return; + } + + rndis_data->ul_max_packets_number = max_packets_number; + + if (max_packets_number > 1) + rndis_data->ul_aggregation_enable = true; + else + rndis_data->ul_aggregation_enable = false; + + pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n", + __func__, rndis_data->ul_aggregation_enable, + 
max_packets_number); +} + +void ipa_data_start_rndis_ipa(enum ipa_func_type func) +{ + struct ipa_data_ch_info *port; + + pr_debug("%s\n", __func__); + + port = ipa_data_ports[func]; + if (!port) { + pr_err("%s: port is NULL\n", __func__); + return; + } + + if (atomic_read(&port->pipe_connect_notified)) { + pr_debug("%s: Transfers already started?\n", __func__); + return; + } + /* + * Increment usage count upon cable connect. Decrement after IPA + * handshake is done in disconnect work due to cable disconnect + * or in suspend work. + */ + usb_gadget_autopm_get_noresume(port->gadget); + queue_work(ipa_data_wq, &port->connect_w); +} + +void ipa_data_stop_rndis_ipa(enum ipa_func_type func) +{ + struct ipa_data_ch_info *port; + unsigned long flags; + + pr_debug("%s\n", __func__); + + port = ipa_data_ports[func]; + if (!port) { + pr_err("%s: port is NULL\n", __func__); + return; + } + + if (!atomic_read(&port->pipe_connect_notified)) + return; + + rndis_ipa_reset_trigger(); + ipa_data_stop_endless_xfer(port, true); + ipa_data_stop_endless_xfer(port, false); + spin_lock_irqsave(&port->port_lock, flags); + /* check if USB cable is disconnected or not */ + if (port->port_usb) { + msm_ep_unconfig(port->port_usb->in); + msm_ep_unconfig(port->port_usb->out); + } + spin_unlock_irqrestore(&port->port_lock, flags); + queue_work(ipa_data_wq, &port->disconnect_w); +} diff --git a/drivers/usb/gadget/function/u_data_ipa.h b/drivers/usb/gadget/function/u_data_ipa.h new file mode 100644 index 000000000000..9813374347da --- /dev/null +++ b/drivers/usb/gadget/function/u_data_ipa.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2014,2016,2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __U_DATA_IPA_H +#define __U_DATA_IPA_H + +#include +#include +#include +#include +#include +#include + +#include "u_rmnet.h" + +enum ipa_func_type { + USB_IPA_FUNC_ECM, + USB_IPA_FUNC_MBIM, + USB_IPA_FUNC_RMNET, + USB_IPA_FUNC_RNDIS, + USB_IPA_FUNC_DPL, + USB_IPA_NUM_FUNCS, +}; + +/* Max Number of IPA data ports supported */ +#define IPA_N_PORTS USB_IPA_NUM_FUNCS + +struct ipa_function_bind_info { + struct usb_string *string_defs; + int data_str_idx; + struct usb_interface_descriptor *data_desc; + struct usb_endpoint_descriptor *fs_in_desc; + struct usb_endpoint_descriptor *fs_out_desc; + struct usb_endpoint_descriptor *fs_notify_desc; + struct usb_endpoint_descriptor *hs_in_desc; + struct usb_endpoint_descriptor *hs_out_desc; + struct usb_endpoint_descriptor *hs_notify_desc; + struct usb_endpoint_descriptor *ss_in_desc; + struct usb_endpoint_descriptor *ss_out_desc; + struct usb_endpoint_descriptor *ss_notify_desc; + + struct usb_descriptor_header **fs_desc_hdr; + struct usb_descriptor_header **hs_desc_hdr; + struct usb_descriptor_header **ss_desc_hdr; +}; + +/* for configfs support */ +#define MAX_INST_NAME_LEN 40 + +struct f_rndis_qc_opts { + struct usb_function_instance func_inst; + struct f_rndis_qc *rndis; + u32 vendor_id; + const char *manufacturer; + struct net_device *net; + int refcnt; +}; + +struct f_rmnet_opts { + struct usb_function_instance func_inst; + struct f_rmnet *dev; + int refcnt; +}; + +#ifdef CONFIG_USB_F_QCRNDIS +void ipa_data_port_select(enum ipa_func_type func); +void ipa_data_disconnect(struct data_port *gp, enum ipa_func_type func); +int ipa_data_connect(struct data_port *gp, enum ipa_func_type func, + u8 src_connection_idx, u8 dst_connection_idx); +int ipa_data_setup(enum 
ipa_func_type func); +void ipa_data_free(enum ipa_func_type func); + +void ipa_data_flush_workqueue(void); +void ipa_data_resume(struct data_port *gp, enum ipa_func_type func, + bool remote_wakeup_enabled); +void ipa_data_suspend(struct data_port *gp, enum ipa_func_type func, + bool remote_wakeup_enabled); + +void ipa_data_set_ul_max_xfer_size(u32 ul_max_xfer_size); + +void ipa_data_set_dl_max_xfer_size(u32 dl_max_transfer_size); + +void ipa_data_set_ul_max_pkt_num(u8 ul_max_packets_number); + +void ipa_data_start_rx_tx(enum ipa_func_type func); + +void ipa_data_start_rndis_ipa(enum ipa_func_type func); + +void ipa_data_stop_rndis_ipa(enum ipa_func_type func); +#else +static inline void ipa_data_port_select(enum ipa_func_type func) +{ +} +static inline void ipa_data_disconnect(struct data_port *gp, + enum ipa_func_type func) +{ +} +static inline int ipa_data_connect(struct data_port *gp, + enum ipa_func_type func, u8 src_connection_idx, + u8 dst_connection_idx) +{ + return 0; +} +static inline int ipa_data_setup(enum ipa_func_type func) +{ + return 0; +} +static inline void ipa_data_free(enum ipa_func_type func) +{ +} +static inline void ipa_data_flush_workqueue(void) +{ +} +static inline void ipa_data_resume(struct data_port *gp, + enum ipa_func_type func, bool remote_wakeup_enabled) +{ +} +static inline void ipa_data_suspend(struct data_port *gp, + enum ipa_func_type func, bool remote_wakeup_enabled) +{ +} +#endif /* CONFIG_USB_F_QCRNDIS */ + +#ifdef CONFIG_USB_F_QCRNDIS +void *rndis_qc_get_ipa_priv(void); +void *rndis_qc_get_ipa_rx_cb(void); +bool rndis_qc_get_skip_ep_config(void); +void *rndis_qc_get_ipa_tx_cb(void); +void rndis_ipa_reset_trigger(void); +#else +static inline void *rndis_qc_get_ipa_priv(void) +{ + return NULL; +} +static inline void *rndis_qc_get_ipa_rx_cb(void) +{ + return NULL; +} +static inline bool rndis_qc_get_skip_ep_config(void) +{ + return true; +} +static inline void *rndis_qc_get_ipa_tx_cb(void) +{ + return NULL; +} +static inline void 
rndis_ipa_reset_trigger(void) +{ +} +#endif /* CONFIG_USB_F_QCRNDIS */ + +#if IS_ENABLED(CONFIG_USB_CONFIGFS_RMNET_BAM) +void gqti_ctrl_update_ipa_pipes(void *gr, enum qti_port_type qport, + u32 ipa_prod, u32 ipa_cons); +#else +static inline void gqti_ctrl_update_ipa_pipes(void *gr, + enum qti_port_type qport, + u32 ipa_prod, u32 ipa_cons) +{ +} +#endif /* CONFIG_USB_CONFIGFS_RMNET_BAM */ +#endif diff --git a/include/linux/ipa.h b/include/linux/ipa.h index c102d2b23b7f..af4e599a7cc8 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -1804,6 +1804,19 @@ int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res); bool ipa_get_lan_rx_napi(void); #else /* (CONFIG_IPA || CONFIG_IPA3) */ +/* low-level IPA client Connect / Disconnect */ + +static inline int ipa_connect(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_disconnect(u32 clnt_hdl) +{ + return -EPERM; +} + /* * Resume / Suspend */ diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h index 2034dccb61ff..7fe2daa480e7 100644 --- a/include/linux/usb_bam.h +++ b/include/linux/usb_bam.h @@ -1,15 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved. */ #ifndef _USB_BAM_H_ #define _USB_BAM_H_ #include +#include #include #include #include +#include #define MAX_BAMS NUM_CTRL /* Bam per USB controllers */ @@ -43,6 +45,61 @@ enum usb_bam_pipe_type { USB_BAM_MAX_PIPE_TYPES, }; +/* + * struct usb_bam_connect_ipa_params: Connect Bam pipe to IPA peer information. + * @ src_idx: Source pipe index in usb bam pipes lists. + * @ dst_idx: Destination pipe index in usb bam pipes lists. + * @ src_pipe: The source pipe index in the sps level. + * @ dst_pipe: The destination pipe index in the sps level. + * @ keep_ipa_awake: When true, IPA will not be clock gated. 
+ * @ ipa_cons_ep_idx: The pipe index on the IPA peer bam side, consumer. + * @ ipa_prod_ep_idx: The pipe index on the IPA peer bam side, producer. + * @ prod_clnt_hdl: Producer client handle returned by IPA driver + * @ cons_clnt_hdl: Consumer client handle returned by IPA driver + * @ src_client: Source IPA client type. + * @ dst_client: Destination IPA client type. + * @ ipa_ep_cfg: Configuration of IPA end-point (see struct ipa_ep_cfg) + * @priv: Callback cookie to the notify event. + * @notify: Callback on data path event by IPA (see enum ipa_dp_evt_type) + * This call back gets back the priv cookie. + * for Bam2Bam mode, this callback is in the tethering bridge. + * @ activity_notify: Callback to be notified on any data being pushed into the + * USB consumer pipe. + * @ inactivity_notify: Callback to be notified on inactivity of all the current + * open pipes between the USB bam and its peer. + * @ skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not configure this end-point. + * (Please see struct teth_bridge_init_params) + * @ reset_pipe_after_lpm: bool to indicate if IPA should reset pipe after LPM. + * @ usb_connection_speed: The actual speed the USB core currently works at. 
+ */ +struct usb_bam_connect_ipa_params { + u8 src_idx; + u8 dst_idx; + u32 *src_pipe; + u32 *dst_pipe; + bool keep_ipa_awake; + enum usb_bam_pipe_dir dir; + /* Parameters for Port Mapper */ + u32 ipa_cons_ep_idx; + u32 ipa_prod_ep_idx; + /* client handle assigned by IPA to client */ + u32 prod_clnt_hdl; + u32 cons_clnt_hdl; + /* params assigned by the CD */ + enum ipa_client_type src_client; + enum ipa_client_type dst_client; + struct ipa_ep_cfg ipa_ep_cfg; + void *priv; + void (*notify)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + int (*activity_notify)(void *priv); + int (*inactivity_notify)(void *priv); + bool skip_ep_cfg; + bool reset_pipe_after_lpm; + enum usb_device_speed usb_connection_speed; +}; + #if IS_ENABLED(CONFIG_USB_BAM) /** * Connect USB-to-Peripheral SPS connection. @@ -63,6 +120,36 @@ enum usb_bam_pipe_type { int usb_bam_connect(enum usb_ctrl bam_type, int idx, u32 *bam_pipe_idx, unsigned long iova); +/** + * Connect USB-to-IPA SPS connection. + * + * This function returns the allocated pipes number and clnt + * handles. Assumes that the user first connects producer pipes + * and only after that consumer pipes, since that's the correct + * sequence for the handshake with the IPA. + * + * @bam_type - USB BAM type - dwc3/CI/hsic + * + * @ipa_params - in/out parameters + * + * @return 0 on success, negative value on error + */ +int usb_bam_connect_ipa(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params); + +/** + * Disconnect USB-to-IPA SPS connection. + * + * @bam_type - USB BAM type - dwc3/CI/hsic + * + * @ipa_params - in/out parameters + * + * @return 0 on success, negative value on error + */ +int usb_bam_disconnect_ipa(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params); + + /** * Register a wakeup callback from peer BAM. 
* @@ -99,6 +186,27 @@ int usb_bam_register_start_stop_cbs(enum usb_ctrl bam_type, void (*stop)(void *, enum usb_bam_pipe_dir), void *param); +/** + * Start usb suspend sequence + * + * @ipa_params - in/out parameters + * + * @bam_type - USB BAM type - dwc3/CI/hsic + */ +void usb_bam_suspend(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params); + +/** + * Start usb resume sequence + * + * @bam_type - USB BAM type - dwc3/CI/hsic + * + * @ipa_params - in/out parameters + */ +void usb_bam_resume(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params); + + /** + * Disconnect USB-to-Periperal SPS connection. + * @@ -183,6 +291,13 @@ enum usb_ctrl usb_bam_get_bam_type(const char *core_name); int usb_bam_get_pipe_type(enum usb_ctrl bam_type, u8 idx, enum usb_bam_pipe_type *type); +/* + * Indicates whether USB producer is granted to IPA resource manager. + * + * @return true when producer granted, false when producer is released. + */ +bool usb_bam_get_prod_granted(enum usb_ctrl bam_type, u8 idx); + +/* Allocates memory for data fifo and descriptor fifos. 
*/ int usb_bam_alloc_fifos(enum usb_ctrl cur_bam, u8 idx); @@ -197,6 +312,18 @@ static inline int usb_bam_connect(enum usb_ctrl bam, u8 idx, u32 *bam_pipe_idx, return -ENODEV; } +static inline int usb_bam_connect_ipa(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params) +{ + return -ENODEV; +} + +static inline int usb_bam_disconnect_ipa(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params) +{ + return -ENODEV; +} + static inline int usb_bam_register_wake_cb(enum usb_ctrl bam_type, u8 idx, int (*callback)(void *), void *param) { @@ -211,6 +338,12 @@ static inline int usb_bam_register_start_stop_cbs(enum usb_ctrl bam, u8 idx, return -ENODEV; } +static inline void usb_bam_suspend(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params){} + +static inline void usb_bam_resume(enum usb_ctrl bam_type, + struct usb_bam_connect_ipa_params *ipa_params) {} + static inline int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx) { return -ENODEV; @@ -248,6 +381,11 @@ static inline int usb_bam_get_pipe_type(enum usb_ctrl bam_type, u8 idx, return -ENODEV; } +static inline bool usb_bam_get_prod_granted(enum usb_ctrl bam_type, u8 idx) +{ + return false; +} + static inline int usb_bam_alloc_fifos(enum usb_ctrl cur_bam, u8 idx) { return false; diff --git a/include/uapi/linux/usb/usb_ctrl_qti.h b/include/uapi/linux/usb/usb_ctrl_qti.h index 91c163316b31..7d125d1daddf 100644 --- a/include/uapi/linux/usb/usb_ctrl_qti.h +++ b/include/uapi/linux/usb/usb_ctrl_qti.h @@ -5,7 +5,7 @@ #include #include -#define MAX_QTI_PKT_SIZE 2048 +#define MAX_QTI_PKT_SIZE 8192 #define QTI_CTRL_IOCTL_MAGIC 'r' #define QTI_CTRL_GET_LINE_STATE _IOR(QTI_CTRL_IOCTL_MAGIC, 2, int)